bpf, devmap: Move drop error path to devmap for XDP_REDIRECT
We want to change the current ndo_xdp_xmit drop semantics because it will
allow us to implement better queue overflow handling. This is working
towards the larger goal of a XDP TX queue-hook.

Move XDP_REDIRECT error path handling from each XDP ethernet driver to
devmap code. According to the new APIs, the driver implementing the
ndo_xdp_xmit pointer breaks the tx loop whenever the hw reports a tx error
and just returns to the devmap caller the number of successfully
transmitted frames. It is then devmap's responsibility to free the dropped
frames.

Move each XDP ndo_xdp_xmit capable driver to the new APIs:

- veth
- virtio-net
- mvneta
- mvpp2
- socionext
- amazon ena
- bnxt
- freescale (dpaa2, dpaa)
- xen-frontend
- qede
- ice
- igb
- ixgbe
- i40e
- mlx5
- ti (cpsw, cpsw-new)
- tun
- sfc

Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Reviewed-by: Ioana Ciornei <ioana.ciornei@nxp.com>
Reviewed-by: Ilias Apalodimas <ilias.apalodimas@linaro.org>
Reviewed-by: Camelia Groza <camelia.groza@nxp.com>
Acked-by: Edward Cree <ecree.xilinx@gmail.com>
Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
Acked-by: Shay Agroskin <shayagr@amazon.com>
Link: https://lore.kernel.org/bpf/ed670de24f951cfd77590decf0229a0ad7fd12f6.1615201152.git.lorenzo@kernel.org
commit fdc13979f9
parent 6b28276512
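To make the new contract concrete before the per-driver diffs, here is a minimal
driver-side sketch. It is illustrative only and not part of the patch:
foo_xdp_xmit(), foo_hw_queue_frame() and foo_hw_kick() are hypothetical helpers,
while struct xdp_frame, XDP_XMIT_FLAGS_MASK, XDP_XMIT_FLUSH and
xdp_return_frame_rx_napi() are the real XDP APIs. Under the new semantics the
driver stops its tx loop at the first frame the hardware rejects, returns how
many frames it actually queued, and never frees frames itself:

static int foo_xdp_xmit(struct net_device *dev, int n,
			struct xdp_frame **frames, u32 flags)
{
	int i, nxmit = 0;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	for (i = 0; i < n; i++) {
		/* hypothetical hw helper: 0 on success, error if the ring is full */
		if (foo_hw_queue_frame(dev, frames[i]))
			break;		/* tx error: stop, do not free the frame */
		nxmit++;
	}

	if (flags & XDP_XMIT_FLUSH)
		foo_hw_kick(dev);	/* hypothetical doorbell/kick helper */

	return nxmit;			/* frames[nxmit..n-1] are freed by the caller */
}

The devmap side then walks the unsent tail, frames sent..bq->count-1, and frees
it with xdp_return_frame_rx_napi(), which is exactly what the final devmap hunk
below does.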
@@ -300,7 +300,7 @@ static int ena_xdp_xmit_frame(struct ena_ring *xdp_ring,
 
 	rc = ena_xdp_tx_map_frame(xdp_ring, tx_info, xdpf, &push_hdr, &push_len);
 	if (unlikely(rc))
-		goto error_drop_packet;
+		return rc;
 
 	ena_tx_ctx.ena_bufs = tx_info->bufs;
 	ena_tx_ctx.push_header = push_hdr;
@@ -330,8 +330,6 @@ static int ena_xdp_xmit_frame(struct ena_ring *xdp_ring,
 error_unmap_dma:
 	ena_unmap_tx_buff(xdp_ring, tx_info);
 	tx_info->xdpf = NULL;
-error_drop_packet:
-	xdp_return_frame(xdpf);
 	return rc;
 }
 
@@ -339,8 +337,8 @@ static int ena_xdp_xmit(struct net_device *dev, int n,
 			struct xdp_frame **frames, u32 flags)
 {
 	struct ena_adapter *adapter = netdev_priv(dev);
-	int qid, i, err, drops = 0;
 	struct ena_ring *xdp_ring;
+	int qid, i, nxmit = 0;
 
 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
 		return -EINVAL;
@@ -360,12 +358,9 @@ static int ena_xdp_xmit(struct net_device *dev, int n,
 	spin_lock(&xdp_ring->xdp_tx_lock);
 
 	for (i = 0; i < n; i++) {
-		err = ena_xdp_xmit_frame(xdp_ring, dev, frames[i], 0);
-		/* The descriptor is freed by ena_xdp_xmit_frame in case
-		 * of an error.
-		 */
-		if (err)
-			drops++;
+		if (ena_xdp_xmit_frame(xdp_ring, dev, frames[i], 0))
+			break;
+		nxmit++;
 	}
 
 	/* Ring doorbell to make device aware of the packets */
@@ -378,7 +373,7 @@ static int ena_xdp_xmit(struct net_device *dev, int n,
 	spin_unlock(&xdp_ring->xdp_tx_lock);
 
 	/* Return number of packets sent */
-	return n - drops;
+	return nxmit;
 }
 
 static int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp)
@@ -415,7 +410,9 @@ static int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp)
 		/* The XDP queues are shared between XDP_TX and XDP_REDIRECT */
 		spin_lock(&xdp_ring->xdp_tx_lock);
 
-		ena_xdp_xmit_frame(xdp_ring, rx_ring->netdev, xdpf, XDP_XMIT_FLUSH);
+		if (ena_xdp_xmit_frame(xdp_ring, rx_ring->netdev, xdpf,
+				       XDP_XMIT_FLUSH))
+			xdp_return_frame(xdpf);
 
 		spin_unlock(&xdp_ring->xdp_tx_lock);
 		xdp_stat = &rx_ring->rx_stats.xdp_tx;
@@ -217,7 +217,7 @@ int bnxt_xdp_xmit(struct net_device *dev, int num_frames,
 	struct pci_dev *pdev = bp->pdev;
 	struct bnxt_tx_ring_info *txr;
 	dma_addr_t mapping;
-	int drops = 0;
+	int nxmit = 0;
 	int ring;
 	int i;
 
@@ -233,21 +233,17 @@ int bnxt_xdp_xmit(struct net_device *dev, int num_frames,
 		struct xdp_frame *xdp = frames[i];
 
 		if (!txr || !bnxt_tx_avail(bp, txr) ||
-		    !(bp->bnapi[ring]->flags & BNXT_NAPI_FLAG_XDP)) {
-			xdp_return_frame_rx_napi(xdp);
-			drops++;
-			continue;
-		}
+		    !(bp->bnapi[ring]->flags & BNXT_NAPI_FLAG_XDP))
+			break;
 
 		mapping = dma_map_single(&pdev->dev, xdp->data, xdp->len,
 					 DMA_TO_DEVICE);
 
-		if (dma_mapping_error(&pdev->dev, mapping)) {
-			xdp_return_frame_rx_napi(xdp);
-			drops++;
-			continue;
-		}
+		if (dma_mapping_error(&pdev->dev, mapping))
+			break;
+
 		__bnxt_xmit_xdp_redirect(bp, txr, mapping, xdp->len, xdp);
+		nxmit++;
 	}
 
 	if (flags & XDP_XMIT_FLUSH) {
@@ -256,7 +252,7 @@ int bnxt_xdp_xmit(struct net_device *dev, int num_frames,
 		bnxt_db_write(bp, &txr->tx_db, txr->tx_prod);
 	}
 
-	return num_frames - drops;
+	return nxmit;
 }
 
 /* Under rtnl_lock */
@@ -3081,7 +3081,7 @@ static int dpaa_xdp_xmit(struct net_device *net_dev, int n,
 			 struct xdp_frame **frames, u32 flags)
 {
 	struct xdp_frame *xdpf;
-	int i, err, drops = 0;
+	int i, nxmit = 0;
 
 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
 		return -EINVAL;
@@ -3091,14 +3091,12 @@ static int dpaa_xdp_xmit(struct net_device *net_dev, int n,
 
 	for (i = 0; i < n; i++) {
 		xdpf = frames[i];
-		err = dpaa_xdp_xmit_frame(net_dev, xdpf);
-		if (err) {
-			xdp_return_frame_rx_napi(xdpf);
-			drops++;
-		}
+		if (dpaa_xdp_xmit_frame(net_dev, xdpf))
+			break;
+		nxmit++;
 	}
 
-	return n - drops;
+	return nxmit;
 }
 
 static int dpaa_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
@@ -2431,8 +2431,6 @@ static int dpaa2_eth_xdp_xmit(struct net_device *net_dev, int n,
 	percpu_stats->tx_packets += enqueued;
 	for (i = 0; i < enqueued; i++)
 		percpu_stats->tx_bytes += dpaa2_fd_get_len(&fds[i]);
-	for (i = enqueued; i < n; i++)
-		xdp_return_frame_rx_napi(frames[i]);
 
 	return enqueued;
 }
@@ -3847,8 +3847,8 @@ netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
  * @frames: array of XDP buffer pointers
  * @flags: XDP extra info
  *
- * Returns number of frames successfully sent. Frames that fail are
- * free'ed via XDP return API.
+ * Returns number of frames successfully sent. Failed frames
+ * will be free'ed by XDP core.
  *
  * For error cases, a negative errno code is returned and no-frames
  * are transmitted (caller must handle freeing frames).
@@ -3861,7 +3861,7 @@ int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
 	struct i40e_vsi *vsi = np->vsi;
 	struct i40e_pf *pf = vsi->back;
 	struct i40e_ring *xdp_ring;
-	int drops = 0;
+	int nxmit = 0;
 	int i;
 
 	if (test_bit(__I40E_VSI_DOWN, vsi->state))
@@ -3881,14 +3881,13 @@ int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
 		int err;
 
 		err = i40e_xmit_xdp_ring(xdpf, xdp_ring);
-		if (err != I40E_XDP_TX) {
-			xdp_return_frame_rx_napi(xdpf);
-			drops++;
-		}
+		if (err != I40E_XDP_TX)
+			break;
+		nxmit++;
 	}
 
 	if (unlikely(flags & XDP_XMIT_FLUSH))
 		i40e_xdp_ring_update_tail(xdp_ring);
 
-	return n - drops;
+	return nxmit;
 }
@@ -571,8 +571,8 @@ ice_run_xdp(struct ice_ring *rx_ring, struct xdp_buff *xdp,
  * @frames: XDP frames to be transmitted
  * @flags: transmit flags
  *
- * Returns number of frames successfully sent. Frames that fail are
- * free'ed via XDP return API.
+ * Returns number of frames successfully sent. Failed frames
+ * will be free'ed by XDP core.
  * For error cases, a negative errno code is returned and no-frames
  * are transmitted (caller must handle freeing frames).
  */
@@ -584,7 +584,7 @@ ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
 	unsigned int queue_index = smp_processor_id();
 	struct ice_vsi *vsi = np->vsi;
 	struct ice_ring *xdp_ring;
-	int drops = 0, i;
+	int nxmit = 0, i;
 
 	if (test_bit(__ICE_DOWN, vsi->state))
 		return -ENETDOWN;
@@ -601,16 +601,15 @@ ice_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
 		int err;
 
 		err = ice_xmit_xdp_ring(xdpf->data, xdpf->len, xdp_ring);
-		if (err != ICE_XDP_TX) {
-			xdp_return_frame_rx_napi(xdpf);
-			drops++;
-		}
+		if (err != ICE_XDP_TX)
+			break;
+		nxmit++;
 	}
 
 	if (unlikely(flags & XDP_XMIT_FLUSH))
 		ice_xdp_ring_update_tail(xdp_ring);
 
-	return n - drops;
+	return nxmit;
 }
 
 /**
@@ -2934,7 +2934,7 @@ static int igb_xdp_xmit(struct net_device *dev, int n,
 	int cpu = smp_processor_id();
 	struct igb_ring *tx_ring;
 	struct netdev_queue *nq;
-	int drops = 0;
+	int nxmit = 0;
 	int i;
 
 	if (unlikely(test_bit(__IGB_DOWN, &adapter->state)))
@@ -2961,10 +2961,9 @@ static int igb_xdp_xmit(struct net_device *dev, int n,
 		int err;
 
 		err = igb_xmit_xdp_ring(adapter, tx_ring, xdpf);
-		if (err != IGB_XDP_TX) {
-			xdp_return_frame_rx_napi(xdpf);
-			drops++;
-		}
+		if (err != IGB_XDP_TX)
+			break;
+		nxmit++;
 	}
 
 	__netif_tx_unlock(nq);
@@ -2972,7 +2971,7 @@ static int igb_xdp_xmit(struct net_device *dev, int n,
 	if (unlikely(flags & XDP_XMIT_FLUSH))
 		igb_xdp_ring_update_tail(tx_ring);
 
-	return n - drops;
+	return nxmit;
 }
 
 static const struct net_device_ops igb_netdev_ops = {
@@ -10188,7 +10188,7 @@ static int ixgbe_xdp_xmit(struct net_device *dev, int n,
 {
 	struct ixgbe_adapter *adapter = netdev_priv(dev);
 	struct ixgbe_ring *ring;
-	int drops = 0;
+	int nxmit = 0;
 	int i;
 
 	if (unlikely(test_bit(__IXGBE_DOWN, &adapter->state)))
@@ -10212,16 +10212,15 @@ static int ixgbe_xdp_xmit(struct net_device *dev, int n,
 		int err;
 
 		err = ixgbe_xmit_xdp_ring(adapter, xdpf);
-		if (err != IXGBE_XDP_TX) {
-			xdp_return_frame_rx_napi(xdpf);
-			drops++;
-		}
+		if (err != IXGBE_XDP_TX)
+			break;
+		nxmit++;
 	}
 
 	if (unlikely(flags & XDP_XMIT_FLUSH))
 		ixgbe_xdp_ring_update_tail(ring);
 
-	return n - drops;
+	return nxmit;
 }
 
 static const struct net_device_ops ixgbe_netdev_ops = {
@@ -2137,7 +2137,7 @@ mvneta_xdp_xmit(struct net_device *dev, int num_frame,
 {
 	struct mvneta_port *pp = netdev_priv(dev);
 	struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
-	int i, nxmit_byte = 0, nxmit = num_frame;
+	int i, nxmit_byte = 0, nxmit = 0;
 	int cpu = smp_processor_id();
 	struct mvneta_tx_queue *txq;
 	struct netdev_queue *nq;
@@ -2155,12 +2155,11 @@ mvneta_xdp_xmit(struct net_device *dev, int num_frame,
 	__netif_tx_lock(nq, cpu);
 	for (i = 0; i < num_frame; i++) {
 		ret = mvneta_xdp_submit_frame(pp, txq, frames[i], true);
-		if (ret == MVNETA_XDP_TX) {
-			nxmit_byte += frames[i]->len;
-		} else {
-			xdp_return_frame_rx_napi(frames[i]);
-			nxmit--;
-		}
+		if (ret != MVNETA_XDP_TX)
+			break;
+
+		nxmit_byte += frames[i]->len;
+		nxmit++;
 	}
 
 	if (unlikely(flags & XDP_XMIT_FLUSH))
@@ -3744,7 +3744,7 @@ mvpp2_xdp_xmit(struct net_device *dev, int num_frame,
 	       struct xdp_frame **frames, u32 flags)
 {
 	struct mvpp2_port *port = netdev_priv(dev);
-	int i, nxmit_byte = 0, nxmit = num_frame;
+	int i, nxmit_byte = 0, nxmit = 0;
 	struct mvpp2_pcpu_stats *stats;
 	u16 txq_id;
 	u32 ret;
@@ -3762,12 +3762,11 @@ mvpp2_xdp_xmit(struct net_device *dev, int num_frame,
 
 	for (i = 0; i < num_frame; i++) {
 		ret = mvpp2_xdp_submit_frame(port, txq_id, frames[i], true);
-		if (ret == MVPP2_XDP_TX) {
-			nxmit_byte += frames[i]->len;
-		} else {
-			xdp_return_frame_rx_napi(frames[i]);
-			nxmit--;
-		}
+		if (ret != MVPP2_XDP_TX)
+			break;
+
+		nxmit_byte += frames[i]->len;
+		nxmit++;
 	}
 
 	if (likely(nxmit > 0))
@@ -500,7 +500,7 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
 {
 	struct mlx5e_priv *priv = netdev_priv(dev);
 	struct mlx5e_xdpsq *sq;
-	int drops = 0;
+	int nxmit = 0;
 	int sq_num;
 	int i;
 
@@ -529,11 +529,8 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
 		xdptxd.dma_addr = dma_map_single(sq->pdev, xdptxd.data,
 						 xdptxd.len, DMA_TO_DEVICE);
 
-		if (unlikely(dma_mapping_error(sq->pdev, xdptxd.dma_addr))) {
-			xdp_return_frame_rx_napi(xdpf);
-			drops++;
-			continue;
-		}
+		if (unlikely(dma_mapping_error(sq->pdev, xdptxd.dma_addr)))
+			break;
 
 		xdpi.mode = MLX5E_XDP_XMIT_MODE_FRAME;
 		xdpi.frame.xdpf = xdpf;
@@ -544,9 +541,9 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
 		if (unlikely(!ret)) {
 			dma_unmap_single(sq->pdev, xdptxd.dma_addr,
 					 xdptxd.len, DMA_TO_DEVICE);
-			xdp_return_frame_rx_napi(xdpf);
-			drops++;
+			break;
 		}
+		nxmit++;
 	}
 
 	if (flags & XDP_XMIT_FLUSH) {
@@ -555,7 +552,7 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
 		mlx5e_xmit_xdp_doorbell(sq);
 	}
 
-	return n - drops;
+	return nxmit;
 }
 
 void mlx5e_xdp_rx_poll_complete(struct mlx5e_rq *rq)
@@ -345,7 +345,7 @@ int qede_xdp_transmit(struct net_device *dev, int n_frames,
 	struct qede_tx_queue *xdp_tx;
 	struct xdp_frame *xdpf;
 	dma_addr_t mapping;
-	int i, drops = 0;
+	int i, nxmit = 0;
 	u16 xdp_prod;
 
 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
@@ -364,18 +364,13 @@ int qede_xdp_transmit(struct net_device *dev, int n_frames,
 
 		mapping = dma_map_single(dmadev, xdpf->data, xdpf->len,
 					 DMA_TO_DEVICE);
-		if (unlikely(dma_mapping_error(dmadev, mapping))) {
-			xdp_return_frame_rx_napi(xdpf);
-			drops++;
-
-			continue;
-		}
+		if (unlikely(dma_mapping_error(dmadev, mapping)))
+			break;
 
 		if (unlikely(qede_xdp_xmit(xdp_tx, mapping, 0, xdpf->len,
-					   NULL, xdpf))) {
-			xdp_return_frame_rx_napi(xdpf);
-			drops++;
-		}
+					   NULL, xdpf)))
+			break;
+		nxmit++;
 	}
 
 	if (flags & XDP_XMIT_FLUSH) {
@@ -387,7 +382,7 @@ int qede_xdp_transmit(struct net_device *dev, int n_frames,
 
 	spin_unlock(&xdp_tx->xdp_tx_lock);
 
-	return n_frames - drops;
+	return nxmit;
 }
 
 int qede_txq_has_work(struct qede_tx_queue *txq)
@@ -412,14 +412,6 @@ err:
 	return NETDEV_TX_OK;
 }
 
-static void efx_xdp_return_frames(int n, struct xdp_frame **xdpfs)
-{
-	int i;
-
-	for (i = 0; i < n; i++)
-		xdp_return_frame_rx_napi(xdpfs[i]);
-}
-
 /* Transmit a packet from an XDP buffer
  *
  * Returns number of packets sent on success, error code otherwise.
@@ -492,12 +484,7 @@ int efx_xdp_tx_buffers(struct efx_nic *efx, int n, struct xdp_frame **xdpfs,
 	if (flush && i > 0)
 		efx_nic_push_buffers(tx_queue);
 
-	if (i == 0)
-		return -EIO;
-
-	efx_xdp_return_frames(n - i, xdpfs + i);
-
-	return i;
+	return i == 0 ? -EIO : i;
 }
 
 /* Initiate a packet transmission. We use one channel per CPU
@@ -1757,8 +1757,7 @@ static int netsec_xdp_xmit(struct net_device *ndev, int n,
 {
 	struct netsec_priv *priv = netdev_priv(ndev);
 	struct netsec_desc_ring *tx_ring = &priv->desc_ring[NETSEC_RING_TX];
-	int drops = 0;
-	int i;
+	int i, nxmit = 0;
 
 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
 		return -EINVAL;
@@ -1769,12 +1768,11 @@ static int netsec_xdp_xmit(struct net_device *ndev, int n,
 		int err;
 
 		err = netsec_xdp_queue_one(priv, xdpf, true);
-		if (err != NETSEC_XDP_TX) {
-			xdp_return_frame_rx_napi(xdpf);
-			drops++;
-		} else {
-			tx_ring->xdp_xmit++;
-		}
+		if (err != NETSEC_XDP_TX)
+			break;
+
+		tx_ring->xdp_xmit++;
+		nxmit++;
 	}
 	spin_unlock(&tx_ring->lock);
 
@@ -1783,7 +1781,7 @@ static int netsec_xdp_xmit(struct net_device *ndev, int n,
 		tx_ring->xdp_xmit = 0;
 	}
 
-	return n - drops;
+	return nxmit;
 }
 
 static int netsec_xdp_setup(struct netsec_priv *priv, struct bpf_prog *prog,
@@ -1123,25 +1123,23 @@ static int cpsw_ndo_xdp_xmit(struct net_device *ndev, int n,
 	struct cpsw_priv *priv = netdev_priv(ndev);
 	struct cpsw_common *cpsw = priv->cpsw;
 	struct xdp_frame *xdpf;
-	int i, drops = 0, port;
+	int i, nxmit = 0, port;
 
 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
 		return -EINVAL;
 
 	for (i = 0; i < n; i++) {
 		xdpf = frames[i];
-		if (xdpf->len < CPSW_MIN_PACKET_SIZE) {
-			xdp_return_frame_rx_napi(xdpf);
-			drops++;
-			continue;
-		}
+		if (xdpf->len < CPSW_MIN_PACKET_SIZE)
+			break;
 
 		port = priv->emac_port + cpsw->data.dual_emac;
 		if (cpsw_xdp_tx_frame(priv, xdpf, NULL, port))
-			drops++;
+			break;
+		nxmit++;
 	}
 
-	return n - drops;
+	return nxmit;
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
@@ -1093,24 +1093,22 @@ static int cpsw_ndo_xdp_xmit(struct net_device *ndev, int n,
 {
 	struct cpsw_priv *priv = netdev_priv(ndev);
 	struct xdp_frame *xdpf;
-	int i, drops = 0;
+	int i, nxmit = 0;
 
 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
 		return -EINVAL;
 
 	for (i = 0; i < n; i++) {
 		xdpf = frames[i];
-		if (xdpf->len < CPSW_MIN_PACKET_SIZE) {
-			xdp_return_frame_rx_napi(xdpf);
-			drops++;
-			continue;
-		}
+		if (xdpf->len < CPSW_MIN_PACKET_SIZE)
+			break;
 
 		if (cpsw_xdp_tx_frame(priv, xdpf, NULL, priv->emac_port))
-			drops++;
+			break;
+		nxmit++;
 	}
 
-	return n - drops;
+	return nxmit;
 }
 
 static int cpsw_get_port_parent_id(struct net_device *ndev,
@@ -1305,19 +1305,15 @@ int cpsw_xdp_tx_frame(struct cpsw_priv *priv, struct xdp_frame *xdpf,
 		ret = cpdma_chan_submit_mapped(txch, cpsw_xdpf_to_handle(xdpf),
 					       dma, xdpf->len, port);
 	} else {
-		if (sizeof(*xmeta) > xdpf->headroom) {
-			xdp_return_frame_rx_napi(xdpf);
+		if (sizeof(*xmeta) > xdpf->headroom)
 			return -EINVAL;
-		}
 
 		ret = cpdma_chan_submit(txch, cpsw_xdpf_to_handle(xdpf),
 					xdpf->data, xdpf->len, port);
 	}
 
-	if (ret) {
+	if (ret)
 		priv->ndev->stats.tx_dropped++;
-		xdp_return_frame_rx_napi(xdpf);
-	}
 
 	return ret;
 }
@@ -1353,7 +1349,8 @@ int cpsw_run_xdp(struct cpsw_priv *priv, int ch, struct xdp_buff *xdp,
 		if (unlikely(!xdpf))
 			goto drop;
 
-		cpsw_xdp_tx_frame(priv, xdpf, page, port);
+		if (cpsw_xdp_tx_frame(priv, xdpf, page, port))
+			xdp_return_frame_rx_napi(xdpf);
 		break;
 	case XDP_REDIRECT:
 		if (xdp_do_redirect(ndev, xdp, prog))
@@ -1181,8 +1181,7 @@ static int tun_xdp_xmit(struct net_device *dev, int n,
 	struct tun_struct *tun = netdev_priv(dev);
 	struct tun_file *tfile;
 	u32 numqueues;
-	int drops = 0;
-	int cnt = n;
+	int nxmit = 0;
 	int i;
 
 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
@@ -1212,9 +1211,9 @@ resample:
 
 		if (__ptr_ring_produce(&tfile->tx_ring, frame)) {
 			atomic_long_inc(&dev->tx_dropped);
-			xdp_return_frame_rx_napi(xdp);
-			drops++;
+			break;
 		}
+		nxmit++;
 	}
 	spin_unlock(&tfile->tx_ring.producer_lock);
 
@@ -1222,17 +1221,21 @@ resample:
 		__tun_xdp_flush_tfile(tfile);
 
 	rcu_read_unlock();
-	return cnt - drops;
+	return nxmit;
 }
 
 static int tun_xdp_tx(struct net_device *dev, struct xdp_buff *xdp)
 {
 	struct xdp_frame *frame = xdp_convert_buff_to_frame(xdp);
+	int nxmit;
 
 	if (unlikely(!frame))
 		return -EOVERFLOW;
 
-	return tun_xdp_xmit(dev, 1, &frame, XDP_XMIT_FLUSH);
+	nxmit = tun_xdp_xmit(dev, 1, &frame, XDP_XMIT_FLUSH);
+	if (!nxmit)
+		xdp_return_frame_rx_napi(frame);
+	return nxmit;
 }
 
 static const struct net_device_ops tap_netdev_ops = {
@@ -434,7 +434,7 @@ static int veth_xdp_xmit(struct net_device *dev, int n,
 			 u32 flags, bool ndo_xmit)
 {
 	struct veth_priv *rcv_priv, *priv = netdev_priv(dev);
-	int i, ret = -ENXIO, drops = 0;
+	int i, ret = -ENXIO, nxmit = 0;
 	struct net_device *rcv;
 	unsigned int max_len;
 	struct veth_rq *rq;
@@ -464,21 +464,20 @@ static int veth_xdp_xmit(struct net_device *dev, int n,
 		void *ptr = veth_xdp_to_ptr(frame);
 
 		if (unlikely(frame->len > max_len ||
-			     __ptr_ring_produce(&rq->xdp_ring, ptr))) {
-			xdp_return_frame_rx_napi(frame);
-			drops++;
-		}
+			     __ptr_ring_produce(&rq->xdp_ring, ptr)))
+			break;
+		nxmit++;
 	}
 	spin_unlock(&rq->xdp_ring.producer_lock);
 
 	if (flags & XDP_XMIT_FLUSH)
 		__veth_xdp_flush(rq);
 
-	ret = n - drops;
+	ret = nxmit;
 	if (ndo_xmit) {
 		u64_stats_update_begin(&rq->stats.syncp);
-		rq->stats.vs.peer_tq_xdp_xmit += n - drops;
-		rq->stats.vs.peer_tq_xdp_xmit_err += drops;
+		rq->stats.vs.peer_tq_xdp_xmit += nxmit;
+		rq->stats.vs.peer_tq_xdp_xmit_err += n - nxmit;
 		u64_stats_update_end(&rq->stats.syncp);
 	}
 
@@ -505,20 +504,23 @@ static int veth_ndo_xdp_xmit(struct net_device *dev, int n,
 
 static void veth_xdp_flush_bq(struct veth_rq *rq, struct veth_xdp_tx_bq *bq)
 {
-	int sent, i, err = 0;
+	int sent, i, err = 0, drops;
 
 	sent = veth_xdp_xmit(rq->dev, bq->count, bq->q, 0, false);
 	if (sent < 0) {
 		err = sent;
 		sent = 0;
-		for (i = 0; i < bq->count; i++)
-			xdp_return_frame(bq->q[i]);
 	}
-	trace_xdp_bulk_tx(rq->dev, sent, bq->count - sent, err);
+
+	for (i = sent; unlikely(i < bq->count); i++)
+		xdp_return_frame(bq->q[i]);
+
+	drops = bq->count - sent;
+	trace_xdp_bulk_tx(rq->dev, sent, drops, err);
 
 	u64_stats_update_begin(&rq->stats.syncp);
 	rq->stats.vs.xdp_tx += sent;
-	rq->stats.vs.xdp_tx_err += bq->count - sent;
+	rq->stats.vs.xdp_tx_err += drops;
 	u64_stats_update_end(&rq->stats.syncp);
 
 	bq->count = 0;
@@ -499,10 +499,10 @@ static int virtnet_xdp_xmit(struct net_device *dev,
 	unsigned int len;
 	int packets = 0;
 	int bytes = 0;
-	int drops = 0;
+	int nxmit = 0;
 	int kicks = 0;
-	int ret, err;
 	void *ptr;
+	int ret;
 	int i;
 
 	/* Only allow ndo_xdp_xmit if XDP is loaded on dev, as this
@@ -516,7 +516,6 @@ static int virtnet_xdp_xmit(struct net_device *dev,
 
 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK)) {
 		ret = -EINVAL;
-		drops = n;
 		goto out;
 	}
 
@@ -539,13 +538,11 @@ static int virtnet_xdp_xmit(struct net_device *dev,
 	for (i = 0; i < n; i++) {
 		struct xdp_frame *xdpf = frames[i];
 
-		err = __virtnet_xdp_xmit_one(vi, sq, xdpf);
-		if (err) {
-			xdp_return_frame_rx_napi(xdpf);
-			drops++;
-		}
+		if (__virtnet_xdp_xmit_one(vi, sq, xdpf))
+			break;
+		nxmit++;
 	}
-	ret = n - drops;
+	ret = nxmit;
 
 	if (flags & XDP_XMIT_FLUSH) {
 		if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq))
@@ -556,7 +553,7 @@ out:
 	sq->stats.bytes += bytes;
 	sq->stats.packets += packets;
 	sq->stats.xdp_tx += n;
-	sq->stats.xdp_tx_drops += drops;
+	sq->stats.xdp_tx_drops += n - nxmit;
 	sq->stats.kicks += kicks;
 	u64_stats_update_end(&sq->stats.syncp);
 
@@ -709,7 +706,9 @@ static struct sk_buff *receive_small(struct net_device *dev,
 			if (unlikely(!xdpf))
 				goto err_xdp;
 			err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
-			if (unlikely(err < 0)) {
+			if (unlikely(!err)) {
+				xdp_return_frame_rx_napi(xdpf);
+			} else if (unlikely(err < 0)) {
 				trace_xdp_exception(vi->dev, xdp_prog, act);
 				goto err_xdp;
 			}
@@ -896,7 +895,9 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
 				if (unlikely(!xdpf))
 					goto err_xdp;
 				err = virtnet_xdp_xmit(dev, 1, &xdpf, 0);
-				if (unlikely(err < 0)) {
+				if (unlikely(!err)) {
+					xdp_return_frame_rx_napi(xdpf);
+				} else if (unlikely(err < 0)) {
 					trace_xdp_exception(vi->dev, xdp_prog, act);
 					if (unlikely(xdp_page != page))
 						put_page(xdp_page);
@@ -608,8 +608,8 @@ static int xennet_xdp_xmit(struct net_device *dev, int n,
 	struct netfront_info *np = netdev_priv(dev);
 	struct netfront_queue *queue = NULL;
 	unsigned long irq_flags;
-	int drops = 0;
-	int i, err;
+	int nxmit = 0;
+	int i;
 
 	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
 		return -EINVAL;
@@ -622,15 +622,13 @@ static int xennet_xdp_xmit(struct net_device *dev, int n,
 
 		if (!xdpf)
 			continue;
-		err = xennet_xdp_xmit_one(dev, queue, xdpf);
-		if (err) {
-			xdp_return_frame_rx_napi(xdpf);
-			drops++;
-		}
+		if (xennet_xdp_xmit_one(dev, queue, xdpf))
+			break;
+		nxmit++;
 	}
 	spin_unlock_irqrestore(&queue->tx_lock, irq_flags);
 
-	return n - drops;
+	return nxmit;
 }
 
@@ -875,7 +873,9 @@ static u32 xennet_run_xdp(struct netfront_queue *queue, struct page *pdata,
 		get_page(pdata);
 		xdpf = xdp_convert_buff_to_frame(xdp);
 		err = xennet_xdp_xmit(queue->info->netdev, 1, &xdpf, 0);
-		if (unlikely(err < 0))
+		if (unlikely(!err))
+			xdp_return_frame_rx_napi(xdpf);
+		else if (unlikely(err < 0))
 			trace_xdp_exception(queue->info->netdev, prog, act);
 		break;
 	case XDP_REDIRECT:
@@ -329,7 +329,7 @@ bool dev_map_can_have_prog(struct bpf_map *map)
 static void bq_xmit_all(struct xdp_dev_bulk_queue *bq, u32 flags)
 {
 	struct net_device *dev = bq->dev;
-	int sent = 0, drops = 0, err = 0;
+	int sent = 0, err = 0;
 	int i;
 
 	if (unlikely(!bq->count))
@@ -343,29 +343,23 @@ static void bq_xmit_all(struct xdp_dev_bulk_queue *bq, u32 flags)
 
 	sent = dev->netdev_ops->ndo_xdp_xmit(dev, bq->count, bq->q, flags);
 	if (sent < 0) {
+		/* If ndo_xdp_xmit fails with an errno, no frames have
+		 * been xmit'ed.
+		 */
 		err = sent;
 		sent = 0;
-		goto error;
 	}
-	drops = bq->count - sent;
-out:
-	bq->count = 0;
 
-	trace_xdp_devmap_xmit(bq->dev_rx, dev, sent, drops, err);
-	bq->dev_rx = NULL;
-	__list_del_clearprev(&bq->flush_node);
-	return;
-error:
-	/* If ndo_xdp_xmit fails with an errno, no frames have been
-	 * xmit'ed and it's our responsibility to them free all.
+	/* If not all frames have been transmitted, it is our
+	 * responsibility to free them
 	 */
-	for (i = 0; i < bq->count; i++) {
-		struct xdp_frame *xdpf = bq->q[i];
+	for (i = sent; unlikely(i < bq->count); i++)
+		xdp_return_frame_rx_napi(bq->q[i]);
 
-		xdp_return_frame_rx_napi(xdpf);
-		drops++;
-	}
-	goto out;
+	trace_xdp_devmap_xmit(bq->dev_rx, dev, sent, bq->count - sent, err);
+	bq->dev_rx = NULL;
+	bq->count = 0;
+	__list_del_clearprev(&bq->flush_node);
 }
 
 /* __dev_flush is called from xdp_do_flush() which _must_ be signaled