Merge branch 'bpf-xdp-remove-xdp-flush'
Jesper Dangaard Brouer says:

====================
This patchset removes the net_device operation ndo_xdp_flush() call.
This is a follow-up to merge commit ea9916ea3e ("Merge branch
'ndo_xdp_xmit-cleanup'"), as after commit c1ece6b245 ("bpf/xdp: devmap
can avoid calling ndo_xdp_flush") no callers of ndo_xdp_flush are left
in the bpf-next tree.
====================

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
commit 763f9b414a
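Context for the diff below: with ndo_xdp_flush() gone, the flush/doorbell request is carried by the flags argument of ndo_xdp_xmit(). A minimal caller-side sketch (hypothetical helper, not code from this merge; it only assumes the ndo_xdp_xmit() signature from include/linux/netdevice.h and the XDP_XMIT_FLUSH flag that the tun change below switches to):

#include <linux/netdevice.h>
#include <net/xdp.h>

/* Hypothetical helper illustrating the new calling convention: the
 * doorbell/flush request rides on the last ndo_xdp_xmit() call instead
 * of a separate ndo_xdp_flush() invocation.
 */
static int xdp_xmit_and_flush(struct net_device *dev,
			      struct xdp_frame **frames, int n)
{
	if (!dev->netdev_ops->ndo_xdp_xmit)
		return -EOPNOTSUPP;

	/* XDP_XMIT_FLUSH asks the driver to ring its TX doorbell after
	 * queueing these frames, which is what ndo_xdp_flush() used to do.
	 */
	return dev->netdev_ops->ndo_xdp_xmit(dev, n, frames, XDP_XMIT_FLUSH);
}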
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -11883,7 +11883,6 @@ static const struct net_device_ops i40e_netdev_ops = {
 	.ndo_bridge_setlink	= i40e_ndo_bridge_setlink,
 	.ndo_bpf		= i40e_xdp,
 	.ndo_xdp_xmit		= i40e_xdp_xmit,
-	.ndo_xdp_flush		= i40e_xdp_flush,
 };
 
 /**
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -3707,22 +3707,3 @@ int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
 
 	return n - drops;
 }
-
-/**
- * i40e_xdp_flush - Implements ndo_xdp_flush
- * @dev: netdev
- **/
-void i40e_xdp_flush(struct net_device *dev)
-{
-	struct i40e_netdev_priv *np = netdev_priv(dev);
-	unsigned int queue_index = smp_processor_id();
-	struct i40e_vsi *vsi = np->vsi;
-
-	if (test_bit(__I40E_VSI_DOWN, vsi->state))
-		return;
-
-	if (!i40e_enabled_xdp_vsi(vsi) || queue_index >= vsi->num_queue_pairs)
-		return;
-
-	i40e_xdp_ring_update_tail(vsi->xdp_rings[queue_index]);
-}
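For reference, the tail bump that i40e_xdp_flush() used to perform is expected to happen inside i40e_xdp_xmit() when the caller passes XDP_XMIT_FLUSH; roughly the following, paraphrased from memory rather than taken from this diff:

	/* Tail of i40e_xdp_xmit() after the earlier XDP_XMIT_FLUSH
	 * conversion (paraphrased): ring the doorbell only on request.
	 */
	if (unlikely(flags & XDP_XMIT_FLUSH))
		i40e_xdp_ring_update_tail(xdp_ring);

	return n - drops;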
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -489,7 +489,6 @@ int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
 bool __i40e_chk_linearize(struct sk_buff *skb);
 int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
 		  u32 flags);
-void i40e_xdp_flush(struct net_device *dev);
 
 /**
  * i40e_get_head - Retrieve head from head writeback
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -10069,26 +10069,6 @@ static int ixgbe_xdp_xmit(struct net_device *dev, int n,
 	return n - drops;
 }
 
-static void ixgbe_xdp_flush(struct net_device *dev)
-{
-	struct ixgbe_adapter *adapter = netdev_priv(dev);
-	struct ixgbe_ring *ring;
-
-	/* Its possible the device went down between xdp xmit and flush so
-	 * we need to ensure device is still up.
-	 */
-	if (unlikely(test_bit(__IXGBE_DOWN, &adapter->state)))
-		return;
-
-	ring = adapter->xdp_prog ? adapter->xdp_ring[smp_processor_id()] : NULL;
-	if (unlikely(!ring))
-		return;
-
-	ixgbe_xdp_ring_update_tail(ring);
-
-	return;
-}
-
 static const struct net_device_ops ixgbe_netdev_ops = {
 	.ndo_open		= ixgbe_open,
 	.ndo_stop		= ixgbe_close,
@@ -10136,7 +10116,6 @@ static const struct net_device_ops ixgbe_netdev_ops = {
 	.ndo_features_check	= ixgbe_features_check,
 	.ndo_bpf		= ixgbe_xdp,
 	.ndo_xdp_xmit		= ixgbe_xdp_xmit,
-	.ndo_xdp_flush		= ixgbe_xdp_flush,
 };
 
 /**
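What the removed function's core call actually does: ixgbe_xdp_ring_update_tail() stays in ixgbe_main.c and is now reached from ixgbe_xdp_xmit() when the caller sets XDP_XMIT_FLUSH. Sketched from memory (not part of this diff), the helper is just a write barrier plus a tail-register write, i.e. the doorbell that a flush is meant to ring:

static void ixgbe_xdp_ring_update_tail(struct ixgbe_ring *ring)
{
	/* Force memory writes to complete before letting h/w know there
	 * are new descriptors to fetch.
	 */
	wmb();
	writel(ring->next_to_use, ring->tail);
}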
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1347,26 +1347,7 @@ static int tun_xdp_tx(struct net_device *dev, struct xdp_buff *xdp)
 	if (unlikely(!frame))
 		return -EOVERFLOW;
 
-	return tun_xdp_xmit(dev, 1, &frame, 0);
-}
-
-static void tun_xdp_flush(struct net_device *dev)
-{
-	struct tun_struct *tun = netdev_priv(dev);
-	struct tun_file *tfile;
-	u32 numqueues;
-
-	rcu_read_lock();
-
-	numqueues = READ_ONCE(tun->numqueues);
-	if (!numqueues)
-		goto out;
-
-	tfile = rcu_dereference(tun->tfiles[smp_processor_id() %
-					    numqueues]);
-	__tun_xdp_flush_tfile(tfile);
-out:
-	rcu_read_unlock();
+	return tun_xdp_xmit(dev, 1, &frame, XDP_XMIT_FLUSH);
 }
 
 static const struct net_device_ops tap_netdev_ops = {
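The one-line replacement above (flags 0 -> XDP_XMIT_FLUSH) relies on tun_xdp_xmit() honouring the flag by flushing the selected queue at the end of the call; roughly the following, paraphrased from memory rather than quoted from this diff:

	/* Tail of tun_xdp_xmit() (paraphrased): notify the selected
	 * queue's reader only when the caller requested a flush.
	 */
	if (flags & XDP_XMIT_FLUSH)
		__tun_xdp_flush_tfile(tfile);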
@@ -1387,7 +1368,6 @@ static const struct net_device_ops tap_netdev_ops = {
 	.ndo_get_stats64	= tun_net_get_stats64,
 	.ndo_bpf		= tun_xdp,
 	.ndo_xdp_xmit		= tun_xdp_xmit,
-	.ndo_xdp_flush		= tun_xdp_flush,
 };
 
 static void tun_flow_init(struct tun_struct *tun)
@@ -1706,7 +1686,6 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
 		alloc_frag->offset += buflen;
 		if (tun_xdp_tx(tun->dev, &xdp))
 			goto err_redirect;
-		tun_xdp_flush(tun->dev);
 		rcu_read_unlock();
 		preempt_enable();
 		return NULL;
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -407,18 +407,6 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
 	return skb;
 }
 
-static void virtnet_xdp_flush(struct net_device *dev)
-{
-	struct virtnet_info *vi = netdev_priv(dev);
-	struct send_queue *sq;
-	unsigned int qp;
-
-	qp = vi->curr_queue_pairs - vi->xdp_queue_pairs + smp_processor_id();
-	sq = &vi->sq[qp];
-
-	virtqueue_kick(sq->vq);
-}
-
 static int __virtnet_xdp_xmit_one(struct virtnet_info *vi,
 				  struct send_queue *sq,
 				  struct xdp_frame *xdpf)
@@ -2359,7 +2347,6 @@ static const struct net_device_ops virtnet_netdev = {
 #endif
 	.ndo_bpf		= virtnet_xdp,
 	.ndo_xdp_xmit		= virtnet_xdp_xmit,
-	.ndo_xdp_flush		= virtnet_xdp_flush,
 	.ndo_features_check	= passthru_features_check,
 };
 
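virtnet_xdp_flush() was only a virtqueue kick on the per-CPU XDP send queue; after the earlier flag conversion the same kick is expected inside virtnet_xdp_xmit() when XDP_XMIT_FLUSH is passed, roughly (paraphrased from memory, not part of this diff):

	/* Tail of virtnet_xdp_xmit() (paraphrased): kick the send
	 * virtqueue only when the caller requested a flush.
	 */
	if (flags & XDP_XMIT_FLUSH)
		virtqueue_kick(sq->vq);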
|
@ -1192,9 +1192,6 @@ struct dev_ifalias {
|
|||
* that got dropped are freed/returned via xdp_return_frame().
|
||||
* Returns negative number, means general error invoking ndo, meaning
|
||||
* no frames were xmit'ed and core-caller will free all frames.
|
||||
* void (*ndo_xdp_flush)(struct net_device *dev);
|
||||
* This function is used to inform the driver to flush a particular
|
||||
* xdp tx queue. Must be called on same CPU as xdp_xmit.
|
||||
*/
|
||||
struct net_device_ops {
|
||||
int (*ndo_init)(struct net_device *dev);
|
||||
|
@@ -1382,7 +1379,6 @@ struct net_device_ops {
 	int			(*ndo_xdp_xmit)(struct net_device *dev, int n,
 						struct xdp_frame **xdp,
 						u32 flags);
-	void			(*ndo_xdp_flush)(struct net_device *dev);
 };
 
 /**
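With the ndo removed, the only remaining flush mechanism is the flags argument of ndo_xdp_xmit(). For reference, the flag is defined in include/net/xdp.h; quoted from memory (not part of this diff), it looks roughly like:

/* Flags for ndo_xdp_xmit */
#define XDP_XMIT_FLUSH		(1U << 0)	/* doorbell signal consumer */
#define XDP_XMIT_FLAGS_MASK	XDP_XMIT_FLUSH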