net: provide generic busy polling to all NAPI drivers
NAPI drivers no longer need to observe a particular protocol to benefit from busy polling (CONFIG_NET_RX_BUSY_POLL=y).

napi_hash_add() and napi_hash_del() are automatically called from the core networking stack, respectively from netif_napi_add() and netif_napi_del().

This patch depends on free_netdev() and netif_napi_del() being called from process context, which seems to be the norm.

Drivers might still prefer to call napi_hash_del() on their own, since they might combine all the rcu grace periods into a single one, knowing their NAPI structures' lifetime, while the core networking stack has no idea of a possible combining.

Once this patch proves not to bring serious regressions, we will clean up drivers to either remove napi_hash_del() or provide appropriate rcu grace period combining.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit 93d05d4a32
parent 34cbe27e81
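The driver-side effect is the same in every hunk below: the explicit hash registration disappears. A minimal before/after sketch for a hypothetical driver (priv and my_poll are illustrative names, not from this patch):

	/* Before: the driver had to hash each NAPI context itself
	 * to make it reachable by busy polling.
	 */
	netif_napi_add(dev, &priv->napi, my_poll, NAPI_POLL_WEIGHT);
	napi_hash_add(&priv->napi);

	/* After: netif_napi_add() calls napi_hash_add() internally,
	 * so the same setup is a single call.
	 */
	netif_napi_add(dev, &priv->napi, my_poll, NAPI_POLL_WEIGHT);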
@@ -46,7 +46,6 @@ static void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
 	for_each_rx_queue_cnic(bp, i) {
 		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
 			       bnx2x_poll, NAPI_POLL_WEIGHT);
-		napi_hash_add(&bnx2x_fp(bp, i, napi));
 	}
 }
@@ -58,7 +57,6 @@ static void bnx2x_add_all_napi(struct bnx2x *bp)
 	for_each_eth_queue(bp, i) {
 		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
 			       bnx2x_poll, NAPI_POLL_WEIGHT);
-		napi_hash_add(&bnx2x_fp(bp, i, napi));
 	}
 }
@@ -4227,12 +4227,10 @@ static void bnxt_init_napi(struct bnxt *bp)
 			bnapi = bp->bnapi[i];
 			netif_napi_add(bp->dev, &bnapi->napi,
 				       bnxt_poll, 64);
-			napi_hash_add(&bnapi->napi);
 		}
 	} else {
 		bnapi = bp->bnapi[0];
 		netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64);
-		napi_hash_add(&bnapi->napi);
 	}
 }
@@ -2527,7 +2527,6 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
 		goto err;
 
 	netif_napi_add(dev, &iq->napi, napi_rx_handler, 64);
-	napi_hash_add(&iq->napi);
 	iq->cur_desc = iq->desc;
 	iq->cidx = 0;
 	iq->gen = 1;
@@ -2458,13 +2458,11 @@ static int enic_dev_init(struct enic *enic)
 	switch (vnic_dev_get_intr_mode(enic->vdev)) {
 	default:
 		netif_napi_add(netdev, &enic->napi[0], enic_poll, 64);
-		napi_hash_add(&enic->napi[0]);
 		break;
 	case VNIC_DEV_INTR_MODE_MSIX:
 		for (i = 0; i < enic->rq_count; i++) {
 			netif_napi_add(netdev, &enic->napi[i],
 				enic_poll_msix_rq, NAPI_POLL_WEIGHT);
-			napi_hash_add(&enic->napi[i]);
 		}
 		for (i = 0; i < enic->wq_count; i++)
 			netif_napi_add(netdev, &enic->napi[enic_cq_wq(enic, i)],
@@ -2630,7 +2630,6 @@ static int be_evt_queues_create(struct be_adapter *adapter)
 				eqo->affinity_mask);
 		netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
 			       BE_NAPI_WEIGHT);
-		napi_hash_add(&eqo->napi);
 	}
 	return 0;
 }
@@ -844,7 +844,6 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
 	/* initialize NAPI */
 	netif_napi_add(adapter->netdev, &q_vector->napi,
 		       ixgbe_poll, 64);
-	napi_hash_add(&q_vector->napi);
 
 #ifdef CONFIG_NET_RX_BUSY_POLL
 	/* initialize busy poll */
@@ -2483,9 +2483,6 @@ static int ixgbevf_alloc_q_vectors(struct ixgbevf_adapter *adapter)
 		q_vector->v_idx = q_idx;
 		netif_napi_add(adapter->netdev, &q_vector->napi,
 			       ixgbevf_poll, 64);
-#ifdef CONFIG_NET_RX_BUSY_POLL
-		napi_hash_add(&q_vector->napi);
-#endif
 		adapter->q_vector[q_idx] = q_vector;
 	}
@@ -155,13 +155,11 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
 	cq->mcq.comp = cq->is_tx ? mlx4_en_tx_irq : mlx4_en_rx_irq;
 	cq->mcq.event = mlx4_en_cq_event;
 
-	if (cq->is_tx) {
+	if (cq->is_tx)
 		netif_tx_napi_add(cq->dev, &cq->napi, mlx4_en_poll_tx_cq,
 				  NAPI_POLL_WEIGHT);
-	} else {
+	else
 		netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64);
-		napi_hash_add(&cq->napi);
-	}
 
 	napi_enable(&cq->napi);
@@ -982,7 +982,6 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
 	mlx5e_build_channeltc_to_txq_map(priv, ix);
 
 	netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);
-	napi_hash_add(&c->napi);
 
 	err = mlx5e_open_tx_cqs(c, cparam);
 	if (err)
@@ -3814,7 +3814,6 @@ static int myri10ge_alloc_slices(struct myri10ge_priv *mgp)
 		ss->dev = mgp->dev;
 		netif_napi_add(ss->dev, &ss->napi, myri10ge_poll,
 			       myri10ge_napi_weight);
-		napi_hash_add(&ss->napi);
 	}
 	return 0;
 abort:
@@ -2059,7 +2059,6 @@ static void efx_init_napi_channel(struct efx_channel *channel)
 	channel->napi_dev = efx->net_dev;
 	netif_napi_add(channel->napi_dev, &channel->napi_str,
 		       efx_poll, napi_weight);
-	napi_hash_add(&channel->napi_str);
 	efx_channel_busy_poll_init(channel);
 }
@@ -1610,7 +1610,6 @@ static int virtnet_alloc_queues(struct virtnet_info *vi)
 		vi->rq[i].pages = NULL;
 		netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll,
 			       napi_weight);
-		napi_hash_add(&vi->rq[i].napi);
 
 		sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
 		ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len);
@@ -466,6 +466,9 @@ static inline void napi_complete(struct napi_struct *n)
  * @napi: napi context
  *
  * generate a new napi_id and store a @napi under it in napi_hash
+ * Used for busy polling (CONFIG_NET_RX_BUSY_POLL)
+ * Note: This is normally automatically done from netif_napi_add(),
+ * so might disappear in a future linux version.
  */
 void napi_hash_add(struct napi_struct *napi);
@@ -476,6 +479,10 @@ void napi_hash_add(struct napi_struct *napi);
  * Warning: caller must observe rcu grace period
  * before freeing memory containing @napi, if
  * this function returns true.
+ * Note: core networking stack automatically calls it
+ * from netif_napi_del()
+ * Drivers might want to call this helper to combine all
+ * the needed rcu grace periods into a single one.
  */
 bool napi_hash_del(struct napi_struct *napi);
@@ -4807,6 +4807,7 @@ void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
 	napi->poll_owner = -1;
 #endif
 	set_bit(NAPI_STATE_SCHED, &napi->state);
+	napi_hash_add(napi);
 }
 EXPORT_SYMBOL(netif_napi_add);
@@ -4826,8 +4827,12 @@ void napi_disable(struct napi_struct *n)
 }
 EXPORT_SYMBOL(napi_disable);
 
+/* Must be called in process context */
 void netif_napi_del(struct napi_struct *napi)
 {
+	might_sleep();
+	if (napi_hash_del(napi))
+		synchronize_net();
 	list_del_init(&napi->dev_list);
 	napi_free_frags(napi);
@@ -7227,11 +7232,13 @@ EXPORT_SYMBOL(alloc_netdev_mqs);
  * This function does the last stage of destroying an allocated device
  * interface. The reference to the device object is released.
  * If this is the last reference then it will be freed.
+ * Must be called in process context.
  */
 void free_netdev(struct net_device *dev)
 {
 	struct napi_struct *p, *n;
 
+	might_sleep();
 	netif_free_tx_queues(dev);
 #ifdef CONFIG_SYSFS
 	kvfree(dev->_rx);
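As the message above notes, a driver that knows the lifetime of all its NAPI structures may still call napi_hash_del() itself, so that tearing down many queues pays a single rcu grace period instead of one synchronize_net() per netif_napi_del(). A sketch of that combining pattern under the semantics added here (my_priv, num_queues and the napi array are hypothetical):

	/* Unhash every context first, wait once, then delete. When
	 * netif_napi_del() later calls napi_hash_del() internally, the
	 * context is already unhashed, napi_hash_del() returns false,
	 * and no further grace period is paid.
	 */
	static void my_del_all_napi(struct my_priv *priv)
	{
		bool unhashed = false;
		int i;

		for (i = 0; i < priv->num_queues; i++)
			unhashed |= napi_hash_del(&priv->napi[i]);

		if (unhashed)
			synchronize_net();	/* one grace period for all queues */

		for (i = 0; i < priv->num_queues; i++)
			netif_napi_del(&priv->napi[i]);
	}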