net: extract napi poll functionality to __napi_poll()

This commit introduces a new function, __napi_poll(), which carries the
main logic of the existing napi_poll() function and will be called by
other functions in later commits.
The idea and implementation are by Felix Fietkau <nbd@nbd.name> and were
proposed as part of the patch to move NAPI work to workqueue context.
By itself, this commit is a pure code restructure.

Signed-off-by: Felix Fietkau <nbd@nbd.name>
Signed-off-by: Wei Wang <weiwan@google.com>
Reviewed-by: Alexander Duyck <alexanderduyck@fb.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Author: Felix Fietkau <nbd@nbd.name>
Date: 2021-02-08 11:34:08 -08:00
Committed by: David S. Miller
Commit: 898f8015ff (parent 4feffeadbc)
1 changed file with 24 additions and 12 deletions

net/core/dev.c

@@ -6776,15 +6776,10 @@ void __netif_napi_del(struct napi_struct *napi)
 }
 EXPORT_SYMBOL(__netif_napi_del);
 
-static int napi_poll(struct napi_struct *n, struct list_head *repoll)
+static int __napi_poll(struct napi_struct *n, bool *repoll)
 {
-	void *have;
 	int work, weight;
 
-	list_del_init(&n->poll_list);
-
-	have = netpoll_poll_lock(n);
-
 	weight = n->weight;
 
 	/* This NAPI_STATE_SCHED test is for avoiding a race
@@ -6804,7 +6799,7 @@ static int napi_poll(struct napi_struct *n, struct list_head *repoll)
 			    n->poll, work, weight);
 
 	if (likely(work < weight))
-		goto out_unlock;
+		return work;
 
 	/* Drivers must not modify the NAPI state if they
 	 * consume the entire weight.  In such cases this code
@@ -6813,7 +6808,7 @@ static int napi_poll(struct napi_struct *n, struct list_head *repoll)
 	 */
 	if (unlikely(napi_disable_pending(n))) {
 		napi_complete(n);
-		goto out_unlock;
+		return work;
 	}
 
 	/* The NAPI context has more processing work, but busy-polling
@@ -6826,7 +6821,7 @@ static int napi_poll(struct napi_struct *n, struct list_head *repoll)
 			 */
 			napi_schedule(n);
 		}
-		goto out_unlock;
+		return work;
 	}
 
 	if (n->gro_bitmask) {
@@ -6844,12 +6839,29 @@ static int napi_poll(struct napi_struct *n, struct list_head *repoll)
 	if (unlikely(!list_empty(&n->poll_list))) {
 		pr_warn_once("%s: Budget exhausted after napi rescheduled\n",
 			     n->dev ? n->dev->name : "backlog");
-		goto out_unlock;
+		return work;
 	}
 
-	list_add_tail(&n->poll_list, repoll);
+	*repoll = true;
+
+	return work;
+}
+
+static int napi_poll(struct napi_struct *n, struct list_head *repoll)
+{
+	bool do_repoll = false;
+	void *have;
+	int work;
+
+	list_del_init(&n->poll_list);
+
+	have = netpoll_poll_lock(n);
+
+	work = __napi_poll(n, &do_repoll);
+
+	if (do_repoll)
+		list_add_tail(&n->poll_list, repoll);
 
-out_unlock:
 	netpoll_poll_unlock(have);
 
 	return work;
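
For context on where this split leads: once the poll_list handling and
netpoll locking live only in the napi_poll() wrapper, a caller other
than net_rx_action() can drive the same poll logic directly. A minimal
sketch of such a caller, assuming it sits in net/core/dev.c next to the
static __napi_poll() (the function name and loop shape are illustrative,
not part of this commit; the real threaded-NAPI caller arrived in later
patches):

	/* Illustrative only: drive one NAPI instance to completion from
	 * a non-softirq caller, e.g. a dedicated kernel thread.
	 */
	static void example_poll_until_done(struct napi_struct *napi)
	{
		bool repoll;

		do {
			void *have;

			repoll = false;

			/* NAPI poll callbacks expect BH context. */
			local_bh_disable();
			have = netpoll_poll_lock(napi);

			/* Core poll logic only; no list handling. */
			__napi_poll(napi, &repoll);

			netpoll_poll_unlock(have);
			local_bh_enable();

			/* Budget exhausted: yield, then poll again. */
			if (repoll)
				cond_resched();
		} while (repoll);
	}

Because __napi_poll() reports "needs repoll" through an out-parameter
instead of queueing onto net_rx_action()'s local repoll list itself,
each caller can decide how to reschedule: net_rx_action() re-queues the
NAPI onto its list, while a thread-based poller can simply loop.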