tcp: tsq: avoid one atomic in tcp_wfree()
Under high load, tcp_wfree() has an atomic operation trying
to schedule a tasklet over and over.

We can schedule it only if our per cpu list was empty.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
commit a9b204d156
parent b223feb9de
@@ -880,6 +880,7 @@ void tcp_wfree(struct sk_buff *skb)
 
 	for (oval = READ_ONCE(tp->tsq_flags);; oval = nval) {
 		struct tsq_tasklet *tsq;
+		bool empty;
 
 		if (!(oval & TSQF_THROTTLED) || (oval & TSQF_QUEUED))
 			goto out;
@@ -892,8 +893,10 @@ void tcp_wfree(struct sk_buff *skb)
 		/* queue this socket to tasklet queue */
 		local_irq_save(flags);
 		tsq = this_cpu_ptr(&tsq_tasklet);
+		empty = list_empty(&tsq->head);
 		list_add(&tp->tsq_node, &tsq->head);
-		tasklet_schedule(&tsq->tasklet);
+		if (empty)
+			tasklet_schedule(&tsq->tasklet);
 		local_irq_restore(flags);
 		return;
 	}
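The reasoning behind the change can be checked in isolation. Below is a
minimal user-space sketch, not the kernel implementation: it models the
per-cpu list as a plain singly linked list and counts would-be
tasklet_schedule() calls instead of making them. All names here (enqueue,
drain, schedule_drain) are illustrative. It demonstrates why the
optimization is safe: one drain pass empties the whole list, so only the
enqueue that takes the list from empty to non-empty needs to schedule it.

/*
 * Sketch of the schedule-on-first-enqueue pattern from this patch.
 * Not kernel code; names are illustrative.
 */
#include <stdbool.h>
#include <stdio.h>

struct node {
	struct node *next;
	int val;
};

static struct node *head;	/* stands in for the per-cpu tsq->head list */
static int wakeups;		/* counts would-be tasklet_schedule() calls */

static void schedule_drain(void)
{
	wakeups++;		/* the atomic operation the patch avoids */
}

static void enqueue(struct node *n)
{
	bool empty = (head == NULL);	/* like list_empty(&tsq->head) */

	n->next = head;
	head = n;
	if (empty)		/* schedule only on the empty -> non-empty edge */
		schedule_drain();
}

/* analogue of the tasklet handler: one run drains every queued entry */
static void drain(void)
{
	while (head) {
		struct node *n = head;

		head = n->next;
		printf("drained %d\n", n->val);
	}
}

int main(void)
{
	struct node a = { .val = 1 }, b = { .val = 2 }, c = { .val = 3 };

	enqueue(&a);	/* list was empty: one wakeup */
	enqueue(&b);	/* already non-empty: no wakeup */
	enqueue(&c);	/* already non-empty: no wakeup */
	drain();
	printf("wakeups: %d (three before the patch)\n", wakeups);
	return 0;
}

In the real code the list manipulation runs between local_irq_save() and
local_irq_restore(), so the empty check and the list_add() cannot race
with the tasklet draining the same per-cpu list.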