net: Add memory barriers to prevent possible race in byte queue limits
This change adds a memory barrier to the byte queue limit code to address
a possible race as has been seen in the past with the
netif_stop_queue/netif_wake_queue logic.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Tested-by: Stephen Ko <stephen.s.ko@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
parent 5c4903549c
commit b37c0fbe3f
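The race the patch closes is the classic store-buffer pattern: one CPU stops the queue and then re-checks the byte budget, while another CPU returns bytes and then re-checks the stop flag; without a full barrier on both sides, each can miss the other's update and the queue stays stopped. Below is a minimal userspace analogue of that pairing using C11 atomics. It is a sketch only, not kernel code: tx_sent(), tx_completed(), wake_queue(), stopped and avail are illustrative stand-ins for netdev_tx_sent_queue(), netdev_tx_completed_queue(), netif_schedule_queue(), __QUEUE_STATE_STACK_XOFF and the dql byte budget.

#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool stopped;   /* stand-in for __QUEUE_STATE_STACK_XOFF   */
static atomic_long avail;     /* stand-in for dql_avail(): bytes allowed */

static void wake_queue(void)  /* stand-in for netif_schedule_queue()     */
{
}

/* Transmit side, mirroring the new netdev_tx_sent_queue(). */
static void tx_sent(long bytes)
{
        atomic_fetch_sub_explicit(&avail, bytes, memory_order_relaxed);

        if (atomic_load_explicit(&avail, memory_order_relaxed) >= 0)
                return;

        atomic_store_explicit(&stopped, true, memory_order_relaxed);

        /* Publish 'stopped' before re-reading 'avail'; pairs with the
         * fence in tx_completed(), like the smp_mb() pair in the patch. */
        atomic_thread_fence(memory_order_seq_cst);

        /* A completion may have freed budget in the meantime. */
        if (atomic_load_explicit(&avail, memory_order_relaxed) >= 0)
                atomic_store_explicit(&stopped, false, memory_order_relaxed);
}

/* Completion side, mirroring the new netdev_tx_completed_queue(). */
static void tx_completed(long bytes)
{
        if (bytes == 0)
                return;

        atomic_fetch_add_explicit(&avail, bytes, memory_order_relaxed);

        /* Make the new 'avail' visible before reading 'stopped'; without
         * this pairing both CPUs can read stale values and the queue
         * stays stopped forever. */
        atomic_thread_fence(memory_order_seq_cst);

        if (atomic_load_explicit(&avail, memory_order_relaxed) >= 0 &&
            atomic_exchange_explicit(&stopped, false, memory_order_relaxed))
                wake_queue();
}

Dropping either fence lets tx_sent() keep 'stopped' set while tx_completed() skips the wake, which is exactly the stuck-queue scenario the commit message describes.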
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1899,12 +1899,22 @@ static inline void netdev_tx_sent_queue(struct netdev_queue *dev_queue,
 {
 #ifdef CONFIG_BQL
 	dql_queued(&dev_queue->dql, bytes);
-	if (unlikely(dql_avail(&dev_queue->dql) < 0)) {
-		set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
-		if (unlikely(dql_avail(&dev_queue->dql) >= 0))
-			clear_bit(__QUEUE_STATE_STACK_XOFF,
-				  &dev_queue->state);
-	}
+
+	if (likely(dql_avail(&dev_queue->dql) >= 0))
+		return;
+
+	set_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
+
+	/*
+	 * The XOFF flag must be set before checking the dql_avail below,
+	 * because in netdev_tx_completed_queue we update the dql_completed
+	 * before checking the XOFF flag.
+	 */
+	smp_mb();
+
+	/* check again in case another CPU has just made room avail */
+	if (unlikely(dql_avail(&dev_queue->dql) >= 0))
+		clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state);
 #endif
 }
 
@@ -1917,16 +1927,23 @@ static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue,
 					     unsigned pkts, unsigned bytes)
 {
 #ifdef CONFIG_BQL
-	if (likely(bytes)) {
-		dql_completed(&dev_queue->dql, bytes);
-		if (unlikely(test_bit(__QUEUE_STATE_STACK_XOFF,
-				      &dev_queue->state) &&
-		    dql_avail(&dev_queue->dql) >= 0)) {
-			if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF,
-			    &dev_queue->state))
-				netif_schedule_queue(dev_queue);
-		}
-	}
+	if (unlikely(!bytes))
+		return;
+
+	dql_completed(&dev_queue->dql, bytes);
+
+	/*
+	 * Without the memory barrier there is a small possiblity that
+	 * netdev_tx_sent_queue will miss the update and cause the queue to
+	 * be stopped forever
+	 */
+	smp_mb();
+
+	if (dql_avail(&dev_queue->dql) < 0)
+		return;
+
+	if (test_and_clear_bit(__QUEUE_STATE_STACK_XOFF, &dev_queue->state))
+		netif_schedule_queue(dev_queue);
 #endif
 }
 
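For context, here is a hedged sketch of where a driver typically calls the two helpers being patched: the byte accounting happens on the transmit path and the completion (clean) path. my_start_xmit(), my_tx_clean() and the fixed queue index 0 are hypothetical; only netdev_get_tx_queue(), skb_get_queue_mapping(), netdev_tx_sent_queue() and netdev_tx_completed_queue() are the real kernel API.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Transmit path: charge the bytes to BQL after posting the skb. */
static netdev_tx_t my_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_queue *txq;

	txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

	/* ...fill descriptors and hand the skb to the NIC here... */

	/* May set __QUEUE_STATE_STACK_XOFF and stop the stack's queue. */
	netdev_tx_sent_queue(txq, skb->len);

	return NETDEV_TX_OK;
}

/* TX completion path: return the bytes reclaimed from finished descriptors. */
static void my_tx_clean(struct net_device *dev, unsigned int pkts,
			unsigned int bytes)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, 0);

	/* May clear XOFF and call netif_schedule_queue() to restart TX. */
	netdev_tx_completed_queue(txq, pkts, bytes);
}

Both helpers must be called on the same struct netdev_queue so that the dql accounting, the XOFF bit and the barriers added by this commit all refer to the same queue.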