e1000, e1000e: Use dma_rmb instead of rmb for descriptor read ordering
This change replaces calls to rmb with dma_rmb in the case where we want
to order all follow-on descriptor reads after the check for the
descriptor status bit.

Signed-off-by: Alexander Duyck <alexander.h.duyck@redhat.com>
Acked-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
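For context, a minimal, hypothetical sketch of the ordering pattern these hunks change (the demo_* names and the stub consumer are illustrative, not the driver's code): the NIC DMA-writes a completed descriptor and then sets its Descriptor Done (DD) bit, so the CPU must not let reads of the rest of the descriptor (or the matching buffer_info entry) be reordered ahead of the DD check. dma_rmb() guarantees exactly that ordering for coherent DMA memory, whereas the stronger rmb() also orders reads from MMIO space, which none of these paths perform.

/*
 * Minimal sketch of the pattern being changed; everything named
 * demo_* is hypothetical and not taken from the driver.
 */
#include <linux/types.h>
#include <asm/byteorder.h>
#include <asm/barrier.h>

#define DEMO_STAT_DD	0x01	/* device sets DD after writing the descriptor back */

struct demo_rx_desc {
	__le32 status;
	__le32 length;
};

static void demo_consume_frame(struct demo_rx_desc *desc, u32 len)
{
	/* hand the completed buffer to the stack; stubbed for the sketch */
	(void)desc;
	(void)len;
}

static void demo_clean_rx_ring(struct demo_rx_desc *ring, unsigned int count)
{
	unsigned int i;

	for (i = 0; i < count; i++) {
		if (!(le32_to_cpu(ring[i].status) & DEMO_STAT_DD))
			break;

		/*
		 * Order the length read (and any buffer_info reads)
		 * after the DD check above.  dma_rmb() is sufficient
		 * because both sides access coherent DMA memory; the
		 * stronger rmb() would additionally order MMIO reads,
		 * which this path never performs.
		 */
		dma_rmb();

		demo_consume_frame(&ring[i], le32_to_cpu(ring[i].length));
	}
}

The practical win: on x86-64, for instance, dma_rmb() compiles down to a compiler barrier while rmb() emits an lfence, so switching to the weaker barrier removes a real fence from every descriptor-cleaning iteration.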
parent 03cc864a25
commit 837a1dba00
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -3856,7 +3856,7 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
 	while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
 	       (count < tx_ring->count)) {
 		bool cleaned = false;
-		rmb(); /* read buffer_info after eop_desc */
+		dma_rmb(); /* read buffer_info after eop_desc */
 		for ( ; !cleaned; count++) {
 			tx_desc = E1000_TX_DESC(*tx_ring, i);
 			buffer_info = &tx_ring->buffer_info[i];
@@ -4154,7 +4154,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
 		if (*work_done >= work_to_do)
 			break;
 		(*work_done)++;
-		rmb(); /* read descriptor and rx_buffer_info after status DD */
+		dma_rmb(); /* read descriptor and rx_buffer_info after status DD */
 
 		status = rx_desc->status;
 
@@ -4375,7 +4375,7 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
 		if (*work_done >= work_to_do)
 			break;
 		(*work_done)++;
-		rmb(); /* read descriptor and rx_buffer_info after status DD */
+		dma_rmb(); /* read descriptor and rx_buffer_info after status DD */
 
 		status = rx_desc->status;
 		length = le16_to_cpu(rx_desc->length);
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -947,7 +947,7 @@ static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done,
 		if (*work_done >= work_to_do)
 			break;
 		(*work_done)++;
-		rmb(); /* read descriptor and rx_buffer_info after status DD */
+		dma_rmb(); /* read descriptor and rx_buffer_info after status DD */
 
 		skb = buffer_info->skb;
 		buffer_info->skb = NULL;
@@ -1232,7 +1232,7 @@ static bool e1000_clean_tx_irq(struct e1000_ring *tx_ring)
 	       (count < tx_ring->count)) {
 		bool cleaned = false;
 
-		rmb(); /* read buffer_info after eop_desc */
+		dma_rmb(); /* read buffer_info after eop_desc */
 		for (; !cleaned; count++) {
 			tx_desc = E1000_TX_DESC(*tx_ring, i);
 			buffer_info = &tx_ring->buffer_info[i];
@@ -1332,7 +1332,7 @@ static bool e1000_clean_rx_irq_ps(struct e1000_ring *rx_ring, int *work_done,
 			break;
 		(*work_done)++;
 		skb = buffer_info->skb;
-		rmb(); /* read descriptor and rx_buffer_info after status DD */
+		dma_rmb(); /* read descriptor and rx_buffer_info after status DD */
 
 		/* in the packet split case this is header only */
 		prefetch(skb->data - NET_IP_ALIGN);
@@ -1536,7 +1536,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
 		if (*work_done >= work_to_do)
 			break;
 		(*work_done)++;
-		rmb(); /* read descriptor and rx_buffer_info after status DD */
+		dma_rmb(); /* read descriptor and rx_buffer_info after status DD */
 
 		skb = buffer_info->skb;
 		buffer_info->skb = NULL;