igb: Only DMA sync frame length

On some platforms, syncing a buffer for DMA is expensive. Rather than
sync the whole 2K receive buffer, synchronise only the length of the
received frame, which will typically be the MTU, or a much smaller TCP ACK.

For an IMX6Q, this gives around 6% higher TCP receive performance, which
on this platform is bound by cache operations, and it reduces CPU load
for TCP transmit.

Signed-off-by: Andrew Lunn <andrew@lunn.ch>
Tested-by: Aaron Brown <aaron.f.brown@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
commit 64f2525ca4 (parent 581e0c7df9)
Author: Andrew Lunn, 2016-06-03 23:03:25 +02:00
Committed by: Jeff Kirsher
1 changed file with 4 additions and 3 deletions

@@ -6856,12 +6856,12 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
  **/
 static bool igb_add_rx_frag(struct igb_ring *rx_ring,
                             struct igb_rx_buffer *rx_buffer,
+                            unsigned int size,
                             union e1000_adv_rx_desc *rx_desc,
                             struct sk_buff *skb)
 {
        struct page *page = rx_buffer->page;
        unsigned char *va = page_address(page) + rx_buffer->page_offset;
-       unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
 #if (PAGE_SIZE < 8192)
        unsigned int truesize = IGB_RX_BUFSZ;
 #else
@@ -6913,6 +6913,7 @@ static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
                                            union e1000_adv_rx_desc *rx_desc,
                                            struct sk_buff *skb)
 {
+       unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
        struct igb_rx_buffer *rx_buffer;
        struct page *page;
 
@@ -6948,11 +6949,11 @@ static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
        dma_sync_single_range_for_cpu(rx_ring->dev,
                                      rx_buffer->dma,
                                      rx_buffer->page_offset,
-                                     IGB_RX_BUFSZ,
+                                     size,
                                      DMA_FROM_DEVICE);
 
        /* pull page into skb */
-       if (igb_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) {
+       if (igb_add_rx_frag(rx_ring, rx_buffer, size, rx_desc, skb)) {
               /* hand second half of page back to the ring */
               igb_reuse_rx_page(rx_ring, rx_buffer);
        } else {
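
Taken together, the three hunks read the frame length from the descriptor
write-back once, in igb_fetch_rx_buffer(), and pass it both to the CPU-side
DMA sync and to igb_add_rx_frag(). The condensed sketch below illustrates the
post-patch flow; it is not the full driver function (skb allocation,
prefetching, page unmapping on the no-reuse path and error handling are
omitted), but the identifiers are the driver's own.

/* Condensed, illustrative sketch of the receive path after this patch. */
static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
                                           union e1000_adv_rx_desc *rx_desc,
                                           struct sk_buff *skb)
{
        /* Length the NIC wrote back for this frame, not the 2K buffer size. */
        unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
        struct igb_rx_buffer *rx_buffer;

        rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];

        /* Make only the received bytes visible to the CPU; on
         * cache-incoherent platforms such as the IMX6Q this is the
         * expensive step the patch shrinks from IGB_RX_BUFSZ to size.
         */
        dma_sync_single_range_for_cpu(rx_ring->dev,
                                      rx_buffer->dma,
                                      rx_buffer->page_offset,
                                      size,
                                      DMA_FROM_DEVICE);

        /* The frag helper now takes the same length instead of
         * re-reading it from the descriptor.
         */
        if (igb_add_rx_frag(rx_ring, rx_buffer, size, rx_desc, skb))
                /* hand second half of page back to the ring */
                igb_reuse_rx_page(rx_ring, rx_buffer);

        return skb;
}

With a 1500-byte MTU the sync now covers at most roughly 1.5K rather than the
full 2K buffer, and for a bare TCP ACK only a few dozen bytes, which is where
the cache-operation savings on the IMX6Q come from.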