fm10k: Don't assume page fragments are page size

This change pulls out the optimization that assumed that all fragments
would be limited to page size. That hasn't been the case for some time
now, and the assumption is incorrect, as the TCP allocator can provide
up to a 32K page fragment.

Signed-off-by: Alexander Duyck <alexander.h.duyck@redhat.com>
Acked-by: Jacob Keller <jacob.e.keller@intel.com>
Tested-by: Krishneil Singh <krishneil.k.singh@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
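
To make the undercount concrete, here is a minimal standalone sketch of the
descriptor math (not part of the commit). TXD_USE_COUNT and DIV_ROUND_UP
mirror the driver and kernel macros visible in the diff below; the 16K value
assumed for FM10K_MAX_DATA_PER_TXD is illustrative, not quoted from the
driver headers.

/* Standalone sketch of the Tx descriptor math. The macro shapes follow
 * the kernel/driver definitions; the 16K value of FM10K_MAX_DATA_PER_TXD
 * is an assumption for this example.
 */
#include <stdio.h>

#define FM10K_MAX_DATA_PER_TXD	16384	/* assumed max bytes per Tx descriptor */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define TXD_USE_COUNT(S)	DIV_ROUND_UP((S), FM10K_MAX_DATA_PER_TXD)

int main(void)
{
	unsigned int frag_size = 32768;	/* 32K fragment from the TCP allocator */

	/* The removed #else path booked one descriptor per fragment. */
	unsigned int assumed = 1;

	/* The size-based count the patch now applies unconditionally. */
	unsigned int needed = TXD_USE_COUNT(frag_size);

	printf("assumed %u descriptor, need %u\n", assumed, needed);
	return 0;
}

Compiled and run, this prints "assumed 1 descriptor, need 2": the old
shortcut under-reserves descriptors for any fragment larger than
FM10K_MAX_DATA_PER_TXD, which is exactly the case a 32K TCP allocation
triggers.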
Author: Alexander Duyck <alexander.h.duyck@redhat.com>, 2015-06-16 11:47:12 -07:00
Committer: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
commit aae072e363, parent bdc7f5902d
1 file changed, 1 insertion(+), 6 deletions(-)

drivers/net/ethernet/intel/fm10k/fm10k_main.c

@@ -1079,9 +1079,7 @@ netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb,
 	struct fm10k_tx_buffer *first;
 	int tso;
 	u32 tx_flags = 0;
-#if PAGE_SIZE > FM10K_MAX_DATA_PER_TXD
 	unsigned short f;
-#endif
 	u16 count = TXD_USE_COUNT(skb_headlen(skb));
 
 	/* need: 1 descriptor per page * PAGE_SIZE/FM10K_MAX_DATA_PER_TXD,
@@ -1089,12 +1087,9 @@ netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb,
 	 * + 2 desc gap to keep tail from touching head
 	 * otherwise try next time
 	 */
-#if PAGE_SIZE > FM10K_MAX_DATA_PER_TXD
 	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
 		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
-#else
-	count += skb_shinfo(skb)->nr_frags;
-#endif
+
 	if (fm10k_maybe_stop_tx(tx_ring, count + 3)) {
 		tx_ring->tx_stats.tx_busy++;
 		return NETDEV_TX_BUSY;