ixgbevf: Cleanup accounting for space needed at start of xmit_frame
This change cleans up the accounting needed at the start of xmit_frame so that we can avoid doing too much work to determine how many descriptors we will need.

Signed-off-by: Alexander Duyck <alexander.h.duyck@intel.com>
Signed-off-by: Greg Rose <gregory.v.rose@intel.com>
Tested-by: Sibai Li <sibai.li@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
commit 3595990a9c
parent e2c28ce760
@@ -175,10 +175,8 @@ static void ixgbevf_unmap_and_free_tx_resource(struct ixgbevf_adapter *adapter,
 #define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR)
 
 /* Tx Descriptors needed, worst case */
-#define TXD_USE_COUNT(S) (((S) >> IXGBE_MAX_TXD_PWR) + \
-                         (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
-#define DESC_NEEDED (TXD_USE_COUNT(IXGBE_MAX_DATA_PER_TXD) /* skb->data */ + \
-        MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1) /* for context */
+#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
+#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
 
 static void ixgbevf_tx_timeout(struct net_device *netdev);
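As a standalone sketch (not driver code), the macro change can be checked in isolation: the old shift-and-mask expression and the new DIV_ROUND_UP() form are the same ceiling division of a buffer size by IXGBE_MAX_DATA_PER_TXD. The IXGBE_MAX_TXD_PWR value of 14 and the DIV_ROUND_UP() definition below are assumptions mirroring the kernel's; the simplified DESC_NEEDED of MAX_SKB_FRAGS + 4 presumably covers the head, the context descriptor and the two-descriptor gap on top of one descriptor per fragment.

/*
 * Userspace check that the new DIV_ROUND_UP-based TXD_USE_COUNT
 * matches the old shift/mask form. IXGBE_MAX_TXD_PWR is assumed
 * to be 14, as in the ixgbevf driver.
 */
#include <assert.h>
#include <stdio.h>

#define IXGBE_MAX_TXD_PWR       14
#define IXGBE_MAX_DATA_PER_TXD  (1u << IXGBE_MAX_TXD_PWR)  /* 16384 bytes per descriptor */

#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))    /* ceiling division */

/* Old form: quotient, plus one more descriptor if there is a remainder. */
#define TXD_USE_COUNT_OLD(S)    (((S) >> IXGBE_MAX_TXD_PWR) + \
                                 (((S) & (IXGBE_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
/* New form: the same ceiling division, spelled directly. */
#define TXD_USE_COUNT_NEW(S)    DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)

int main(void)
{
        unsigned int sizes[] = { 1, 60, 1514, 16384, 16385, 65536 };
        unsigned int i;

        for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
                assert(TXD_USE_COUNT_OLD(sizes[i]) == TXD_USE_COUNT_NEW(sizes[i]));
                printf("%u bytes -> %u descriptor(s)\n",
                       sizes[i], TXD_USE_COUNT_NEW(sizes[i]));
        }
        return 0;
}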
@@ -2932,33 +2930,37 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 	unsigned int tx_flags = 0;
 	u8 hdr_len = 0;
 	int r_idx = 0, tso;
-	int count = 0;
-	unsigned int f;
+	u16 count = TXD_USE_COUNT(skb_headlen(skb));
+#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
+	unsigned short f;
+#endif
 
 	tx_ring = &adapter->tx_ring[r_idx];
 
+	/*
+	 * need: 1 descriptor per page * PAGE_SIZE/IXGBE_MAX_DATA_PER_TXD,
+	 *       + 1 desc for skb_headlen/IXGBE_MAX_DATA_PER_TXD,
+	 *       + 2 desc gap to keep tail from touching head,
+	 *       + 1 desc for context descriptor,
+	 * otherwise try next time
+	 */
+#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
+	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
+		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
+#else
+	count += skb_shinfo(skb)->nr_frags;
+#endif
+	if (ixgbevf_maybe_stop_tx(netdev, tx_ring, count + 3)) {
+		adapter->tx_busy++;
+		return NETDEV_TX_BUSY;
+	}
+
 	if (vlan_tx_tag_present(skb)) {
 		tx_flags |= vlan_tx_tag_get(skb);
 		tx_flags <<= IXGBE_TX_FLAGS_VLAN_SHIFT;
 		tx_flags |= IXGBE_TX_FLAGS_VLAN;
 	}
 
-	/* four things can cause us to need a context descriptor */
-	if (skb_is_gso(skb) ||
-	    (skb->ip_summed == CHECKSUM_PARTIAL) ||
-	    (tx_flags & IXGBE_TX_FLAGS_VLAN))
-		count++;
-
-	count += TXD_USE_COUNT(skb_headlen(skb));
-	for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
-		count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]));
-
-	if (ixgbevf_maybe_stop_tx(netdev, tx_ring, count)) {
-		adapter->tx_busy++;
-		return NETDEV_TX_BUSY;
-	}
-
 	first = tx_ring->next_to_use;
 
 	if (skb->protocol == htons(ETH_P_IP))
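A minimal userspace model of the reworked accounting at the start of ixgbevf_xmit_frame(), under the assumption of 4K pages: struct fake_skb, count_tx_descs() and the sample sizes are invented stand-ins for the real sk_buff fields, and only the counting logic mirrors the patch, namely descriptors for the linear area plus one per fragment, with the ring then asked for count + 3 (the 2-descriptor gap plus 1 context descriptor).

/*
 * Simplified model of the new descriptor accounting; not driver code.
 * fake_skb stands in for struct sk_buff, PAGE_SIZE is assumed 4K.
 */
#include <stdio.h>

#define IXGBE_MAX_TXD_PWR       14
#define IXGBE_MAX_DATA_PER_TXD  (1u << IXGBE_MAX_TXD_PWR)
#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))
#define TXD_USE_COUNT(S)        DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)

#ifndef PAGE_SIZE
#define PAGE_SIZE               4096u
#endif

struct fake_skb {
        unsigned int headlen;           /* bytes in the linear area */
        unsigned int nr_frags;          /* number of paged fragments */
        unsigned int frag_size[8];      /* size of each fragment */
};

/* Worst-case data descriptor count for one skb, as in the new code. */
static unsigned int count_tx_descs(const struct fake_skb *skb)
{
        unsigned int count = TXD_USE_COUNT(skb->headlen);
#if PAGE_SIZE > IXGBE_MAX_DATA_PER_TXD
        unsigned int f;

        /* Large pages: one fragment may span several descriptors. */
        for (f = 0; f < skb->nr_frags; f++)
                count += TXD_USE_COUNT(skb->frag_size[f]);
#else
        /* 4K pages: every fragment fits in a single descriptor. */
        count += skb->nr_frags;
#endif
        return count;
}

int main(void)
{
        struct fake_skb skb = { .headlen = 1514, .nr_frags = 2,
                                .frag_size = { 4096, 4096 } };
        unsigned int count = count_tx_descs(&skb);

        /*
         * The ring must also hold 1 context descriptor and keep a
         * 2-descriptor gap between tail and head, hence "count + 3".
         */
        printf("data descriptors: %u, reserve: %u\n", count, count + 3);
        return 0;
}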