ibmveth: Add rx_copybreak

For small packets, create a new skb and copy the packet into it so we
avoid tearing down and creating a TCE entry.

Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Santiago Leon <santil@linux.vnet.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
Santiago Leon 2010-09-03 18:28:25 +00:00 committed by David S. Miller
parent c08cc3cceb
commit 8d86c61ae4
1 changed file with 23 additions and 6 deletions

View File

@@ -122,6 +122,11 @@ module_param(tx_copybreak, uint, 0644);
MODULE_PARM_DESC(tx_copybreak,
"Maximum size of packet that is copied to a new buffer on transmit");
static unsigned int rx_copybreak __read_mostly = 128;
module_param(rx_copybreak, uint, 0644);
MODULE_PARM_DESC(rx_copybreak,
"Maximum size of packet that is copied to a new buffer on receive");
struct ibmveth_stat {
char name[ETH_GSTRING_LEN];
int offset;
@@ -1002,8 +1007,6 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
restart_poll:
do {
struct sk_buff *skb;
if (!ibmveth_rxq_pending_buffer(adapter))
break;
@@ -1014,20 +1017,34 @@ static int ibmveth_poll(struct napi_struct *napi, int budget)
ibmveth_debug_printk("recycling invalid buffer\n");
ibmveth_rxq_recycle_buffer(adapter);
} else {
struct sk_buff *skb, *new_skb;
int length = ibmveth_rxq_frame_length(adapter);
int offset = ibmveth_rxq_frame_offset(adapter);
int csum_good = ibmveth_rxq_csum_good(adapter);
skb = ibmveth_rxq_get_buffer(adapter);
if (csum_good)
skb->ip_summed = CHECKSUM_UNNECESSARY;
new_skb = NULL;
if (length < rx_copybreak)
new_skb = netdev_alloc_skb(netdev, length);
if (new_skb) {
skb_copy_to_linear_data(new_skb,
skb->data + offset,
length);
skb = new_skb;
ibmveth_rxq_recycle_buffer(adapter);
} else {
ibmveth_rxq_harvest_buffer(adapter);
skb_reserve(skb, offset);
}
skb_put(skb, length);
skb->protocol = eth_type_trans(skb, netdev);
if (csum_good)
skb->ip_summed = CHECKSUM_UNNECESSARY;
netif_receive_skb(skb); /* send it up */
netdev->stats.rx_packets++;