bcm63xx_enet: batch process rx path

Use netif_receive_skb_list() to batch process RX skbs.
Tested on a BCM6328 at 320 MHz using iperf3 -M 512; throughput increases
by about 12.5%.

Before:
[ ID] Interval           Transfer     Bandwidth       Retr
[  4]   0.00-30.00  sec   120 MBytes  33.7 Mbits/sec  277         sender
[  4]   0.00-30.00  sec   120 MBytes  33.5 Mbits/sec            receiver

After:
[ ID] Interval           Transfer     Bandwidth       Retr
[  4]   0.00-30.00  sec   136 MBytes  37.9 Mbits/sec  203         sender
[  4]   0.00-30.00  sec   135 MBytes  37.7 Mbits/sec            receiver

Signed-off-by: Sieng Piaw Liew <liew.s.piaw@gmail.com>
Acked-by: Florian Fainelli <f.fainelli@gmail.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit 9cbfea02c1
parent 2e42338705
Author:    Sieng Piaw Liew <liew.s.piaw@gmail.com>
Date:      2021-01-06 22:42:02 +08:00
Committer: Jakub Kicinski <kuba@kernel.org>

1 file changed, 5 insertions(+), 1 deletion(-)

--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -297,10 +297,12 @@ static void bcm_enet_refill_rx_timer(struct timer_list *t)
 static int bcm_enet_receive_queue(struct net_device *dev, int budget)
 {
 	struct bcm_enet_priv *priv;
+	struct list_head rx_list;
 	struct device *kdev;
 	int processed;
 
 	priv = netdev_priv(dev);
+	INIT_LIST_HEAD(&rx_list);
 	kdev = &priv->pdev->dev;
 	processed = 0;
@@ -391,10 +393,12 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget)
 		skb->protocol = eth_type_trans(skb, dev);
 		dev->stats.rx_packets++;
 		dev->stats.rx_bytes += len;
-		netif_receive_skb(skb);
+		list_add_tail(&skb->list, &rx_list);
 
 	} while (--budget > 0);
 
+	netif_receive_skb_list(&rx_list);
+
 	if (processed || !priv->rx_desc_count) {
 		bcm_enet_refill_rx(dev);
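
For reference, below is a minimal sketch (not part of the patch; example_rx_batch,
skbs and count are hypothetical names) of the batching pattern this commit adopts
in bcm_enet_receive_queue(): received skbs are queued on a local list and handed
to the stack in a single netif_receive_skb_list() call per poll, instead of one
netif_receive_skb() call per packet.

#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/*
 * Illustration only -- not part of the patch.  Collect received skbs on
 * a local list and deliver the whole batch to the stack with a single
 * netif_receive_skb_list() call, rather than calling netif_receive_skb()
 * once per packet.
 */
static int example_rx_batch(struct net_device *dev,
			    struct sk_buff **skbs, int count)
{
	LIST_HEAD(rx_list);
	int i;

	for (i = 0; i < count; i++) {
		struct sk_buff *skb = skbs[i];
		unsigned int len = skb->len;

		skb->protocol = eth_type_trans(skb, dev);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += len;

		/* was: netif_receive_skb(skb); */
		list_add_tail(&skb->list, &rx_list);
	}

	/* one trip up the stack for the whole batch */
	netif_receive_skb_list(&rx_list);

	return count;
}

Handing the stack a list lets it process the batch in one pass, which is where
the throughput improvement measured above comes from.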