// SPDX-License-Identifier: GPL-2.0-or-later
/* drivers/net/ifb.c:

	The purpose of this driver is to provide a device that allows
	for sharing of resources:

	1) qdiscs/policies that are per device as opposed to system wide.
	ifb allows for a device which can be redirected to, thus providing
	an impression of sharing.

	2) Allows for queueing incoming traffic for shaping instead of
	dropping.

	The original concept is based on what is known as the IMQ
	driver initially written by Martin Devera, later rewritten
	by Patrick McHardy and then maintained by Andre Correa.

	You need the tc action mirror or redirect to feed this device
	packets.
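
	For example (an illustrative sketch; eth0 and ifb0 are placeholder
	device names), ingress traffic on a physical device can be
	redirected to an ifb device and then shaped by the qdisc attached
	to that ifb device:

		ip link add ifb0 type ifb
		ip link set dev ifb0 up
		tc qdisc add dev eth0 ingress
		tc filter add dev eth0 parent ffff: protocol ip u32 \
			match u32 0 0 action mirred egress redirect dev ifb0
		tc qdisc add dev ifb0 root netem delay 2ms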

	Authors:	Jamal Hadi Salim (2005)

*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <net/pkt_sched.h>
#include <net/net_namespace.h>

#define TX_Q_LIMIT	32
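
/* Per tx-queue private state. ifb_xmit() (the device's transmit path)
 * enqueues redirected skbs on rq, and the tasklet later moves them to tq
 * for processing; packet/byte counters are kept per queue under the
 * u64_stats syncs so ifb_stats64() can sum them without extra locking.
 */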
struct ifb_q_private {
	struct net_device	*dev;
	struct tasklet_struct	ifb_tasklet;
	int			tasklet_pending;
	int			txqnum;
	struct sk_buff_head	rq;
	u64			rx_packets;
	u64			rx_bytes;
	struct u64_stats_sync	rsync;

	struct u64_stats_sync	tsync;
	u64			tx_packets;
	u64			tx_bytes;
	struct sk_buff_head	tq;
} ____cacheline_aligned_in_smp;

struct ifb_dev_private {
	struct ifb_q_private *tx_private;
};

static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev);
static int ifb_open(struct net_device *dev);
static int ifb_close(struct net_device *dev);

static void ifb_ri_tasklet(unsigned long _txp)
{
	struct ifb_q_private *txp = (struct ifb_q_private *)_txp;
	struct netdev_queue *txq;
	struct sk_buff *skb;

	txq = netdev_get_tx_queue(txp->dev, txp->txqnum);
	skb = skb_peek(&txp->tq);
	if (!skb) {
		if (!__netif_tx_trylock(txq))
			goto resched;
		skb_queue_splice_tail_init(&txp->rq, &txp->tq);
		__netif_tx_unlock(txq);
	}
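
	/* Drain tq: look up each skb's originating device from skb_iif,
	 * stamp the skb with this ifb device's ifindex, then re-inject it
	 * on the ingress path (netif_receive_skb) if it was redirected
	 * from ingress, or transmit it (dev_queue_xmit) otherwise.
	 */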
	while ((skb = __skb_dequeue(&txp->tq)) != NULL) {
		skb->redirected = 0;
		skb->tc_skip_classify = 1;

		u64_stats_update_begin(&txp->tsync);
		txp->tx_packets++;
		txp->tx_bytes += skb->len;
		u64_stats_update_end(&txp->tsync);

		rcu_read_lock();
		skb->dev = dev_get_by_index_rcu(dev_net(txp->dev), skb->skb_iif);
		if (!skb->dev) {
			rcu_read_unlock();
			dev_kfree_skb(skb);
			txp->dev->stats.tx_dropped++;
			if (skb_queue_len(&txp->tq) != 0)
				goto resched;
			break;
		}
		rcu_read_unlock();
		skb->skb_iif = txp->dev->ifindex;

		if (!skb->from_ingress) {
			dev_queue_xmit(skb);
		} else {
			skb_pull_rcsum(skb, skb->mac_len);
			netif_receive_skb(skb);
		}
	}
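
	/* tq has been drained; if rq is also empty there is nothing left
	 * to do, so clear tasklet_pending and wake the tx queue if it had
	 * been stopped.  Otherwise (or if the tx lock is contended)
	 * reschedule the tasklet.
	 */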
	if (__netif_tx_trylock(txq)) {
		skb = skb_peek(&txp->rq);
		if (!skb) {
			txp->tasklet_pending = 0;
			if (netif_tx_queue_stopped(txq))
				netif_tx_wake_queue(txq);
		} else {
			__netif_tx_unlock(txq);
			goto resched;
		}
		__netif_tx_unlock(txq);
	} else {
resched:
		txp->tasklet_pending = 1;
		tasklet_schedule(&txp->ifb_tasklet);
	}

}
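
/* Aggregate the per-queue counters into the device-wide stats. */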
static void ifb_stats64(struct net_device *dev,
			struct rtnl_link_stats64 *stats)
{
	struct ifb_dev_private *dp = netdev_priv(dev);
	struct ifb_q_private *txp = dp->tx_private;
	unsigned int start;
	u64 packets, bytes;
	int i;

	for (i = 0; i < dev->num_tx_queues; i++, txp++) {
|
|
|
|
do {
|
|
|
|
start = u64_stats_fetch_begin_irq(&txp->rsync);
|
|
|
|
packets = txp->rx_packets;
|
|
|
|
bytes = txp->rx_bytes;
|
|
|
|
} while (u64_stats_fetch_retry_irq(&txp->rsync, start));
|
|
|
|
stats->rx_packets += packets;
|
|
|
|
stats->rx_bytes += bytes;
|
|
|
|
|
|
|
|
do {
|
|
|
|
start = u64_stats_fetch_begin_irq(&txp->tsync);
|
|
|
|
packets = txp->tx_packets;
|
|
|
|
bytes = txp->tx_bytes;
|
|
|
|
} while (u64_stats_fetch_retry_irq(&txp->tsync, start));
|
|
|
|
stats->tx_packets += packets;
|
|
|
|
stats->tx_bytes += bytes;
|
|
|
|
}
|
2011-06-20 19:42:30 +08:00
|
|
|
stats->rx_dropped = dev->stats.rx_dropped;
|
|
|
|
stats->tx_dropped = dev->stats.tx_dropped;
|
|
|
|
}
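
/* ndo_init: allocate one ifb_q_private per TX queue and set up its
 * packet queues, per-queue stats sync structures and processing tasklet.
 */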
static int ifb_dev_init(struct net_device *dev)
{
        struct ifb_dev_private *dp = netdev_priv(dev);
        struct ifb_q_private *txp;
        int i;

        txp = kcalloc(dev->num_tx_queues, sizeof(*txp), GFP_KERNEL);
        if (!txp)
                return -ENOMEM;
        dp->tx_private = txp;
        for (i = 0; i < dev->num_tx_queues; i++,txp++) {
                txp->txqnum = i;
                txp->dev = dev;
                __skb_queue_head_init(&txp->rq);
                __skb_queue_head_init(&txp->tq);
                u64_stats_init(&txp->rsync);
                u64_stats_init(&txp->tsync);
                tasklet_init(&txp->ifb_tasklet, ifb_ri_tasklet,
                             (unsigned long)txp);
                netif_tx_start_queue(netdev_get_tx_queue(dev, i));
        }
        return 0;
}

static const struct net_device_ops ifb_netdev_ops = {
        .ndo_open               = ifb_open,
        .ndo_stop               = ifb_close,
        .ndo_get_stats64        = ifb_stats64,
        .ndo_start_xmit         = ifb_xmit,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_init               = ifb_dev_init,
};
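
/* Advertise the full set of software offload flags so that large GSO/GRO
 * packets redirected to ifb do not have to be segmented just to cross this
 * virtual device (rationale from the "ifb: add performance flags" changelog).
 */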
#define IFB_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_FRAGLIST  | \
                      NETIF_F_TSO_ECN | NETIF_F_TSO | NETIF_F_TSO6     | \
                      NETIF_F_GSO_ENCAP_ALL                            | \
                      NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_CTAG_TX        | \
                      NETIF_F_HW_VLAN_STAG_TX)
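
/* priv_destructor: kill the per-queue tasklets and drop any packets still
 * sitting on the rq/tq lists before the per-queue private array is freed.
 */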
static void ifb_dev_free(struct net_device *dev)
{
        struct ifb_dev_private *dp = netdev_priv(dev);
        struct ifb_q_private *txp = dp->tx_private;
        int i;

        for (i = 0; i < dev->num_tx_queues; i++,txp++) {
                tasklet_kill(&txp->ifb_tasklet);
                __skb_queue_purge(&txp->rq);
                __skb_queue_purge(&txp->tq);
        }
        kfree(dp->tx_private);
}
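
/* Common setup, used both for the module-created legacy devices and for
 * devices created through the rtnl link ops (ip link add ... type ifb).
 */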
static void ifb_setup(struct net_device *dev)
{
        /* Initialize the device structure. */
        dev->netdev_ops = &ifb_netdev_ops;

        /* Fill in device structure with ethernet-generic values. */
        ether_setup(dev);
        dev->tx_queue_len = TX_Q_LIMIT;

        dev->features |= IFB_FEATURES;
        dev->hw_features |= dev->features;
        dev->hw_enc_features |= dev->features;
        dev->vlan_features |= IFB_FEATURES & ~(NETIF_F_HW_VLAN_CTAG_TX |
                                               NETIF_F_HW_VLAN_STAG_TX);

        dev->flags |= IFF_NOARP;
        dev->flags &= ~IFF_MULTICAST;
        dev->priv_flags &= ~IFF_TX_SKB_SHARING;
        netif_keep_dst(dev);
        eth_hw_addr_random(dev);
        dev->needs_free_netdev = true;
        dev->priv_destructor = ifb_dev_free;

        dev->min_mtu = 0;
        dev->max_mtu = 0;
}
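
/* ndo_start_xmit: packets mirrored/redirected to ifb land here. Anything
 * that was not redirected (or carries no originating ifindex) is dropped;
 * the rest is counted as RX, queued on the per-queue rq list and handed to
 * the tasklet for processing.
 */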
static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct ifb_dev_private *dp = netdev_priv(dev);
        struct ifb_q_private *txp = dp->tx_private + skb_get_queue_mapping(skb);

        u64_stats_update_begin(&txp->rsync);
        txp->rx_packets++;
        txp->rx_bytes += skb->len;
        u64_stats_update_end(&txp->rsync);

        if (!skb->redirected || !skb->skb_iif) {
                dev_kfree_skb(skb);
                dev->stats.rx_dropped++;
                return NETDEV_TX_OK;
        }

        if (skb_queue_len(&txp->rq) >= dev->tx_queue_len)
                netif_tx_stop_queue(netdev_get_tx_queue(dev, txp->txqnum));

        __skb_queue_tail(&txp->rq, skb);
        if (!txp->tasklet_pending) {
                txp->tasklet_pending = 1;
                tasklet_schedule(&txp->ifb_tasklet);
        }

        return NETDEV_TX_OK;
}
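
/* Open/close only toggle the TX queues; there is no hardware behind ifb. */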
static int ifb_close(struct net_device *dev)
{
        netif_tx_stop_all_queues(dev);
        return 0;
}

static int ifb_open(struct net_device *dev)
{
        netif_tx_start_all_queues(dev);
        return 0;
}
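
/* Netlink validation: a supplied address must be a valid unicast Ethernet
 * address.
 */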
static int ifb_validate(struct nlattr *tb[], struct nlattr *data[],
                        struct netlink_ext_ack *extack)
{
        if (tb[IFLA_ADDRESS]) {
                if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
                        return -EINVAL;
                if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
                        return -EADDRNOTAVAIL;
        }
        return 0;
}
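
/* rtnl link ops so that additional devices can be created at runtime with
 * "ip link add ... type ifb".
 */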
static struct rtnl_link_ops ifb_link_ops __read_mostly = {
        .kind           = "ifb",
        .priv_size      = sizeof(struct ifb_dev_private),
        .setup          = ifb_setup,
        .validate       = ifb_validate,
};

/* Number of ifb devices to be set up by this module.
 * Note that these legacy devices have one queue.
 * Prefer something like : ip link add ifb10 numtxqueues 8 type ifb
 */
static int numifbs = 2;
module_param(numifbs, int, 0);
MODULE_PARM_DESC(numifbs, "Number of ifb devices");
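
/* Typical use, condensed from the multiqueue changelog's test setup
 * (eth0 and ifb10 are illustrative names):
 *
 *      ip link add ifb10 numtxqueues 8 type ifb
 *      ip link set dev ifb10 up
 *      tc qdisc add dev eth0 ingress
 *      tc filter add dev eth0 parent ffff: protocol ip u32 match u32 0 0 \
 *              flowid 1:1 action mirred egress redirect dev ifb10
 *      tc qdisc add dev ifb10 root handle 1: mq
 */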

static int __init ifb_init_one(int index)
{
        struct net_device *dev_ifb;
        int err;

        dev_ifb = alloc_netdev(sizeof(struct ifb_dev_private), "ifb%d",
                               NET_NAME_UNKNOWN, ifb_setup);
        if (!dev_ifb)
                return -ENOMEM;

        dev_ifb->rtnl_link_ops = &ifb_link_ops;
        err = register_netdevice(dev_ifb);
        if (err < 0)
                goto err;

        return 0;

err:
        free_netdev(dev_ifb);
        return err;
}
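
/* Register the link ops and create the initial single-queue devices.
 * Device creation runs under both pernet_ops_rwsem and the rtnl lock.
 */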
static int __init ifb_init_module(void)
{
        int i, err;

        down_write(&pernet_ops_rwsem);
        rtnl_lock();
        err = __rtnl_link_register(&ifb_link_ops);
        if (err < 0)
                goto out;

        for (i = 0; i < numifbs && !err; i++) {
                err = ifb_init_one(i);
                cond_resched();
        }
        if (err)
                __rtnl_link_unregister(&ifb_link_ops);

out:
        rtnl_unlock();
        up_write(&pernet_ops_rwsem);

        return err;
}

static void __exit ifb_cleanup_module(void)
{
        rtnl_link_unregister(&ifb_link_ops);
}

module_init(ifb_init_module);
module_exit(ifb_cleanup_module);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jamal Hadi Salim");
MODULE_ALIAS_RTNL_LINK("ifb");