#ifndef __NET_SCHED_GENERIC_H
#define __NET_SCHED_GENERIC_H

#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/pkt_sched.h>
#include <linux/pkt_cls.h>
#include <linux/percpu.h>
#include <linux/dynamic_queue_limits.h>
#include <linux/list.h>
#include <linux/refcount.h>
#include <net/gen_stats.h>
#include <net/rtnetlink.h>

struct Qdisc_ops;
struct qdisc_walker;
struct tcf_walker;
struct module;

struct qdisc_rate_table {
        struct tc_ratespec rate;
        u32             data[256];
        struct qdisc_rate_table *next;
        int             refcnt;
};

enum qdisc_state_t {
        __QDISC_STATE_SCHED,
        __QDISC_STATE_DEACTIVATED,
};

struct qdisc_size_table {
        struct rcu_head         rcu;
        struct list_head        list;
        struct tc_sizespec      szopts;
        int                     refcnt;
        u16                     data[];
};

/* similar to sk_buff_head, but skb->prev pointer is undefined. */
struct qdisc_skb_head {
        struct sk_buff  *head;
        struct sk_buff  *tail;
        __u32           qlen;
        spinlock_t      lock;
};

struct Qdisc {
        int                     (*enqueue)(struct sk_buff *skb,
                                           struct Qdisc *sch,
                                           struct sk_buff **to_free);
        struct sk_buff *        (*dequeue)(struct Qdisc *sch);
        unsigned int            flags;
#define TCQ_F_BUILTIN           1
#define TCQ_F_INGRESS           2
#define TCQ_F_CAN_BYPASS        4
#define TCQ_F_MQROOT            8
#define TCQ_F_ONETXQUEUE        0x10 /* dequeue_skb() can assume all skbs are for
                                      * q->dev_queue: it can test
                                      * netif_xmit_frozen_or_stopped() before
                                      * dequeueing the next packet.
                                      * It is true for MQ/MQPRIO slaves, or a non
                                      * multiqueue device.
                                      */
#define TCQ_F_WARN_NONWC        (1 << 16)
#define TCQ_F_CPUSTATS          0x20 /* run using percpu statistics */
#define TCQ_F_NOPARENT          0x40 /* root of its hierarchy:
                                      * qdisc_tree_decrease_qlen() should stop.
                                      */
#define TCQ_F_INVISIBLE         0x80 /* invisible by default in dump */
        u32                     limit;
        const struct Qdisc_ops  *ops;
        struct qdisc_size_table __rcu *stab;
        struct hlist_node       hash;
        u32                     handle;
        u32                     parent;

        struct netdev_queue     *dev_queue;

        struct net_rate_estimator __rcu *rate_est;
        struct gnet_stats_basic_cpu __percpu *cpu_bstats;
        struct gnet_stats_queue __percpu *cpu_qstats;

        /*
         * For performance sake on SMP, we put highly modified fields at the end
         */
        struct sk_buff          *gso_skb ____cacheline_aligned_in_smp;
        struct qdisc_skb_head   q;
        struct gnet_stats_basic_packed bstats;
        seqcount_t              running;
        struct gnet_stats_queue qstats;
        unsigned long           state;
        struct Qdisc            *next_sched;
        struct sk_buff          *skb_bad_txq;
        int                     padded;
        refcount_t              refcnt;

        spinlock_t              busylock ____cacheline_aligned_in_smp;
};

static inline void qdisc_refcount_inc(struct Qdisc *qdisc)
{
        if (qdisc->flags & TCQ_F_BUILTIN)
                return;
        refcount_inc(&qdisc->refcnt);
}

static inline bool qdisc_is_running(const struct Qdisc *qdisc)
{
        return (raw_read_seqcount(&qdisc->running) & 1) ? true : false;
}

static inline bool qdisc_run_begin(struct Qdisc *qdisc)
{
        if (qdisc_is_running(qdisc))
                return false;
        /* Variant of write_seqcount_begin() telling lockdep a trylock
         * was attempted.
         */
        raw_write_seqcount_begin(&qdisc->running);
        seqcount_acquire(&qdisc->running.dep_map, 0, 1, _RET_IP_);
        return true;
}

static inline void qdisc_run_end(struct Qdisc *qdisc)
{
        write_seqcount_end(&qdisc->running);
}
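
/* Usage sketch (simplified): qdisc_run_begin()/qdisc_run_end() make the
 * ->running seqcount act as a per-qdisc trylock.  The caller side looks
 * roughly like this (see qdisc_run() and __qdisc_run() for the real code):
 *
 *      if (qdisc_run_begin(q))
 *              __qdisc_run(q);   // dequeue/transmit loop; calls
 *                                // qdisc_run_end(q) when done
 *
 * Only one CPU runs the dequeue loop of a given qdisc at a time; a CPU
 * that loses the trylock leaves its packet on the queue for the winner.
 */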

static inline bool qdisc_may_bulk(const struct Qdisc *qdisc)
{
        return qdisc->flags & TCQ_F_ONETXQUEUE;
}

static inline int qdisc_avail_bulklimit(const struct netdev_queue *txq)
{
#ifdef CONFIG_BQL
        /* Non-BQL migrated drivers will return 0, too. */
        return dql_avail(&txq->dql);
#else
        return 0;
#endif
}
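
/* Bulk-dequeue note (informal sketch of how these are used): the core
 * dequeue path may pull several packets in one go while holding the qdisc
 * root lock and hand them to the driver as a list.  qdisc_may_bulk()
 * reports the easy case of a qdisc feeding exactly one TX queue
 * (TCQ_F_ONETXQUEUE), and qdisc_avail_bulklimit() bounds the bulked bytes
 * by the driver's BQL budget, so meaningful bulking only happens for
 * BQL-enabled drivers.
 */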

struct Qdisc_class_ops {
        /* Child qdisc manipulation */
        struct netdev_queue *   (*select_queue)(struct Qdisc *, struct tcmsg *);
        int                     (*graft)(struct Qdisc *, unsigned long cl,
                                        struct Qdisc *, struct Qdisc **);
        struct Qdisc *          (*leaf)(struct Qdisc *, unsigned long cl);
        void                    (*qlen_notify)(struct Qdisc *, unsigned long);

        /* Class manipulation routines */
        unsigned long           (*find)(struct Qdisc *, u32 classid);
        int                     (*change)(struct Qdisc *, u32, u32,
                                        struct nlattr **, unsigned long *);
        int                     (*delete)(struct Qdisc *, unsigned long);
        void                    (*walk)(struct Qdisc *, struct qdisc_walker * arg);

        /* Filter manipulation */
        struct tcf_block *      (*tcf_block)(struct Qdisc *, unsigned long);
        unsigned long           (*bind_tcf)(struct Qdisc *, unsigned long,
                                        u32 classid);
        void                    (*unbind_tcf)(struct Qdisc *, unsigned long);

        /* rtnetlink specific */
        int                     (*dump)(struct Qdisc *, unsigned long,
                                        struct sk_buff *skb, struct tcmsg*);
        int                     (*dump_stats)(struct Qdisc *, unsigned long,
                                        struct gnet_dump *);
};

struct Qdisc_ops {
        struct Qdisc_ops        *next;
        const struct Qdisc_class_ops    *cl_ops;
        char                    id[IFNAMSIZ];
        int                     priv_size;

        int                     (*enqueue)(struct sk_buff *skb,
                                           struct Qdisc *sch,
                                           struct sk_buff **to_free);
        struct sk_buff *        (*dequeue)(struct Qdisc *);
        struct sk_buff *        (*peek)(struct Qdisc *);

        int                     (*init)(struct Qdisc *, struct nlattr *arg);
        void                    (*reset)(struct Qdisc *);
        void                    (*destroy)(struct Qdisc *);
        int                     (*change)(struct Qdisc *, struct nlattr *arg);
        void                    (*attach)(struct Qdisc *);

        int                     (*dump)(struct Qdisc *, struct sk_buff *);
        int                     (*dump_stats)(struct Qdisc *, struct gnet_dump *);

        struct module           *owner;
};
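
/* Registration sketch (informal): a queueing discipline fills in one of
 * these structures and registers it at module init time, roughly:
 *
 *      static struct Qdisc_ops my_qdisc_ops __read_mostly = {
 *              .id        = "myqdisc",         // placeholder name
 *              .priv_size = sizeof(struct my_sched_data),
 *              .enqueue   = my_enqueue,
 *              .dequeue   = my_dequeue,
 *              .peek      = qdisc_peek_dequeued,
 *              .init      = my_init,
 *              .reset     = my_reset,
 *              .dump      = my_dump,
 *              .owner     = THIS_MODULE,
 *      };
 *
 *      register_qdisc(&my_qdisc_ops);   // see net/pkt_sched.h
 *
 * The my_* symbols above are illustrative placeholders, not kernel code.
 */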

struct tcf_result {
        union {
                struct {
                        unsigned long   class;
                        u32             classid;
                };
                const struct tcf_proto *goto_tp;
        };
};

struct tcf_proto_ops {
        struct list_head        head;
        char                    kind[IFNAMSIZ];

        int                     (*classify)(struct sk_buff *,
                                            const struct tcf_proto *,
                                            struct tcf_result *);
        int                     (*init)(struct tcf_proto*);
        void                    (*destroy)(struct tcf_proto*);

        void*                   (*get)(struct tcf_proto*, u32 handle);
        int                     (*change)(struct net *net, struct sk_buff *,
                                        struct tcf_proto*, unsigned long,
                                        u32 handle, struct nlattr **,
                                        void **, bool);
        int                     (*delete)(struct tcf_proto*, void *, bool*);
        void                    (*walk)(struct tcf_proto*, struct tcf_walker *arg);
        void                    (*bind_class)(void *, u32, unsigned long);

        /* rtnetlink specific */
        int                     (*dump)(struct net*, struct tcf_proto*, void *,
                                        struct sk_buff *skb, struct tcmsg*);

        struct module           *owner;
};

struct tcf_proto {
        /* Fast access part */
        struct tcf_proto __rcu  *next;
        void __rcu              *root;
        int                     (*classify)(struct sk_buff *,
                                            const struct tcf_proto *,
                                            struct tcf_result *);
        __be16                  protocol;

        /* All the rest */
        u32                     prio;
        u32                     classid;
        struct Qdisc            *q;
        void                    *data;
        const struct tcf_proto_ops      *ops;
        struct tcf_chain        *chain;
        struct rcu_head         rcu;
};

struct qdisc_skb_cb {
        unsigned int            pkt_len;
        u16                     slave_dev_queue_mapping;
        u16                     tc_classid;
#define QDISC_CB_PRIV_LEN 20
        unsigned char           data[QDISC_CB_PRIV_LEN];
};

struct tcf_chain {
        struct tcf_proto __rcu *filter_chain;
        struct tcf_proto __rcu **p_filter_chain;
        struct list_head list;
        struct tcf_block *block;
        u32 index; /* chain index */
        unsigned int refcnt;
};

struct tcf_block {
        struct list_head chain_list;
        struct net *net;
        struct Qdisc *q;
};

static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
{
        struct qdisc_skb_cb *qcb;

        BUILD_BUG_ON(sizeof(skb->cb) < offsetof(struct qdisc_skb_cb, data) + sz);
        BUILD_BUG_ON(sizeof(qcb->data) < sz);
}

static inline int qdisc_qlen(const struct Qdisc *q)
{
        return q->q.qlen;
}

static inline struct qdisc_skb_cb *qdisc_skb_cb(const struct sk_buff *skb)
{
        return (struct qdisc_skb_cb *)skb->cb;
}

static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc)
{
        return &qdisc->q.lock;
}

static inline struct Qdisc *qdisc_root(const struct Qdisc *qdisc)
{
        struct Qdisc *q = rcu_dereference_rtnl(qdisc->dev_queue->qdisc);

        return q;
}

static inline struct Qdisc *qdisc_root_sleeping(const struct Qdisc *qdisc)
{
        return qdisc->dev_queue->qdisc_sleeping;
}

/* The qdisc root lock is a mechanism by which the top level
 * of a qdisc tree can be locked from any qdisc node in the
 * forest.  This allows changing the configuration of some
 * aspect of the qdisc tree while blocking out asynchronous
 * qdisc access in the packet processing paths.
 *
 * It is only legal to do this when the root will not change
 * on us.  Otherwise we'll potentially lock the wrong qdisc
 * root.  This is enforced by holding the RTNL semaphore, which
 * all users of this lock accessor must do.
 */
static inline spinlock_t *qdisc_root_lock(const struct Qdisc *qdisc)
{
        struct Qdisc *root = qdisc_root(qdisc);

        ASSERT_RTNL();
        return qdisc_lock(root);
}

static inline spinlock_t *qdisc_root_sleeping_lock(const struct Qdisc *qdisc)
{
        struct Qdisc *root = qdisc_root_sleeping(qdisc);

        ASSERT_RTNL();
        return qdisc_lock(root);
}

static inline seqcount_t *qdisc_root_sleeping_running(const struct Qdisc *qdisc)
{
        struct Qdisc *root = qdisc_root_sleeping(qdisc);

        ASSERT_RTNL();
        return &root->running;
}

static inline struct net_device *qdisc_dev(const struct Qdisc *qdisc)
{
        return qdisc->dev_queue->dev;
}

static inline void sch_tree_lock(const struct Qdisc *q)
{
        spin_lock_bh(qdisc_root_sleeping_lock(q));
}

static inline void sch_tree_unlock(const struct Qdisc *q)
{
        spin_unlock_bh(qdisc_root_sleeping_lock(q));
}

extern struct Qdisc noop_qdisc;
extern struct Qdisc_ops noop_qdisc_ops;
extern struct Qdisc_ops pfifo_fast_ops;
extern struct Qdisc_ops mq_qdisc_ops;
extern struct Qdisc_ops noqueue_qdisc_ops;
extern const struct Qdisc_ops *default_qdisc_ops;
static inline const struct Qdisc_ops *
get_default_qdisc_ops(const struct net_device *dev, int ntx)
{
        return ntx < dev->real_num_tx_queues ?
                        default_qdisc_ops : &pfifo_fast_ops;
}

struct Qdisc_class_common {
        u32                     classid;
        struct hlist_node       hnode;
};

struct Qdisc_class_hash {
        struct hlist_head       *hash;
        unsigned int            hashsize;
        unsigned int            hashmask;
        unsigned int            hashelems;
};

static inline unsigned int qdisc_class_hash(u32 id, u32 mask)
{
        id ^= id >> 8;
        id ^= id >> 4;
        return id & mask;
}

static inline struct Qdisc_class_common *
qdisc_class_find(const struct Qdisc_class_hash *hash, u32 id)
{
        struct Qdisc_class_common *cl;
        unsigned int h;

        if (!id)
                return NULL;

        h = qdisc_class_hash(id, hash->hashmask);
        hlist_for_each_entry(cl, &hash->hash[h], hnode) {
                if (cl->classid == id)
                        return cl;
        }
        return NULL;
}
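
/* Lookup sketch (informal): classful qdiscs typically embed a
 * struct Qdisc_class_common in their per-class structure and resolve a
 * classid roughly like this (names are placeholders):
 *
 *      struct my_class {
 *              struct Qdisc_class_common common;
 *              // per-class state ...
 *      };
 *
 *      struct Qdisc_class_common *clc;
 *
 *      clc = qdisc_class_find(&q->clhash, classid);
 *      if (!clc)
 *              return NULL;
 *      return container_of(clc, struct my_class, common);
 */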

int qdisc_class_hash_init(struct Qdisc_class_hash *);
void qdisc_class_hash_insert(struct Qdisc_class_hash *,
                             struct Qdisc_class_common *);
void qdisc_class_hash_remove(struct Qdisc_class_hash *,
                             struct Qdisc_class_common *);
void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *);
void qdisc_class_hash_destroy(struct Qdisc_class_hash *);

void dev_init_scheduler(struct net_device *dev);
void dev_shutdown(struct net_device *dev);
void dev_activate(struct net_device *dev);
void dev_deactivate(struct net_device *dev);
void dev_deactivate_many(struct list_head *head);
struct Qdisc *dev_graft_qdisc(struct netdev_queue *dev_queue,
                              struct Qdisc *qdisc);
void qdisc_reset(struct Qdisc *qdisc);
void qdisc_destroy(struct Qdisc *qdisc);
void qdisc_tree_reduce_backlog(struct Qdisc *qdisc, unsigned int n,
                               unsigned int len);
struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
                          const struct Qdisc_ops *ops);
struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue,
                                const struct Qdisc_ops *ops, u32 parentid);
void __qdisc_calculate_pkt_len(struct sk_buff *skb,
                               const struct qdisc_size_table *stab);
int skb_do_redirect(struct sk_buff *);

static inline void skb_reset_tc(struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
        skb->tc_redirected = 0;
#endif
}

static inline bool skb_at_tc_ingress(const struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
        return skb->tc_at_ingress;
#else
        return false;
#endif
}

static inline bool skb_skip_tc_classify(struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
        if (skb->tc_skip_classify) {
                skb->tc_skip_classify = 0;
                return true;
        }
#endif
        return false;
}

/* Reset all TX qdiscs greater than index of a device.  */
static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
{
        struct Qdisc *qdisc;

        for (; i < dev->num_tx_queues; i++) {
                qdisc = rtnl_dereference(netdev_get_tx_queue(dev, i)->qdisc);
                if (qdisc) {
                        spin_lock_bh(qdisc_lock(qdisc));
                        qdisc_reset(qdisc);
                        spin_unlock_bh(qdisc_lock(qdisc));
                }
        }
}

static inline void qdisc_reset_all_tx(struct net_device *dev)
{
        qdisc_reset_all_tx_gt(dev, 0);
}

/* Are all TX queues of the device empty?  */
static inline bool qdisc_all_tx_empty(const struct net_device *dev)
{
        unsigned int i;

        rcu_read_lock();
        for (i = 0; i < dev->num_tx_queues; i++) {
                struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
                const struct Qdisc *q = rcu_dereference(txq->qdisc);

                if (q->q.qlen) {
                        rcu_read_unlock();
                        return false;
                }
        }
        rcu_read_unlock();
        return true;
}

/* Are any of the TX qdiscs changing?  */
static inline bool qdisc_tx_changing(const struct net_device *dev)
{
        unsigned int i;

        for (i = 0; i < dev->num_tx_queues; i++) {
                struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
                if (rcu_access_pointer(txq->qdisc) != txq->qdisc_sleeping)
                        return true;
        }
        return false;
}

/* Is the device using the noop qdisc on all queues?  */
static inline bool qdisc_tx_is_noop(const struct net_device *dev)
{
        unsigned int i;

        for (i = 0; i < dev->num_tx_queues; i++) {
                struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
                if (rcu_access_pointer(txq->qdisc) != &noop_qdisc)
                        return false;
        }
        return true;
}

static inline unsigned int qdisc_pkt_len(const struct sk_buff *skb)
{
        return qdisc_skb_cb(skb)->pkt_len;
}

/* additional qdisc xmit flags (NET_XMIT_MASK in linux/netdevice.h) */
enum net_xmit_qdisc_t {
        __NET_XMIT_STOLEN = 0x00010000,
        __NET_XMIT_BYPASS = 0x00020000,
};

#ifdef CONFIG_NET_CLS_ACT
#define net_xmit_drop_count(e)  ((e) & __NET_XMIT_STOLEN ? 0 : 1)
#else
#define net_xmit_drop_count(e)  (1)
#endif

static inline void qdisc_calculate_pkt_len(struct sk_buff *skb,
                                           const struct Qdisc *sch)
{
#ifdef CONFIG_NET_SCHED
        struct qdisc_size_table *stab = rcu_dereference_bh(sch->stab);

        if (stab)
                __qdisc_calculate_pkt_len(skb, stab);
#endif
}

static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                                struct sk_buff **to_free)
{
        qdisc_calculate_pkt_len(skb, sch);
        return sch->enqueue(skb, sch, to_free);
}
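
/* Enqueue-path note (informal): qdisc_enqueue() first fixes up
 * qdisc_skb_cb(skb)->pkt_len via the optional size table (stab), then
 * calls the qdisc's ->enqueue().  Packets the qdisc wants to discard are
 * not freed under the root lock; they are chained onto *to_free with
 * __qdisc_drop() (defined below) and released by the caller after the
 * lock is dropped.
 */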

static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
{
        return q->flags & TCQ_F_CPUSTATS;
}

static inline void _bstats_update(struct gnet_stats_basic_packed *bstats,
                                  __u64 bytes, __u32 packets)
{
        bstats->bytes += bytes;
        bstats->packets += packets;
}

static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
                                 const struct sk_buff *skb)
{
        _bstats_update(bstats,
                       qdisc_pkt_len(skb),
                       skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1);
}

static inline void _bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
                                      __u64 bytes, __u32 packets)
{
        u64_stats_update_begin(&bstats->syncp);
        _bstats_update(&bstats->bstats, bytes, packets);
        u64_stats_update_end(&bstats->syncp);
}

static inline void bstats_cpu_update(struct gnet_stats_basic_cpu *bstats,
                                     const struct sk_buff *skb)
{
        u64_stats_update_begin(&bstats->syncp);
        bstats_update(&bstats->bstats, skb);
        u64_stats_update_end(&bstats->syncp);
}

static inline void qdisc_bstats_cpu_update(struct Qdisc *sch,
                                           const struct sk_buff *skb)
{
        bstats_cpu_update(this_cpu_ptr(sch->cpu_bstats), skb);
}

static inline void qdisc_bstats_update(struct Qdisc *sch,
                                       const struct sk_buff *skb)
{
        bstats_update(&sch->bstats, skb);
}

static inline void qdisc_qstats_backlog_dec(struct Qdisc *sch,
                                            const struct sk_buff *skb)
{
        sch->qstats.backlog -= qdisc_pkt_len(skb);
}

static inline void qdisc_qstats_backlog_inc(struct Qdisc *sch,
                                            const struct sk_buff *skb)
{
        sch->qstats.backlog += qdisc_pkt_len(skb);
}

static inline void __qdisc_qstats_drop(struct Qdisc *sch, int count)
{
        sch->qstats.drops += count;
}

static inline void qstats_drop_inc(struct gnet_stats_queue *qstats)
{
        qstats->drops++;
}

static inline void qstats_overlimit_inc(struct gnet_stats_queue *qstats)
{
        qstats->overlimits++;
}

static inline void qdisc_qstats_drop(struct Qdisc *sch)
{
        qstats_drop_inc(&sch->qstats);
}

static inline void qdisc_qstats_cpu_drop(struct Qdisc *sch)
{
        this_cpu_inc(sch->cpu_qstats->drops);
}

static inline void qdisc_qstats_overlimit(struct Qdisc *sch)
{
        sch->qstats.overlimits++;
}
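
/* Stats note (informal): a qdisc with TCQ_F_CPUSTATS set (see
 * qdisc_is_percpu_stats()) accounts through the per-CPU helpers such as
 * qdisc_bstats_cpu_update() and qdisc_qstats_cpu_drop(); all other qdiscs
 * update the shared sch->bstats/sch->qstats under the qdisc lock with
 * qdisc_bstats_update() and friends.  For GSO packets bstats_update()
 * counts gso_segs packets, not one.
 */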

static inline void qdisc_skb_head_init(struct qdisc_skb_head *qh)
{
        qh->head = NULL;
        qh->tail = NULL;
        qh->qlen = 0;
}

static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
                                       struct qdisc_skb_head *qh)
{
        struct sk_buff *last = qh->tail;

        if (last) {
                skb->next = NULL;
                last->next = skb;
                qh->tail = skb;
        } else {
                qh->tail = skb;
                qh->head = skb;
        }
        qh->qlen++;
        qdisc_qstats_backlog_inc(sch, skb);

        return NET_XMIT_SUCCESS;
}

static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
{
        return __qdisc_enqueue_tail(skb, sch, &sch->q);
}

static inline struct sk_buff *__qdisc_dequeue_head(struct qdisc_skb_head *qh)
{
        struct sk_buff *skb = qh->head;

        if (likely(skb != NULL)) {
                qh->head = skb->next;
                qh->qlen--;
                if (qh->head == NULL)
                        qh->tail = NULL;
                skb->next = NULL;
        }

        return skb;
}

static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
{
        struct sk_buff *skb = __qdisc_dequeue_head(&sch->q);

        if (likely(skb != NULL)) {
                qdisc_qstats_backlog_dec(sch, skb);
                qdisc_bstats_update(sch, skb);
        }

        return skb;
}
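
/* FIFO sketch (informal): a simple packet-limited FIFO can be built
 * almost entirely from these helpers, roughly:
 *
 *      static int pfifo_like_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 *                                    struct sk_buff **to_free)
 *      {
 *              if (likely(sch->q.qlen < sch->limit))
 *                      return qdisc_enqueue_tail(skb, sch);
 *              return qdisc_drop(skb, sch, to_free);
 *      }
 *
 * with qdisc_dequeue_head() as the ->dequeue() hook.  The function name
 * above is illustrative; see net/sched/sch_fifo.c for the real version.
 */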

/* Instead of calling kfree_skb() while root qdisc lock is held,
 * queue the skb for future freeing at end of __dev_xmit_skb()
 */
static inline void __qdisc_drop(struct sk_buff *skb, struct sk_buff **to_free)
{
        skb->next = *to_free;
        *to_free = skb;
}

static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
                                                   struct qdisc_skb_head *qh,
                                                   struct sk_buff **to_free)
{
        struct sk_buff *skb = __qdisc_dequeue_head(qh);

        if (likely(skb != NULL)) {
                unsigned int len = qdisc_pkt_len(skb);

                qdisc_qstats_backlog_dec(sch, skb);
                __qdisc_drop(skb, to_free);
                return len;
        }

        return 0;
}

static inline unsigned int qdisc_queue_drop_head(struct Qdisc *sch,
                                                 struct sk_buff **to_free)
{
        return __qdisc_queue_drop_head(sch, &sch->q, to_free);
}

static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
{
        const struct qdisc_skb_head *qh = &sch->q;

        return qh->head;
}

/* generic pseudo peek method for non-work-conserving qdisc */
static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch)
{
        /* we can reuse ->gso_skb because peek isn't called for root qdiscs */
        if (!sch->gso_skb) {
                sch->gso_skb = sch->dequeue(sch);
                if (sch->gso_skb) {
                        /* it's still part of the queue */
                        qdisc_qstats_backlog_inc(sch, sch->gso_skb);
                        sch->q.qlen++;
                }
        }

        return sch->gso_skb;
}

/* use instead of qdisc->dequeue() for all qdiscs queried with ->peek() */
static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
{
        struct sk_buff *skb = sch->gso_skb;

        if (skb) {
                sch->gso_skb = NULL;
                qdisc_qstats_backlog_dec(sch, skb);
                sch->q.qlen--;
        } else {
                skb = sch->dequeue(sch);
        }

        return skb;
}
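
/* Peek/dequeue sketch (informal): shaping qdiscs such as TBF use this
 * pair on their child qdisc, along the lines of:
 *
 *      skb = child->ops->peek(child);
 *      if (skb && rate_allows(q, qdisc_pkt_len(skb)))  // placeholder check
 *              skb = qdisc_dequeue_peeked(child);
 *
 * The peeked skb stays accounted in the child's qlen/backlog until it is
 * actually dequeued, so peeking never loses packets.  rate_allows() above
 * stands in for the qdisc's own token/rate test.
 */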

static inline void __qdisc_reset_queue(struct qdisc_skb_head *qh)
{
        /*
         * We do not know the backlog in bytes of this list, it
         * is up to the caller to correct it
         */
        ASSERT_RTNL();
        if (qh->qlen) {
                rtnl_kfree_skbs(qh->head, qh->tail);

                qh->head = NULL;
                qh->tail = NULL;
                qh->qlen = 0;
        }
}

static inline void qdisc_reset_queue(struct Qdisc *sch)
{
        __qdisc_reset_queue(&sch->q);
        sch->qstats.backlog = 0;
}

static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
                                          struct Qdisc **pold)
{
        struct Qdisc *old;

        sch_tree_lock(sch);
        old = *pold;
        *pold = new;
        if (old != NULL) {
                unsigned int qlen = old->q.qlen;
                unsigned int backlog = old->qstats.backlog;

                qdisc_reset(old);
                qdisc_tree_reduce_backlog(old, qlen, backlog);
        }
        sch_tree_unlock(sch);

        return old;
}
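
/* Graft sketch (informal): a classful qdisc's ->graft() typically swaps a
 * child in one step and hands the old child back to the caller, roughly:
 *
 *      *old = qdisc_replace(sch, new, &q->qdisc);
 *      return 0;
 *
 * qdisc_replace() takes care of sch_tree_lock(), resetting the old child
 * and propagating the removed qlen/backlog up the tree.
 */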

static inline void rtnl_qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
{
        rtnl_kfree_skbs(skb, skb);
        qdisc_qstats_drop(sch);
}

static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch,
                             struct sk_buff **to_free)
{
        __qdisc_drop(skb, to_free);
        qdisc_qstats_drop(sch);

        return NET_XMIT_DROP;
}

/* Length to Time (L2T) lookup in a qdisc_rate_table, to determine how
 * long it will take to send a packet given its size.
 */
static inline u32 qdisc_l2t(struct qdisc_rate_table *rtab, unsigned int pktlen)
{
        int slot = pktlen + rtab->rate.cell_align + rtab->rate.overhead;

        if (slot < 0)
                slot = 0;
        slot >>= rtab->rate.cell_log;
        if (slot > 255)
                return rtab->data[255] * (slot >> 8) + rtab->data[slot & 0xFF];
        return rtab->data[slot];
}

struct psched_ratecfg {
        u64     rate_bytes_ps; /* bytes per second */
        u32     mult;
        u16     overhead;
        u8      linklayer;
        u8      shift;
};

static inline u64 psched_l2t_ns(const struct psched_ratecfg *r,
                                unsigned int len)
{
        len += r->overhead;

        if (unlikely(r->linklayer == TC_LINKLAYER_ATM))
                return ((u64)(DIV_ROUND_UP(len, 48) * 53) * r->mult) >> r->shift;

        return ((u64)len * r->mult) >> r->shift;
}
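
/* Rate math note (informal): psched_ratecfg_precompute() chooses mult and
 * shift such that (len * mult) >> shift approximates
 * len * NSEC_PER_SEC / rate_bytes_ps.  Rough example: at 1 Gbit/s
 * (rate_bytes_ps = 125,000,000), a 1500 byte packet maps to about
 * 1500 * 1e9 / 125e6 = 12,000 ns of transmit time, before any per-packet
 * overhead or ATM cell rounding.
 */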

void psched_ratecfg_precompute(struct psched_ratecfg *r,
                               const struct tc_ratespec *conf,
                               u64 rate64);

static inline void psched_ratecfg_getrate(struct tc_ratespec *res,
                                          const struct psched_ratecfg *r)
{
        memset(res, 0, sizeof(*res));

        /* legacy struct tc_ratespec has a 32bit @rate field
         * Qdisc using 64bit rate should add new attributes
         * in order to maintain compatibility.
         */
        res->rate = min_t(u64, r->rate_bytes_ps, ~0U);

        res->overhead = r->overhead;
        res->linklayer = (r->linklayer & TC_LINKLAYER_MASK);
}

#endif