Merge branch 'net-sched-do-not-drop-root-lock-in-tcf_qevent_handle'
Petr Machata says:

====================
net: sched: Do not drop root lock in tcf_qevent_handle()

Mirred currently does not mix well with blocks executed after the qdisc
root lock is taken. This includes classification blocks (such as in PRIO,
ETS, DRR qdiscs) and qevents. The locking caused by the packet mirrored
by mirred can cause deadlocks: either when the thread of execution
attempts to take the lock a second time, or when two threads end up
waiting on each other's locks.

The qevent patchset attempted not to introduce further badness of this
sort, and dropped the lock before executing the qevent block. However,
this led to too little locking and races between qdisc configuration and
packet enqueue in the RED qdisc.

Before the deadlock issues are solved in a way that can be applied across
many qdiscs reasonably easily, do for qevents what is done for the
classification blocks and just keep holding the root lock. That is done
in patch #1.

Patch #2 then drops the now-unnecessary root_lock argument from
Qdisc_ops.enqueue.
====================

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit 4291dc1a56
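The deadlock described above can be sketched as follows. This is an illustrative simplification, not the actual kernel call chain; the nesting is condensed and function bodies are elided:

    /*
     * One thread, one qdisc q whose root lock is already held:
     *
     *   __dev_xmit_skb(skb, q, ...)
     *     spin_lock(qdisc_lock(q));           // root lock taken
     *     q->enqueue(skb, q, &to_free)
     *       tcf_qevent_handle(...)            // qevent block runs
     *         // act_mirred mirrors skb to a device whose egress
     *         // qdisc is q itself:
     *         __dev_xmit_skb(skb2, q, ...)
     *           spin_lock(qdisc_lock(q));     // same lock, same thread:
     *                                         // deadlock
     *
     * With two threads and two qdiscs mirroring into each other, each
     * thread instead blocks on the lock the other holds (an ABBA
     * deadlock), as the cover letter notes.
     */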
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -568,7 +568,7 @@ void tcf_qevent_destroy(struct tcf_qevent *qe, struct Qdisc *sch);
 int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index_attr,
                               struct netlink_ext_ack *extack);
 struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
-                                 spinlock_t *root_lock, struct sk_buff **to_free, int *ret);
+                                 struct sk_buff **to_free, int *ret);
 int tcf_qevent_dump(struct sk_buff *skb, int attr_name, struct tcf_qevent *qe);
 #else
 static inline int tcf_qevent_init(struct tcf_qevent *qe, struct Qdisc *sch,
@@ -591,7 +591,7 @@ static inline int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlatt
 
 static inline struct sk_buff *
 tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
-                 spinlock_t *root_lock, struct sk_buff **to_free, int *ret)
+                 struct sk_buff **to_free, int *ret)
 {
        return skb;
 }
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -57,7 +57,6 @@ struct qdisc_skb_head {
 struct Qdisc {
        int                     (*enqueue)(struct sk_buff *skb,
                                           struct Qdisc *sch,
-                                          spinlock_t *root_lock,
                                           struct sk_buff **to_free);
        struct sk_buff *        (*dequeue)(struct Qdisc *sch);
        unsigned int            flags;
@@ -242,7 +241,6 @@ struct Qdisc_ops {
 
        int                     (*enqueue)(struct sk_buff *skb,
                                           struct Qdisc *sch,
-                                          spinlock_t *root_lock,
                                           struct sk_buff **to_free);
        struct sk_buff *        (*dequeue)(struct Qdisc *);
        struct sk_buff *        (*peek)(struct Qdisc *);
@@ -790,11 +788,11 @@ static inline void qdisc_calculate_pkt_len(struct sk_buff *skb,
 #endif
 }
 
-static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
+static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                                struct sk_buff **to_free)
 {
        qdisc_calculate_pkt_len(skb, sch);
-       return sch->enqueue(skb, sch, root_lock, to_free);
+       return sch->enqueue(skb, sch, to_free);
 }
 
 static inline void _bstats_update(struct gnet_stats_basic_packed *bstats,
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3749,7 +3749,7 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
        qdisc_calculate_pkt_len(skb, q);
 
        if (q->flags & TCQ_F_NOLOCK) {
-               rc = q->enqueue(skb, q, NULL, &to_free) & NET_XMIT_MASK;
+               rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
                qdisc_run(q);
 
                if (unlikely(to_free))
@@ -3792,7 +3792,7 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
                qdisc_run_end(q);
                rc = NET_XMIT_SUCCESS;
        } else {
-               rc = q->enqueue(skb, q, root_lock, &to_free) & NET_XMIT_MASK;
+               rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
                if (qdisc_run_begin(q)) {
                        if (unlikely(contended)) {
                                spin_unlock(&q->busylock);
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -3822,7 +3822,7 @@ int tcf_qevent_validate_change(struct tcf_qevent *qe, struct nlattr *block_index
 EXPORT_SYMBOL(tcf_qevent_validate_change);
 
 struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, struct sk_buff *skb,
-                                 spinlock_t *root_lock, struct sk_buff **to_free, int *ret)
+                                 struct sk_buff **to_free, int *ret)
 {
        struct tcf_result cl_res;
        struct tcf_proto *fl;
@@ -3832,9 +3832,6 @@ struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, stru
 
        fl = rcu_dereference_bh(qe->filter_chain);
 
-       if (root_lock)
-               spin_unlock(root_lock);
-
        switch (tcf_classify(skb, fl, &cl_res, false)) {
        case TC_ACT_SHOT:
                qdisc_qstats_drop(sch);
@@ -3853,9 +3850,6 @@ struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch, stru
                return NULL;
        }
 
-       if (root_lock)
-               spin_lock(root_lock);
-
        return skb;
 }
 EXPORT_SYMBOL(tcf_qevent_handle);
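For orientation, the post-patch shape of the handler, reconstructed only from the hunks above (elided parts are marked with ...; treat this as a sketch, not authoritative kernel source):

    struct sk_buff *tcf_qevent_handle(struct tcf_qevent *qe, struct Qdisc *sch,
                                      struct sk_buff *skb,
                                      struct sk_buff **to_free, int *ret)
    {
            struct tcf_result cl_res;
            struct tcf_proto *fl;
            ...
            fl = rcu_dereference_bh(qe->filter_chain);

            /* The root lock is no longer dropped and re-taken around
             * classification; the qevent block now runs with the lock
             * held, as the classification blocks of PRIO/ETS/DRR do. */
            switch (tcf_classify(skb, fl, &cl_res, false)) {
            case TC_ACT_SHOT:
                    qdisc_qstats_drop(sch);
                    ...
                    return NULL;
            ...
            }

            return skb;
    }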
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -374,7 +374,7 @@ static struct tcf_block *atm_tc_tcf_block(struct Qdisc *sch, unsigned long cl,
 
 /* --------------------------- Qdisc operations ---------------------------- */
 
-static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
+static int atm_tc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                          struct sk_buff **to_free)
 {
        struct atm_qdisc_data *p = qdisc_priv(sch);
@@ -432,7 +432,7 @@ done:
 #endif
        }
 
-       ret = qdisc_enqueue(skb, flow->q, root_lock, to_free);
+       ret = qdisc_enqueue(skb, flow->q, to_free);
        if (ret != NET_XMIT_SUCCESS) {
 drop: __maybe_unused
                if (net_xmit_drop_count(ret)) {
--- a/net/sched/sch_blackhole.c
+++ b/net/sched/sch_blackhole.c
@@ -13,7 +13,7 @@
 #include <linux/skbuff.h>
 #include <net/pkt_sched.h>
 
-static int blackhole_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
+static int blackhole_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                             struct sk_buff **to_free)
 {
        qdisc_drop(skb, sch, to_free);
--- a/net/sched/sch_cake.c
+++ b/net/sched/sch_cake.c
@@ -1687,7 +1687,7 @@ hash:
 
 static void cake_reconfigure(struct Qdisc *sch);
 
-static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
+static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                        struct sk_buff **to_free)
 {
        struct cake_sched_data *q = qdisc_priv(sch);
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -356,7 +356,7 @@ cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl)
 }
 
 static int
-cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
+cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
            struct sk_buff **to_free)
 {
        struct cbq_sched_data *q = qdisc_priv(sch);
@@ -373,7 +373,7 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
                return ret;
        }
 
-       ret = qdisc_enqueue(skb, cl->q, root_lock, to_free);
+       ret = qdisc_enqueue(skb, cl->q, to_free);
        if (ret == NET_XMIT_SUCCESS) {
                sch->q.qlen++;
                cbq_mark_toplevel(q, cl);
--- a/net/sched/sch_cbs.c
+++ b/net/sched/sch_cbs.c
@@ -77,7 +77,7 @@ struct cbs_sched_data {
        s64 sendslope; /* in bytes/s */
        s64 idleslope; /* in bytes/s */
        struct qdisc_watchdog watchdog;
-       int (*enqueue)(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
+       int (*enqueue)(struct sk_buff *skb, struct Qdisc *sch,
                       struct sk_buff **to_free);
        struct sk_buff *(*dequeue)(struct Qdisc *sch);
        struct Qdisc *qdisc;
@@ -85,13 +85,13 @@ struct cbs_sched_data {
 };
 
 static int cbs_child_enqueue(struct sk_buff *skb, struct Qdisc *sch,
-                            struct Qdisc *child, spinlock_t *root_lock,
+                            struct Qdisc *child,
                             struct sk_buff **to_free)
 {
        unsigned int len = qdisc_pkt_len(skb);
        int err;
 
-       err = child->ops->enqueue(skb, child, root_lock, to_free);
+       err = child->ops->enqueue(skb, child, to_free);
        if (err != NET_XMIT_SUCCESS)
                return err;
 
@@ -101,16 +101,16 @@ static int cbs_child_enqueue(struct sk_buff *skb, struct Qdisc *sch,
        return NET_XMIT_SUCCESS;
 }
 
-static int cbs_enqueue_offload(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
+static int cbs_enqueue_offload(struct sk_buff *skb, struct Qdisc *sch,
                               struct sk_buff **to_free)
 {
        struct cbs_sched_data *q = qdisc_priv(sch);
        struct Qdisc *qdisc = q->qdisc;
 
-       return cbs_child_enqueue(skb, sch, qdisc, root_lock, to_free);
+       return cbs_child_enqueue(skb, sch, qdisc, to_free);
 }
 
-static int cbs_enqueue_soft(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
+static int cbs_enqueue_soft(struct sk_buff *skb, struct Qdisc *sch,
                            struct sk_buff **to_free)
 {
        struct cbs_sched_data *q = qdisc_priv(sch);
@@ -124,15 +124,15 @@ static int cbs_enqueue_soft(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *
                q->last = ktime_get_ns();
        }
 
-       return cbs_child_enqueue(skb, sch, qdisc, root_lock, to_free);
+       return cbs_child_enqueue(skb, sch, qdisc, to_free);
 }
 
-static int cbs_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
+static int cbs_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                       struct sk_buff **to_free)
 {
        struct cbs_sched_data *q = qdisc_priv(sch);
 
-       return q->enqueue(skb, sch, root_lock, to_free);
+       return q->enqueue(skb, sch, to_free);
 }
 
 /* timediff is in ns, slope is in bytes/s */
--- a/net/sched/sch_choke.c
+++ b/net/sched/sch_choke.c
@@ -210,7 +210,7 @@ static bool choke_match_random(const struct choke_sched_data *q,
        return choke_match_flow(oskb, nskb);
 }
 
-static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
+static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                         struct sk_buff **to_free)
 {
        struct choke_sched_data *q = qdisc_priv(sch);
--- a/net/sched/sch_codel.c
+++ b/net/sched/sch_codel.c
@@ -108,7 +108,7 @@ static struct sk_buff *codel_qdisc_dequeue(struct Qdisc *sch)
        return skb;
 }
 
-static int codel_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
+static int codel_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                               struct sk_buff **to_free)
 {
        struct codel_sched_data *q;
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -337,7 +337,7 @@ static struct drr_class *drr_classify(struct sk_buff *skb, struct Qdisc *sch,
        return NULL;
 }
 
-static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
+static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                       struct sk_buff **to_free)
 {
        unsigned int len = qdisc_pkt_len(skb);
@@ -355,7 +355,7 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_
        }
 
        first = !cl->qdisc->q.qlen;
-       err = qdisc_enqueue(skb, cl->qdisc, root_lock, to_free);
+       err = qdisc_enqueue(skb, cl->qdisc, to_free);
        if (unlikely(err != NET_XMIT_SUCCESS)) {
                if (net_xmit_drop_count(err)) {
                        cl->qstats.drops++;
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -198,7 +198,7 @@ static struct tcf_block *dsmark_tcf_block(struct Qdisc *sch, unsigned long cl,
 
 /* --------------------------- Qdisc operations ---------------------------- */
 
-static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
+static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                          struct sk_buff **to_free)
 {
        unsigned int len = qdisc_pkt_len(skb);
@@ -267,7 +267,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *ro
                }
        }
 
-       err = qdisc_enqueue(skb, p->q, root_lock, to_free);
+       err = qdisc_enqueue(skb, p->q, to_free);
        if (err != NET_XMIT_SUCCESS) {
                if (net_xmit_drop_count(err))
                        qdisc_qstats_drop(sch);
--- a/net/sched/sch_etf.c
+++ b/net/sched/sch_etf.c
@@ -160,7 +160,7 @@ static void report_sock_error(struct sk_buff *skb, u32 err, u8 code)
 }
 
 static int etf_enqueue_timesortedlist(struct sk_buff *nskb, struct Qdisc *sch,
-                                     spinlock_t *root_lock, struct sk_buff **to_free)
+                                     struct sk_buff **to_free)
 {
        struct etf_sched_data *q = qdisc_priv(sch);
        struct rb_node **p = &q->head.rb_root.rb_node, *parent = NULL;
--- a/net/sched/sch_ets.c
+++ b/net/sched/sch_ets.c
@@ -415,7 +415,7 @@ static struct ets_class *ets_classify(struct sk_buff *skb, struct Qdisc *sch,
        return &q->classes[band];
 }
 
-static int ets_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
+static int ets_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                             struct sk_buff **to_free)
 {
        unsigned int len = qdisc_pkt_len(skb);
@@ -433,7 +433,7 @@ static int ets_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t
        }
 
        first = !cl->qdisc->q.qlen;
-       err = qdisc_enqueue(skb, cl->qdisc, root_lock, to_free);
+       err = qdisc_enqueue(skb, cl->qdisc, to_free);
        if (unlikely(err != NET_XMIT_SUCCESS)) {
                if (net_xmit_drop_count(err)) {
                        cl->qstats.drops++;
--- a/net/sched/sch_fifo.c
+++ b/net/sched/sch_fifo.c
@@ -16,7 +16,7 @@
 
 /* 1 band FIFO pseudo-"scheduler" */
 
-static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
+static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                         struct sk_buff **to_free)
 {
        if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <= sch->limit))
@@ -25,7 +25,7 @@ static int bfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *roo
        return qdisc_drop(skb, sch, to_free);
 }
 
-static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
+static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                         struct sk_buff **to_free)
 {
        if (likely(sch->q.qlen < sch->limit))
@@ -34,7 +34,7 @@ static int pfifo_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *roo
        return qdisc_drop(skb, sch, to_free);
 }
 
-static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
+static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                              struct sk_buff **to_free)
 {
        unsigned int prev_backlog;
|
@ -439,7 +439,7 @@ static bool fq_packet_beyond_horizon(const struct sk_buff *skb,
|
|||
return unlikely((s64)skb->tstamp > (s64)(q->ktime_cache + q->horizon));
|
||||
}
|
||||
|
||||
static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
|
||||
static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
|
||||
struct sk_buff **to_free)
|
||||
{
|
||||
struct fq_sched_data *q = qdisc_priv(sch);
|
||||
|
|
|
@ -181,7 +181,7 @@ static unsigned int fq_codel_drop(struct Qdisc *sch, unsigned int max_packets,
|
|||
return idx;
|
||||
}
|
||||
|
||||
static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
|
||||
static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch,
|
||||
struct sk_buff **to_free)
|
||||
{
|
||||
struct fq_codel_sched_data *q = qdisc_priv(sch);
|
||||
|
|
|
@ -125,7 +125,7 @@ static inline void flow_queue_add(struct fq_pie_flow *flow,
|
|||
skb->next = NULL;
|
||||
}
|
||||
|
||||
static int fq_pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
|
||||
static int fq_pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
|
||||
struct sk_buff **to_free)
|
||||
{
|
||||
struct fq_pie_sched_data *q = qdisc_priv(sch);
|
||||
|
|
|
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -520,7 +520,7 @@ EXPORT_SYMBOL(netif_carrier_off);
    cheaper.
  */
 
-static int noop_enqueue(struct sk_buff *skb, struct Qdisc *qdisc, spinlock_t *root_lock,
+static int noop_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
                        struct sk_buff **to_free)
 {
        __qdisc_drop(skb, to_free);
@@ -614,7 +614,7 @@ static inline struct skb_array *band2list(struct pfifo_fast_priv *priv,
        return &priv->q[band];
 }
 
-static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc, spinlock_t *root_lock,
+static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc,
                              struct sk_buff **to_free)
 {
        int band = prio2band[skb->priority & TC_PRIO_MAX];
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -161,7 +161,7 @@ static bool gred_per_vq_red_flags_used(struct gred_sched *table)
        return false;
 }
 
-static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
+static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                        struct sk_buff **to_free)
 {
        struct gred_sched_data *q = NULL;
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1528,8 +1528,8 @@ hfsc_dump_qdisc(struct Qdisc *sch, struct sk_buff *skb)
        return -1;
 }
 
-static int hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
-                       struct sk_buff **to_free)
+static int
+hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
 {
        unsigned int len = qdisc_pkt_len(skb);
        struct hfsc_class *cl;
@@ -1545,7 +1545,7 @@ static int hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root
        }
 
        first = !cl->qdisc->q.qlen;
-       err = qdisc_enqueue(skb, cl->qdisc, root_lock, to_free);
+       err = qdisc_enqueue(skb, cl->qdisc, to_free);
        if (unlikely(err != NET_XMIT_SUCCESS)) {
                if (net_xmit_drop_count(err)) {
                        cl->qstats.drops++;
--- a/net/sched/sch_hhf.c
+++ b/net/sched/sch_hhf.c
@@ -368,7 +368,7 @@ static unsigned int hhf_drop(struct Qdisc *sch, struct sk_buff **to_free)
        return bucket - q->buckets;
 }
 
-static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
+static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                       struct sk_buff **to_free)
 {
        struct hhf_sched_data *q = qdisc_priv(sch);
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -576,7 +576,7 @@ static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
        cl->prio_activity = 0;
 }
 
-static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
+static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                       struct sk_buff **to_free)
 {
        int uninitialized_var(ret);
@@ -599,7 +599,7 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_
                __qdisc_drop(skb, to_free);
                return ret;
 #endif
-       } else if ((ret = qdisc_enqueue(skb, cl->leaf.q, root_lock,
+       } else if ((ret = qdisc_enqueue(skb, cl->leaf.q,
                                        to_free)) != NET_XMIT_SUCCESS) {
                if (net_xmit_drop_count(ret)) {
                        qdisc_qstats_drop(sch);
--- a/net/sched/sch_multiq.c
+++ b/net/sched/sch_multiq.c
@@ -57,7 +57,7 @@ multiq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
 }
 
 static int
-multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
+multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
               struct sk_buff **to_free)
 {
        struct Qdisc *qdisc;
@@ -74,7 +74,7 @@ multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
        }
 #endif
 
-       ret = qdisc_enqueue(skb, qdisc, root_lock, to_free);
+       ret = qdisc_enqueue(skb, qdisc, to_free);
        if (ret == NET_XMIT_SUCCESS) {
                sch->q.qlen++;
                return NET_XMIT_SUCCESS;
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -431,7 +431,7 @@ static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch,
  *     NET_XMIT_DROP: queue length didn't change.
  *     NET_XMIT_SUCCESS: one skb was queued.
  */
-static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
+static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                         struct sk_buff **to_free)
 {
        struct netem_sched_data *q = qdisc_priv(sch);
@@ -480,7 +480,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *roo
                u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
 
                q->duplicate = 0;
-               rootq->enqueue(skb2, rootq, root_lock, to_free);
+               rootq->enqueue(skb2, rootq, to_free);
                q->duplicate = dupsave;
                rc_drop = NET_XMIT_SUCCESS;
        }
@@ -604,7 +604,7 @@ finish_segs:
                        skb_mark_not_on_list(segs);
                        qdisc_skb_cb(segs)->pkt_len = segs->len;
                        last_len = segs->len;
-                       rc = qdisc_enqueue(segs, sch, root_lock, to_free);
+                       rc = qdisc_enqueue(segs, sch, to_free);
                        if (rc != NET_XMIT_SUCCESS) {
                                if (net_xmit_drop_count(rc))
                                        qdisc_qstats_drop(sch);
@@ -720,7 +720,7 @@ deliver:
                                struct sk_buff *to_free = NULL;
                                int err;
 
-                               err = qdisc_enqueue(skb, q->qdisc, NULL, &to_free);
+                               err = qdisc_enqueue(skb, q->qdisc, &to_free);
                                kfree_skb_list(to_free);
                                if (err != NET_XMIT_SUCCESS &&
                                    net_xmit_drop_count(err)) {
--- a/net/sched/sch_pie.c
+++ b/net/sched/sch_pie.c
@@ -82,7 +82,7 @@ bool pie_drop_early(struct Qdisc *sch, struct pie_params *params,
 }
 EXPORT_SYMBOL_GPL(pie_drop_early);
 
-static int pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
+static int pie_qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                             struct sk_buff **to_free)
 {
        struct pie_sched_data *q = qdisc_priv(sch);
--- a/net/sched/sch_plug.c
+++ b/net/sched/sch_plug.c
@@ -84,7 +84,7 @@ struct plug_sched_data {
        u32 pkts_to_release;
 };
 
-static int plug_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
+static int plug_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                        struct sk_buff **to_free)
 {
        struct plug_sched_data *q = qdisc_priv(sch);
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -65,8 +65,8 @@ prio_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
        return q->queues[band];
 }
 
-static int prio_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
-                       struct sk_buff **to_free)
+static int
+prio_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
 {
        unsigned int len = qdisc_pkt_len(skb);
        struct Qdisc *qdisc;
@@ -83,7 +83,7 @@ static int prio_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root
        }
 #endif
 
-       ret = qdisc_enqueue(skb, qdisc, root_lock, to_free);
+       ret = qdisc_enqueue(skb, qdisc, to_free);
        if (ret == NET_XMIT_SUCCESS) {
                sch->qstats.backlog += len;
                sch->q.qlen++;
--- a/net/sched/sch_qfq.c
+++ b/net/sched/sch_qfq.c
@@ -1194,7 +1194,7 @@ static struct qfq_aggregate *qfq_choose_next_agg(struct qfq_sched *q)
        return agg;
 }
 
-static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
+static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                       struct sk_buff **to_free)
 {
        unsigned int len = qdisc_pkt_len(skb), gso_segs;
@@ -1225,7 +1225,7 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_
 
        gso_segs = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
        first = !cl->qdisc->q.qlen;
-       err = qdisc_enqueue(skb, cl->qdisc, root_lock, to_free);
+       err = qdisc_enqueue(skb, cl->qdisc, to_free);
        if (unlikely(err != NET_XMIT_SUCCESS)) {
                pr_debug("qfq_enqueue: enqueue failed %d\n", err);
                if (net_xmit_drop_count(err)) {
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -67,7 +67,7 @@ static int red_use_nodrop(struct red_sched_data *q)
        return q->flags & TC_RED_NODROP;
 }
 
-static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
+static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                       struct sk_buff **to_free)
 {
        struct red_sched_data *q = qdisc_priv(sch);
@@ -94,7 +94,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_
 
                if (INET_ECN_set_ce(skb)) {
                        q->stats.prob_mark++;
-                       skb = tcf_qevent_handle(&q->qe_mark, sch, skb, root_lock, to_free, &ret);
+                       skb = tcf_qevent_handle(&q->qe_mark, sch, skb, to_free, &ret);
                        if (!skb)
                                return NET_XMIT_CN | ret;
                } else if (!red_use_nodrop(q)) {
@@ -114,7 +114,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_
 
                if (INET_ECN_set_ce(skb)) {
                        q->stats.forced_mark++;
-                       skb = tcf_qevent_handle(&q->qe_mark, sch, skb, root_lock, to_free, &ret);
+                       skb = tcf_qevent_handle(&q->qe_mark, sch, skb, to_free, &ret);
                        if (!skb)
                                return NET_XMIT_CN | ret;
                } else if (!red_use_nodrop(q)) {
@@ -126,7 +126,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_
                break;
        }
 
-       ret = qdisc_enqueue(skb, child, root_lock, to_free);
+       ret = qdisc_enqueue(skb, child, to_free);
        if (likely(ret == NET_XMIT_SUCCESS)) {
                qdisc_qstats_backlog_inc(sch, skb);
                sch->q.qlen++;
@@ -137,7 +137,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_
        return ret;
 
 congestion_drop:
-       skb = tcf_qevent_handle(&q->qe_early_drop, sch, skb, root_lock, to_free, &ret);
+       skb = tcf_qevent_handle(&q->qe_early_drop, sch, skb, to_free, &ret);
        if (!skb)
                return NET_XMIT_CN | ret;
 
--- a/net/sched/sch_sfb.c
+++ b/net/sched/sch_sfb.c
@@ -276,7 +276,7 @@ static bool sfb_classify(struct sk_buff *skb, struct tcf_proto *fl,
        return false;
 }
 
-static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
+static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                       struct sk_buff **to_free)
 {
 
@@ -399,7 +399,7 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_
        }
 
 enqueue:
-       ret = qdisc_enqueue(skb, child, root_lock, to_free);
+       ret = qdisc_enqueue(skb, child, to_free);
        if (likely(ret == NET_XMIT_SUCCESS)) {
                qdisc_qstats_backlog_inc(sch, skb);
                sch->q.qlen++;
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -343,7 +343,7 @@ static int sfq_headdrop(const struct sfq_sched_data *q)
 }
 
 static int
-sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock, struct sk_buff **to_free)
+sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
 {
        struct sfq_sched_data *q = qdisc_priv(sch);
        unsigned int hash, dropped;
--- a/net/sched/sch_skbprio.c
+++ b/net/sched/sch_skbprio.c
@@ -65,7 +65,7 @@ static u16 calc_new_low_prio(const struct skbprio_sched_data *q)
        return SKBPRIO_MAX_PRIORITY - 1;
 }
 
-static int skbprio_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
+static int skbprio_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                           struct sk_buff **to_free)
 {
        const unsigned int max_priority = SKBPRIO_MAX_PRIORITY - 1;
--- a/net/sched/sch_taprio.c
+++ b/net/sched/sch_taprio.c
@@ -410,7 +410,7 @@ done:
        return txtime;
 }
 
-static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
+static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                          struct sk_buff **to_free)
 {
        struct taprio_sched *q = qdisc_priv(sch);
@@ -435,7 +435,7 @@ static int taprio_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *ro
        qdisc_qstats_backlog_inc(sch, skb);
        sch->q.qlen++;
 
-       return qdisc_enqueue(skb, child, root_lock, to_free);
+       return qdisc_enqueue(skb, child, to_free);
 }
 
 static struct sk_buff *taprio_peek_soft(struct Qdisc *sch)
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -187,7 +187,7 @@ static int tbf_offload_dump(struct Qdisc *sch)
 /* GSO packet is too big, segment it so that tbf can transmit
  * each segment in time
  */
-static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
+static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch,
                       struct sk_buff **to_free)
 {
        struct tbf_sched_data *q = qdisc_priv(sch);
@@ -206,7 +206,7 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_
                skb_mark_not_on_list(segs);
                qdisc_skb_cb(segs)->pkt_len = segs->len;
                len += segs->len;
-               ret = qdisc_enqueue(segs, q->qdisc, root_lock, to_free);
+               ret = qdisc_enqueue(segs, q->qdisc, to_free);
                if (ret != NET_XMIT_SUCCESS) {
                        if (net_xmit_drop_count(ret))
                                qdisc_qstats_drop(sch);
@@ -221,7 +221,7 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_
        return nb > 0 ? NET_XMIT_SUCCESS : NET_XMIT_DROP;
 }
 
-static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
+static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                       struct sk_buff **to_free)
 {
        struct tbf_sched_data *q = qdisc_priv(sch);
@@ -231,10 +231,10 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_
        if (qdisc_pkt_len(skb) > q->max_size) {
                if (skb_is_gso(skb) &&
                    skb_gso_validate_mac_len(skb, q->max_size))
-                       return tbf_segment(skb, sch, root_lock, to_free);
+                       return tbf_segment(skb, sch, to_free);
                return qdisc_drop(skb, sch, to_free);
        }
-       ret = qdisc_enqueue(skb, q->qdisc, root_lock, to_free);
+       ret = qdisc_enqueue(skb, q->qdisc, to_free);
        if (ret != NET_XMIT_SUCCESS) {
                if (net_xmit_drop_count(ret))
                        qdisc_qstats_drop(sch);
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -72,8 +72,8 @@ struct teql_sched_data {
 
 /* "teql*" qdisc routines */
 
-static int teql_enqueue(struct sk_buff *skb, struct Qdisc *sch, spinlock_t *root_lock,
-                       struct sk_buff **to_free)
+static int
+teql_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
 {
        struct net_device *dev = qdisc_dev(sch);
        struct teql_sched_data *q = qdisc_priv(sch);