net: sched: implement qstat helper routines

This adds helpers to manipulate qstats logic and replaces locations
that touch the counters directly. This simplifies future patches
to push qstats onto per cpu counters.

Signed-off-by: John Fastabend <john.r.fastabend@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
John Fastabend 2014-09-28 11:53:29 -07:00 committed by David S. Miller
parent 22e0f8b932
commit 25331d6ce4
25 changed files with 108 additions and 81 deletions

View File

@ -521,11 +521,38 @@ static inline void qdisc_bstats_update(struct Qdisc *sch,
bstats_update(&sch->bstats, skb); bstats_update(&sch->bstats, skb);
} }
/* Subtract skb's packet length from the qdisc's backlog byte counter. */
static inline void qdisc_qstats_backlog_dec(struct Qdisc *sch,
					    const struct sk_buff *skb)
{
	sch->qstats.backlog = sch->qstats.backlog - qdisc_pkt_len(skb);
}
/* Add skb's packet length to the qdisc's backlog byte counter. */
static inline void qdisc_qstats_backlog_inc(struct Qdisc *sch,
					    const struct sk_buff *skb)
{
	sch->qstats.backlog = sch->qstats.backlog + qdisc_pkt_len(skb);
}
/* Bump the qdisc's drop counter by an arbitrary count (batched drops). */
static inline void __qdisc_qstats_drop(struct Qdisc *sch, int count)
{
	sch->qstats.drops = sch->qstats.drops + count;
}
/* Record a single dropped packet against the qdisc. */
static inline void qdisc_qstats_drop(struct Qdisc *sch)
{
	sch->qstats.drops += 1;
}
/* Record a single overlimit event against the qdisc. */
static inline void qdisc_qstats_overlimit(struct Qdisc *sch)
{
	sch->qstats.overlimits += 1;
}
static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch, static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff_head *list) struct sk_buff_head *list)
{ {
__skb_queue_tail(list, skb); __skb_queue_tail(list, skb);
sch->qstats.backlog += qdisc_pkt_len(skb); qdisc_qstats_backlog_inc(sch, skb);
return NET_XMIT_SUCCESS; return NET_XMIT_SUCCESS;
} }
@ -541,7 +568,7 @@ static inline struct sk_buff *__qdisc_dequeue_head(struct Qdisc *sch,
struct sk_buff *skb = __skb_dequeue(list); struct sk_buff *skb = __skb_dequeue(list);
if (likely(skb != NULL)) { if (likely(skb != NULL)) {
sch->qstats.backlog -= qdisc_pkt_len(skb); qdisc_qstats_backlog_dec(sch, skb);
qdisc_bstats_update(sch, skb); qdisc_bstats_update(sch, skb);
} }
@ -560,7 +587,7 @@ static inline unsigned int __qdisc_queue_drop_head(struct Qdisc *sch,
if (likely(skb != NULL)) { if (likely(skb != NULL)) {
unsigned int len = qdisc_pkt_len(skb); unsigned int len = qdisc_pkt_len(skb);
sch->qstats.backlog -= len; qdisc_qstats_backlog_dec(sch, skb);
kfree_skb(skb); kfree_skb(skb);
return len; return len;
} }
@ -579,7 +606,7 @@ static inline struct sk_buff *__qdisc_dequeue_tail(struct Qdisc *sch,
struct sk_buff *skb = __skb_dequeue_tail(list); struct sk_buff *skb = __skb_dequeue_tail(list);
if (likely(skb != NULL)) if (likely(skb != NULL))
sch->qstats.backlog -= qdisc_pkt_len(skb); qdisc_qstats_backlog_dec(sch, skb);
return skb; return skb;
} }
@ -661,14 +688,14 @@ static inline unsigned int qdisc_queue_drop(struct Qdisc *sch)
static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch) static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
{ {
kfree_skb(skb); kfree_skb(skb);
sch->qstats.drops++; qdisc_qstats_drop(sch);
return NET_XMIT_DROP; return NET_XMIT_DROP;
} }
static inline int qdisc_reshape_fail(struct sk_buff *skb, struct Qdisc *sch) static inline int qdisc_reshape_fail(struct sk_buff *skb, struct Qdisc *sch)
{ {
sch->qstats.drops++; qdisc_qstats_drop(sch);
#ifdef CONFIG_NET_CLS_ACT #ifdef CONFIG_NET_CLS_ACT
if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch)) if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch))

View File

@ -763,7 +763,7 @@ void qdisc_tree_decrease_qlen(struct Qdisc *sch, unsigned int n)
cops->put(sch, cl); cops->put(sch, cl);
} }
sch->q.qlen -= n; sch->q.qlen -= n;
sch->qstats.drops += drops; __qdisc_qstats_drop(sch, drops);
} }
} }
EXPORT_SYMBOL(qdisc_tree_decrease_qlen); EXPORT_SYMBOL(qdisc_tree_decrease_qlen);

View File

@ -417,7 +417,7 @@ done:
if (ret != NET_XMIT_SUCCESS) { if (ret != NET_XMIT_SUCCESS) {
drop: __maybe_unused drop: __maybe_unused
if (net_xmit_drop_count(ret)) { if (net_xmit_drop_count(ret)) {
sch->qstats.drops++; qdisc_qstats_drop(sch);
if (flow) if (flow)
flow->qstats.drops++; flow->qstats.drops++;
} }

View File

@ -377,7 +377,7 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
#endif #endif
if (cl == NULL) { if (cl == NULL) {
if (ret & __NET_XMIT_BYPASS) if (ret & __NET_XMIT_BYPASS)
sch->qstats.drops++; qdisc_qstats_drop(sch);
kfree_skb(skb); kfree_skb(skb);
return ret; return ret;
} }
@ -395,7 +395,7 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
} }
if (net_xmit_drop_count(ret)) { if (net_xmit_drop_count(ret)) {
sch->qstats.drops++; qdisc_qstats_drop(sch);
cbq_mark_toplevel(q, cl); cbq_mark_toplevel(q, cl);
cl->qstats.drops++; cl->qstats.drops++;
} }
@ -650,11 +650,11 @@ static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
return 0; return 0;
} }
if (net_xmit_drop_count(ret)) if (net_xmit_drop_count(ret))
sch->qstats.drops++; qdisc_qstats_drop(sch);
return 0; return 0;
} }
sch->qstats.drops++; qdisc_qstats_drop(sch);
return -1; return -1;
} }
#endif #endif
@ -995,7 +995,7 @@ cbq_dequeue(struct Qdisc *sch)
*/ */
if (sch->q.qlen) { if (sch->q.qlen) {
sch->qstats.overlimits++; qdisc_qstats_overlimit(sch);
if (q->wd_expires) if (q->wd_expires)
qdisc_watchdog_schedule(&q->watchdog, qdisc_watchdog_schedule(&q->watchdog,
now + q->wd_expires); now + q->wd_expires);

View File

@ -127,7 +127,7 @@ static void choke_drop_by_idx(struct Qdisc *sch, unsigned int idx)
if (idx == q->tail) if (idx == q->tail)
choke_zap_tail_holes(q); choke_zap_tail_holes(q);
sch->qstats.backlog -= qdisc_pkt_len(skb); qdisc_qstats_backlog_dec(sch, skb);
qdisc_drop(skb, sch); qdisc_drop(skb, sch);
qdisc_tree_decrease_qlen(sch, 1); qdisc_tree_decrease_qlen(sch, 1);
--sch->q.qlen; --sch->q.qlen;
@ -302,7 +302,7 @@ static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch)
if (q->vars.qavg > p->qth_max) { if (q->vars.qavg > p->qth_max) {
q->vars.qcount = -1; q->vars.qcount = -1;
sch->qstats.overlimits++; qdisc_qstats_overlimit(sch);
if (use_harddrop(q) || !use_ecn(q) || if (use_harddrop(q) || !use_ecn(q) ||
!INET_ECN_set_ce(skb)) { !INET_ECN_set_ce(skb)) {
q->stats.forced_drop++; q->stats.forced_drop++;
@ -315,7 +315,7 @@ static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch)
q->vars.qcount = 0; q->vars.qcount = 0;
q->vars.qR = red_random(p); q->vars.qR = red_random(p);
sch->qstats.overlimits++; qdisc_qstats_overlimit(sch);
if (!use_ecn(q) || !INET_ECN_set_ce(skb)) { if (!use_ecn(q) || !INET_ECN_set_ce(skb)) {
q->stats.prob_drop++; q->stats.prob_drop++;
goto congestion_drop; goto congestion_drop;
@ -332,7 +332,7 @@ static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch)
q->tab[q->tail] = skb; q->tab[q->tail] = skb;
q->tail = (q->tail + 1) & q->tab_mask; q->tail = (q->tail + 1) & q->tab_mask;
++sch->q.qlen; ++sch->q.qlen;
sch->qstats.backlog += qdisc_pkt_len(skb); qdisc_qstats_backlog_inc(sch, skb);
return NET_XMIT_SUCCESS; return NET_XMIT_SUCCESS;
} }
@ -345,7 +345,7 @@ congestion_drop:
other_drop: other_drop:
if (ret & __NET_XMIT_BYPASS) if (ret & __NET_XMIT_BYPASS)
sch->qstats.drops++; qdisc_qstats_drop(sch);
kfree_skb(skb); kfree_skb(skb);
return ret; return ret;
} }
@ -365,7 +365,7 @@ static struct sk_buff *choke_dequeue(struct Qdisc *sch)
q->tab[q->head] = NULL; q->tab[q->head] = NULL;
choke_zap_head_holes(q); choke_zap_head_holes(q);
--sch->q.qlen; --sch->q.qlen;
sch->qstats.backlog -= qdisc_pkt_len(skb); qdisc_qstats_backlog_dec(sch, skb);
qdisc_bstats_update(sch, skb); qdisc_bstats_update(sch, skb);
return skb; return skb;
@ -460,7 +460,7 @@ static int choke_change(struct Qdisc *sch, struct nlattr *opt)
ntab[tail++] = skb; ntab[tail++] = skb;
continue; continue;
} }
sch->qstats.backlog -= qdisc_pkt_len(skb); qdisc_qstats_backlog_dec(sch, skb);
--sch->q.qlen; --sch->q.qlen;
qdisc_drop(skb, sch); qdisc_drop(skb, sch);
} }

View File

@ -149,7 +149,7 @@ static int codel_change(struct Qdisc *sch, struct nlattr *opt)
while (sch->q.qlen > sch->limit) { while (sch->q.qlen > sch->limit) {
struct sk_buff *skb = __skb_dequeue(&sch->q); struct sk_buff *skb = __skb_dequeue(&sch->q);
sch->qstats.backlog -= qdisc_pkt_len(skb); qdisc_qstats_backlog_dec(sch, skb);
qdisc_drop(skb, sch); qdisc_drop(skb, sch);
} }
qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen); qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);

View File

@ -360,7 +360,7 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch)
cl = drr_classify(skb, sch, &err); cl = drr_classify(skb, sch, &err);
if (cl == NULL) { if (cl == NULL) {
if (err & __NET_XMIT_BYPASS) if (err & __NET_XMIT_BYPASS)
sch->qstats.drops++; qdisc_qstats_drop(sch);
kfree_skb(skb); kfree_skb(skb);
return err; return err;
} }
@ -369,7 +369,7 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch)
if (unlikely(err != NET_XMIT_SUCCESS)) { if (unlikely(err != NET_XMIT_SUCCESS)) {
if (net_xmit_drop_count(err)) { if (net_xmit_drop_count(err)) {
cl->qstats.drops++; cl->qstats.drops++;
sch->qstats.drops++; qdisc_qstats_drop(sch);
} }
return err; return err;
} }

View File

@ -258,7 +258,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
err = qdisc_enqueue(skb, p->q); err = qdisc_enqueue(skb, p->q);
if (err != NET_XMIT_SUCCESS) { if (err != NET_XMIT_SUCCESS) {
if (net_xmit_drop_count(err)) if (net_xmit_drop_count(err))
sch->qstats.drops++; qdisc_qstats_drop(sch);
return err; return err;
} }

View File

@ -42,7 +42,7 @@ static int pfifo_tail_enqueue(struct sk_buff *skb, struct Qdisc *sch)
/* queue full, remove one skb to fulfill the limit */ /* queue full, remove one skb to fulfill the limit */
__qdisc_queue_drop_head(sch, &sch->q); __qdisc_queue_drop_head(sch, &sch->q);
sch->qstats.drops++; qdisc_qstats_drop(sch);
qdisc_enqueue_tail(skb, sch); qdisc_enqueue_tail(skb, sch);
return NET_XMIT_CN; return NET_XMIT_CN;

View File

@ -290,7 +290,7 @@ static struct sk_buff *fq_dequeue_head(struct Qdisc *sch, struct fq_flow *flow)
flow->head = skb->next; flow->head = skb->next;
skb->next = NULL; skb->next = NULL;
flow->qlen--; flow->qlen--;
sch->qstats.backlog -= qdisc_pkt_len(skb); qdisc_qstats_backlog_dec(sch, skb);
sch->q.qlen--; sch->q.qlen--;
} }
return skb; return skb;
@ -371,7 +371,7 @@ static int fq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
f->qlen++; f->qlen++;
if (skb_is_retransmit(skb)) if (skb_is_retransmit(skb))
q->stat_tcp_retrans++; q->stat_tcp_retrans++;
sch->qstats.backlog += qdisc_pkt_len(skb); qdisc_qstats_backlog_inc(sch, skb);
if (fq_flow_is_detached(f)) { if (fq_flow_is_detached(f)) {
fq_flow_add_tail(&q->new_flows, f); fq_flow_add_tail(&q->new_flows, f);
if (time_after(jiffies, f->age + q->flow_refill_delay)) if (time_after(jiffies, f->age + q->flow_refill_delay))

View File

@ -164,8 +164,8 @@ static unsigned int fq_codel_drop(struct Qdisc *sch)
q->backlogs[idx] -= len; q->backlogs[idx] -= len;
kfree_skb(skb); kfree_skb(skb);
sch->q.qlen--; sch->q.qlen--;
sch->qstats.drops++; qdisc_qstats_drop(sch);
sch->qstats.backlog -= len; qdisc_qstats_backlog_dec(sch, skb);
flow->dropped++; flow->dropped++;
return idx; return idx;
} }
@ -180,7 +180,7 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
idx = fq_codel_classify(skb, sch, &ret); idx = fq_codel_classify(skb, sch, &ret);
if (idx == 0) { if (idx == 0) {
if (ret & __NET_XMIT_BYPASS) if (ret & __NET_XMIT_BYPASS)
sch->qstats.drops++; qdisc_qstats_drop(sch);
kfree_skb(skb); kfree_skb(skb);
return ret; return ret;
} }
@ -190,7 +190,7 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
flow = &q->flows[idx]; flow = &q->flows[idx];
flow_queue_add(flow, skb); flow_queue_add(flow, skb);
q->backlogs[idx] += qdisc_pkt_len(skb); q->backlogs[idx] += qdisc_pkt_len(skb);
sch->qstats.backlog += qdisc_pkt_len(skb); qdisc_qstats_backlog_inc(sch, skb);
if (list_empty(&flow->flowchain)) { if (list_empty(&flow->flowchain)) {
list_add_tail(&flow->flowchain, &q->new_flows); list_add_tail(&flow->flowchain, &q->new_flows);

View File

@ -209,7 +209,7 @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch)
break; break;
case RED_PROB_MARK: case RED_PROB_MARK:
sch->qstats.overlimits++; qdisc_qstats_overlimit(sch);
if (!gred_use_ecn(t) || !INET_ECN_set_ce(skb)) { if (!gred_use_ecn(t) || !INET_ECN_set_ce(skb)) {
q->stats.prob_drop++; q->stats.prob_drop++;
goto congestion_drop; goto congestion_drop;
@ -219,7 +219,7 @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch)
break; break;
case RED_HARD_MARK: case RED_HARD_MARK:
sch->qstats.overlimits++; qdisc_qstats_overlimit(sch);
if (gred_use_harddrop(t) || !gred_use_ecn(t) || if (gred_use_harddrop(t) || !gred_use_ecn(t) ||
!INET_ECN_set_ce(skb)) { !INET_ECN_set_ce(skb)) {
q->stats.forced_drop++; q->stats.forced_drop++;

View File

@ -1591,7 +1591,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
cl = hfsc_classify(skb, sch, &err); cl = hfsc_classify(skb, sch, &err);
if (cl == NULL) { if (cl == NULL) {
if (err & __NET_XMIT_BYPASS) if (err & __NET_XMIT_BYPASS)
sch->qstats.drops++; qdisc_qstats_drop(sch);
kfree_skb(skb); kfree_skb(skb);
return err; return err;
} }
@ -1600,7 +1600,7 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
if (unlikely(err != NET_XMIT_SUCCESS)) { if (unlikely(err != NET_XMIT_SUCCESS)) {
if (net_xmit_drop_count(err)) { if (net_xmit_drop_count(err)) {
cl->qstats.drops++; cl->qstats.drops++;
sch->qstats.drops++; qdisc_qstats_drop(sch);
} }
return err; return err;
} }
@ -1643,7 +1643,7 @@ hfsc_dequeue(struct Qdisc *sch)
*/ */
cl = vttree_get_minvt(&q->root, cur_time); cl = vttree_get_minvt(&q->root, cur_time);
if (cl == NULL) { if (cl == NULL) {
sch->qstats.overlimits++; qdisc_qstats_overlimit(sch);
hfsc_schedule_watchdog(sch); hfsc_schedule_watchdog(sch);
return NULL; return NULL;
} }
@ -1698,7 +1698,7 @@ hfsc_drop(struct Qdisc *sch)
list_move_tail(&cl->dlist, &q->droplist); list_move_tail(&cl->dlist, &q->droplist);
} }
cl->qstats.drops++; cl->qstats.drops++;
sch->qstats.drops++; qdisc_qstats_drop(sch);
sch->q.qlen--; sch->q.qlen--;
return len; return len;
} }

View File

@ -376,8 +376,8 @@ static unsigned int hhf_drop(struct Qdisc *sch)
struct sk_buff *skb = dequeue_head(bucket); struct sk_buff *skb = dequeue_head(bucket);
sch->q.qlen--; sch->q.qlen--;
sch->qstats.drops++; qdisc_qstats_drop(sch);
sch->qstats.backlog -= qdisc_pkt_len(skb); qdisc_qstats_backlog_dec(sch, skb);
kfree_skb(skb); kfree_skb(skb);
} }
@ -395,7 +395,7 @@ static int hhf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
bucket = &q->buckets[idx]; bucket = &q->buckets[idx];
bucket_add(bucket, skb); bucket_add(bucket, skb);
sch->qstats.backlog += qdisc_pkt_len(skb); qdisc_qstats_backlog_inc(sch, skb);
if (list_empty(&bucket->bucketchain)) { if (list_empty(&bucket->bucketchain)) {
unsigned int weight; unsigned int weight;
@ -457,7 +457,7 @@ begin:
if (bucket->head) { if (bucket->head) {
skb = dequeue_head(bucket); skb = dequeue_head(bucket);
sch->q.qlen--; sch->q.qlen--;
sch->qstats.backlog -= qdisc_pkt_len(skb); qdisc_qstats_backlog_dec(sch, skb);
} }
if (!skb) { if (!skb) {

View File

@ -586,13 +586,13 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
#ifdef CONFIG_NET_CLS_ACT #ifdef CONFIG_NET_CLS_ACT
} else if (!cl) { } else if (!cl) {
if (ret & __NET_XMIT_BYPASS) if (ret & __NET_XMIT_BYPASS)
sch->qstats.drops++; qdisc_qstats_drop(sch);
kfree_skb(skb); kfree_skb(skb);
return ret; return ret;
#endif #endif
} else if ((ret = qdisc_enqueue(skb, cl->un.leaf.q)) != NET_XMIT_SUCCESS) { } else if ((ret = qdisc_enqueue(skb, cl->un.leaf.q)) != NET_XMIT_SUCCESS) {
if (net_xmit_drop_count(ret)) { if (net_xmit_drop_count(ret)) {
sch->qstats.drops++; qdisc_qstats_drop(sch);
cl->qstats.drops++; cl->qstats.drops++;
} }
return ret; return ret;
@ -925,7 +925,7 @@ ok:
goto ok; goto ok;
} }
} }
sch->qstats.overlimits++; qdisc_qstats_overlimit(sch);
if (likely(next_event > q->now)) { if (likely(next_event > q->now)) {
if (!test_bit(__QDISC_STATE_DEACTIVATED, if (!test_bit(__QDISC_STATE_DEACTIVATED,
&qdisc_root_sleeping(q->watchdog.qdisc)->state)) { &qdisc_root_sleeping(q->watchdog.qdisc)->state)) {

View File

@ -69,7 +69,7 @@ static int ingress_enqueue(struct sk_buff *skb, struct Qdisc *sch)
switch (result) { switch (result) {
case TC_ACT_SHOT: case TC_ACT_SHOT:
result = TC_ACT_SHOT; result = TC_ACT_SHOT;
sch->qstats.drops++; qdisc_qstats_drop(sch);
break; break;
case TC_ACT_STOLEN: case TC_ACT_STOLEN:
case TC_ACT_QUEUED: case TC_ACT_QUEUED:

View File

@ -75,7 +75,7 @@ multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
if (qdisc == NULL) { if (qdisc == NULL) {
if (ret & __NET_XMIT_BYPASS) if (ret & __NET_XMIT_BYPASS)
sch->qstats.drops++; qdisc_qstats_drop(sch);
kfree_skb(skb); kfree_skb(skb);
return ret; return ret;
} }
@ -87,7 +87,7 @@ multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
return NET_XMIT_SUCCESS; return NET_XMIT_SUCCESS;
} }
if (net_xmit_drop_count(ret)) if (net_xmit_drop_count(ret))
sch->qstats.drops++; qdisc_qstats_drop(sch);
return ret; return ret;
} }

View File

@ -429,12 +429,12 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
/* Drop packet? */ /* Drop packet? */
if (loss_event(q)) { if (loss_event(q)) {
if (q->ecn && INET_ECN_set_ce(skb)) if (q->ecn && INET_ECN_set_ce(skb))
sch->qstats.drops++; /* mark packet */ qdisc_qstats_drop(sch); /* mark packet */
else else
--count; --count;
} }
if (count == 0) { if (count == 0) {
sch->qstats.drops++; qdisc_qstats_drop(sch);
kfree_skb(skb); kfree_skb(skb);
return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
} }
@ -478,7 +478,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
if (unlikely(skb_queue_len(&sch->q) >= sch->limit)) if (unlikely(skb_queue_len(&sch->q) >= sch->limit))
return qdisc_reshape_fail(skb, sch); return qdisc_reshape_fail(skb, sch);
sch->qstats.backlog += qdisc_pkt_len(skb); qdisc_qstats_backlog_inc(sch, skb);
cb = netem_skb_cb(skb); cb = netem_skb_cb(skb);
if (q->gap == 0 || /* not doing reordering */ if (q->gap == 0 || /* not doing reordering */
@ -549,15 +549,14 @@ static unsigned int netem_drop(struct Qdisc *sch)
sch->q.qlen--; sch->q.qlen--;
skb->next = NULL; skb->next = NULL;
skb->prev = NULL; skb->prev = NULL;
len = qdisc_pkt_len(skb); qdisc_qstats_backlog_dec(sch, skb);
sch->qstats.backlog -= len;
kfree_skb(skb); kfree_skb(skb);
} }
} }
if (!len && q->qdisc && q->qdisc->ops->drop) if (!len && q->qdisc && q->qdisc->ops->drop)
len = q->qdisc->ops->drop(q->qdisc); len = q->qdisc->ops->drop(q->qdisc);
if (len) if (len)
sch->qstats.drops++; qdisc_qstats_drop(sch);
return len; return len;
} }
@ -575,7 +574,7 @@ tfifo_dequeue:
skb = __skb_dequeue(&sch->q); skb = __skb_dequeue(&sch->q);
if (skb) { if (skb) {
deliver: deliver:
sch->qstats.backlog -= qdisc_pkt_len(skb); qdisc_qstats_backlog_dec(sch, skb);
qdisc_unthrottled(sch); qdisc_unthrottled(sch);
qdisc_bstats_update(sch, skb); qdisc_bstats_update(sch, skb);
return skb; return skb;
@ -610,7 +609,7 @@ deliver:
if (unlikely(err != NET_XMIT_SUCCESS)) { if (unlikely(err != NET_XMIT_SUCCESS)) {
if (net_xmit_drop_count(err)) { if (net_xmit_drop_count(err)) {
sch->qstats.drops++; qdisc_qstats_drop(sch);
qdisc_tree_decrease_qlen(sch, 1); qdisc_tree_decrease_qlen(sch, 1);
} }
} }

View File

@ -232,7 +232,7 @@ static int pie_change(struct Qdisc *sch, struct nlattr *opt)
while (sch->q.qlen > sch->limit) { while (sch->q.qlen > sch->limit) {
struct sk_buff *skb = __skb_dequeue(&sch->q); struct sk_buff *skb = __skb_dequeue(&sch->q);
sch->qstats.backlog -= qdisc_pkt_len(skb); qdisc_qstats_backlog_dec(sch, skb);
qdisc_drop(skb, sch); qdisc_drop(skb, sch);
} }
qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen); qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);

View File

@ -77,7 +77,7 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch)
if (qdisc == NULL) { if (qdisc == NULL) {
if (ret & __NET_XMIT_BYPASS) if (ret & __NET_XMIT_BYPASS)
sch->qstats.drops++; qdisc_qstats_drop(sch);
kfree_skb(skb); kfree_skb(skb);
return ret; return ret;
} }
@ -89,7 +89,7 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch)
return NET_XMIT_SUCCESS; return NET_XMIT_SUCCESS;
} }
if (net_xmit_drop_count(ret)) if (net_xmit_drop_count(ret))
sch->qstats.drops++; qdisc_qstats_drop(sch);
return ret; return ret;
} }

View File

@ -1229,7 +1229,7 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
cl = qfq_classify(skb, sch, &err); cl = qfq_classify(skb, sch, &err);
if (cl == NULL) { if (cl == NULL) {
if (err & __NET_XMIT_BYPASS) if (err & __NET_XMIT_BYPASS)
sch->qstats.drops++; qdisc_qstats_drop(sch);
kfree_skb(skb); kfree_skb(skb);
return err; return err;
} }
@ -1249,7 +1249,7 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
pr_debug("qfq_enqueue: enqueue failed %d\n", err); pr_debug("qfq_enqueue: enqueue failed %d\n", err);
if (net_xmit_drop_count(err)) { if (net_xmit_drop_count(err)) {
cl->qstats.drops++; cl->qstats.drops++;
sch->qstats.drops++; qdisc_qstats_drop(sch);
} }
return err; return err;
} }

View File

@ -74,7 +74,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch)
break; break;
case RED_PROB_MARK: case RED_PROB_MARK:
sch->qstats.overlimits++; qdisc_qstats_overlimit(sch);
if (!red_use_ecn(q) || !INET_ECN_set_ce(skb)) { if (!red_use_ecn(q) || !INET_ECN_set_ce(skb)) {
q->stats.prob_drop++; q->stats.prob_drop++;
goto congestion_drop; goto congestion_drop;
@ -84,7 +84,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch)
break; break;
case RED_HARD_MARK: case RED_HARD_MARK:
sch->qstats.overlimits++; qdisc_qstats_overlimit(sch);
if (red_use_harddrop(q) || !red_use_ecn(q) || if (red_use_harddrop(q) || !red_use_ecn(q) ||
!INET_ECN_set_ce(skb)) { !INET_ECN_set_ce(skb)) {
q->stats.forced_drop++; q->stats.forced_drop++;
@ -100,7 +100,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch)
sch->q.qlen++; sch->q.qlen++;
} else if (net_xmit_drop_count(ret)) { } else if (net_xmit_drop_count(ret)) {
q->stats.pdrop++; q->stats.pdrop++;
sch->qstats.drops++; qdisc_qstats_drop(sch);
} }
return ret; return ret;
@ -142,7 +142,7 @@ static unsigned int red_drop(struct Qdisc *sch)
if (child->ops->drop && (len = child->ops->drop(child)) > 0) { if (child->ops->drop && (len = child->ops->drop(child)) > 0) {
q->stats.other++; q->stats.other++;
sch->qstats.drops++; qdisc_qstats_drop(sch);
sch->q.qlen--; sch->q.qlen--;
return len; return len;
} }

View File

@ -290,7 +290,7 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
struct flow_keys keys; struct flow_keys keys;
if (unlikely(sch->q.qlen >= q->limit)) { if (unlikely(sch->q.qlen >= q->limit)) {
sch->qstats.overlimits++; qdisc_qstats_overlimit(sch);
q->stats.queuedrop++; q->stats.queuedrop++;
goto drop; goto drop;
} }
@ -348,7 +348,7 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
sfb_skb_cb(skb)->hashes[slot] = 0; sfb_skb_cb(skb)->hashes[slot] = 0;
if (unlikely(minqlen >= q->max)) { if (unlikely(minqlen >= q->max)) {
sch->qstats.overlimits++; qdisc_qstats_overlimit(sch);
q->stats.bucketdrop++; q->stats.bucketdrop++;
goto drop; goto drop;
} }
@ -376,7 +376,7 @@ static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
} }
} }
if (sfb_rate_limit(skb, q)) { if (sfb_rate_limit(skb, q)) {
sch->qstats.overlimits++; qdisc_qstats_overlimit(sch);
q->stats.penaltydrop++; q->stats.penaltydrop++;
goto drop; goto drop;
} }
@ -411,7 +411,7 @@ enqueue:
increment_qlen(skb, q); increment_qlen(skb, q);
} else if (net_xmit_drop_count(ret)) { } else if (net_xmit_drop_count(ret)) {
q->stats.childdrop++; q->stats.childdrop++;
sch->qstats.drops++; qdisc_qstats_drop(sch);
} }
return ret; return ret;
@ -420,7 +420,7 @@ drop:
return NET_XMIT_CN; return NET_XMIT_CN;
other_drop: other_drop:
if (ret & __NET_XMIT_BYPASS) if (ret & __NET_XMIT_BYPASS)
sch->qstats.drops++; qdisc_qstats_drop(sch);
kfree_skb(skb); kfree_skb(skb);
return ret; return ret;
} }

View File

@ -331,8 +331,8 @@ drop:
sfq_dec(q, x); sfq_dec(q, x);
kfree_skb(skb); kfree_skb(skb);
sch->q.qlen--; sch->q.qlen--;
sch->qstats.drops++; qdisc_qstats_drop(sch);
sch->qstats.backlog -= len; qdisc_qstats_backlog_dec(sch, skb);
return len; return len;
} }
@ -379,7 +379,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
hash = sfq_classify(skb, sch, &ret); hash = sfq_classify(skb, sch, &ret);
if (hash == 0) { if (hash == 0) {
if (ret & __NET_XMIT_BYPASS) if (ret & __NET_XMIT_BYPASS)
sch->qstats.drops++; qdisc_qstats_drop(sch);
kfree_skb(skb); kfree_skb(skb);
return ret; return ret;
} }
@ -409,7 +409,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
break; break;
case RED_PROB_MARK: case RED_PROB_MARK:
sch->qstats.overlimits++; qdisc_qstats_overlimit(sch);
if (sfq_prob_mark(q)) { if (sfq_prob_mark(q)) {
/* We know we have at least one packet in queue */ /* We know we have at least one packet in queue */
if (sfq_headdrop(q) && if (sfq_headdrop(q) &&
@ -426,7 +426,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
goto congestion_drop; goto congestion_drop;
case RED_HARD_MARK: case RED_HARD_MARK:
sch->qstats.overlimits++; qdisc_qstats_overlimit(sch);
if (sfq_hard_mark(q)) { if (sfq_hard_mark(q)) {
/* We know we have at least one packet in queue */ /* We know we have at least one packet in queue */
if (sfq_headdrop(q) && if (sfq_headdrop(q) &&
@ -461,7 +461,7 @@ congestion_drop:
} }
enqueue: enqueue:
sch->qstats.backlog += qdisc_pkt_len(skb); qdisc_qstats_backlog_inc(sch, skb);
slot->backlog += qdisc_pkt_len(skb); slot->backlog += qdisc_pkt_len(skb);
slot_queue_add(slot, skb); slot_queue_add(slot, skb);
sfq_inc(q, x); sfq_inc(q, x);
@ -520,7 +520,7 @@ next_slot:
sfq_dec(q, a); sfq_dec(q, a);
qdisc_bstats_update(sch, skb); qdisc_bstats_update(sch, skb);
sch->q.qlen--; sch->q.qlen--;
sch->qstats.backlog -= qdisc_pkt_len(skb); qdisc_qstats_backlog_dec(sch, skb);
slot->backlog -= qdisc_pkt_len(skb); slot->backlog -= qdisc_pkt_len(skb);
/* Is the slot empty? */ /* Is the slot empty? */
if (slot->qlen == 0) { if (slot->qlen == 0) {
@ -586,7 +586,8 @@ static void sfq_rehash(struct Qdisc *sch)
if (x == SFQ_EMPTY_SLOT) { if (x == SFQ_EMPTY_SLOT) {
x = q->dep[0].next; /* get a free slot */ x = q->dep[0].next; /* get a free slot */
if (x >= SFQ_MAX_FLOWS) { if (x >= SFQ_MAX_FLOWS) {
drop: sch->qstats.backlog -= qdisc_pkt_len(skb); drop:
qdisc_qstats_backlog_dec(sch, skb);
kfree_skb(skb); kfree_skb(skb);
dropped++; dropped++;
continue; continue;

View File

@ -175,7 +175,7 @@ static int tbf_segment(struct sk_buff *skb, struct Qdisc *sch)
ret = qdisc_enqueue(segs, q->qdisc); ret = qdisc_enqueue(segs, q->qdisc);
if (ret != NET_XMIT_SUCCESS) { if (ret != NET_XMIT_SUCCESS) {
if (net_xmit_drop_count(ret)) if (net_xmit_drop_count(ret))
sch->qstats.drops++; qdisc_qstats_drop(sch);
} else { } else {
nb++; nb++;
} }
@ -201,7 +201,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc *sch)
ret = qdisc_enqueue(skb, q->qdisc); ret = qdisc_enqueue(skb, q->qdisc);
if (ret != NET_XMIT_SUCCESS) { if (ret != NET_XMIT_SUCCESS) {
if (net_xmit_drop_count(ret)) if (net_xmit_drop_count(ret))
sch->qstats.drops++; qdisc_qstats_drop(sch);
return ret; return ret;
} }
@ -216,7 +216,7 @@ static unsigned int tbf_drop(struct Qdisc *sch)
if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) { if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) {
sch->q.qlen--; sch->q.qlen--;
sch->qstats.drops++; qdisc_qstats_drop(sch);
} }
return len; return len;
} }
@ -281,7 +281,7 @@ static struct sk_buff *tbf_dequeue(struct Qdisc *sch)
(cf. CSZ, HPFQ, HFSC) (cf. CSZ, HPFQ, HFSC)
*/ */
sch->qstats.overlimits++; qdisc_qstats_overlimit(sch);
} }
return NULL; return NULL;
} }