[NET_SCHED]: sch_htb: use generic estimator

Use the generic estimator instead of reimplementing (parts of) it.
For compatibility always create a default estimator for new classes.

Signed-off-by: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
Patrick McHardy, 2007-07-02 22:48:13 -07:00; committed by David S. Miller
commit ee39e10c27 (parent 4bdf39911e)
1 changed file with 24 additions and 61 deletions
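Background for the diff below: the generic estimator (net/core/gen_estimator.c) samples each class's byte and packet counters on a periodic timer and keeps an exponentially weighted moving average, which is what HTB's private HTB_RATECM code did by hand with its own timer and hash-bucket scan. The default the patch installs for new classes, interval = 2 and ewma_log = 2, corresponds to the "4s interval, 16s averaging constant" noted in the added comment. Here is a minimal user-space sketch of that decoding, assuming the 2.6.22-era encoding (sampling period of (HZ/4) << (interval + 2) jiffies, EWMA weight 2^-ewma_log per sample); decode_estimator is an illustrative helper, not a kernel function:

#include <stdio.h>

#define HZ 1000	/* assumption: a 1000 Hz tick, purely for this example */

/* Illustrative only: mirrors how gen_estimator interprets
 * struct gnet_estimator { signed char interval; unsigned char ewma_log; }. */
static void decode_estimator(int interval, int ewma_log)
{
	unsigned int period = (HZ / 4) << (interval + 2);	/* jiffies */

	printf("sampling period: %u s\n", period / HZ);
	printf("time constant:   %u s\n", (period << ewma_log) / HZ);
}

int main(void)
{
	decode_estimator(2, 2);	/* the patch's defaults: 4 s and 16 s */
	return 0;
}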

--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c

@@ -69,8 +69,6 @@
 */
 
 #define HTB_HSIZE 16		/* classid hash size */
-#define HTB_EWMAC 2		/* rate average over HTB_EWMAC*HTB_HSIZE sec */
-#define HTB_RATECM 1		/* whether to use rate computer */
 #define HTB_HYSTERESIS 1	/* whether to use mode hysteresis for speedup */
 #define HTB_VER 0x30011		/* major must be matched with number suplied by TC as version */
 
@@ -95,12 +93,6 @@ struct htb_class {
 	struct tc_htb_xstats xstats;	/* our special stats */
 	int refcnt;		/* usage count of this class */
 
-#ifdef HTB_RATECM
-	/* rate measurement counters */
-	unsigned long rate_bytes, sum_bytes;
-	unsigned long rate_packets, sum_packets;
-#endif
-
 	/* topology */
 	int level;		/* our level (see above) */
 	struct htb_class *parent;	/* parent class */
@@ -194,10 +186,6 @@ struct htb_sched {
 	int rate2quantum;	/* quant = rate / rate2quantum */
 	psched_time_t now;	/* cached dequeue time */
 	struct qdisc_watchdog watchdog;
-#ifdef HTB_RATECM
-	struct timer_list rttim;	/* rate computer timer */
-	int recmp_bucket;	/* which hash bucket to recompute next */
-#endif
 
 	/* non shaped skbs; let them go directly thru */
 	struct sk_buff_head direct_queue;
@@ -677,34 +665,6 @@ static int htb_requeue(struct sk_buff *skb, struct Qdisc *sch)
 	return NET_XMIT_SUCCESS;
 }
 
-#ifdef HTB_RATECM
-#define RT_GEN(D,R) R+=D-(R/HTB_EWMAC);D=0
-static void htb_rate_timer(unsigned long arg)
-{
-	struct Qdisc *sch = (struct Qdisc *)arg;
-	struct htb_sched *q = qdisc_priv(sch);
-	struct hlist_node *p;
-	struct htb_class *cl;
-
-
-	/* lock queue so that we can muck with it */
-	spin_lock_bh(&sch->dev->queue_lock);
-
-	q->rttim.expires = jiffies + HZ;
-	add_timer(&q->rttim);
-
-	/* scan and recompute one bucket at time */
-	if (++q->recmp_bucket >= HTB_HSIZE)
-		q->recmp_bucket = 0;
-
-	hlist_for_each_entry(cl,p, q->hash + q->recmp_bucket, hlist) {
-		RT_GEN(cl->sum_bytes, cl->rate_bytes);
-		RT_GEN(cl->sum_packets, cl->rate_packets);
-	}
-	spin_unlock_bh(&sch->dev->queue_lock);
-}
-#endif
-
 /**
  * htb_charge_class - charges amount "bytes" to leaf and ancestors
  *
@@ -750,11 +710,6 @@ static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
 		if (cl->cmode != HTB_CAN_SEND)
 			htb_add_to_wait_tree(q, cl, diff);
 	}
-#ifdef HTB_RATECM
-	/* update rate counters */
-	cl->sum_bytes += bytes;
-	cl->sum_packets++;
-#endif
 
 	/* update byte stats except for leaves which are already updated */
 	if (cl->level) {
@@ -1095,13 +1050,6 @@ static int htb_init(struct Qdisc *sch, struct rtattr *opt)
 	if (q->direct_qlen < 2)	/* some devices have zero tx_queue_len */
 		q->direct_qlen = 2;
 
-#ifdef HTB_RATECM
-	init_timer(&q->rttim);
-	q->rttim.function = htb_rate_timer;
-	q->rttim.data = (unsigned long)sch;
-	q->rttim.expires = jiffies + HZ;
-	add_timer(&q->rttim);
-#endif
 	if ((q->rate2quantum = gopt->rate2quantum) < 1)
 		q->rate2quantum = 1;
 	q->defcls = gopt->defcls;
@@ -1175,11 +1123,6 @@ htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
 {
 	struct htb_class *cl = (struct htb_class *)arg;
 
-#ifdef HTB_RATECM
-	cl->rate_est.bps = cl->rate_bytes / (HTB_EWMAC * HTB_HSIZE);
-	cl->rate_est.pps = cl->rate_packets / (HTB_EWMAC * HTB_HSIZE);
-#endif
-
 	if (!cl->level && cl->un.leaf.q)
 		cl->qstats.qlen = cl->un.leaf.q->q.qlen;
 	cl->xstats.tokens = cl->tokens;
@@ -1277,6 +1220,7 @@ static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
 		BUG_TRAP(cl->un.leaf.q);
 		qdisc_destroy(cl->un.leaf.q);
 	}
+	gen_kill_estimator(&cl->bstats, &cl->rate_est);
 	qdisc_put_rtab(cl->rate);
 	qdisc_put_rtab(cl->ceil);
 
@@ -1305,9 +1249,6 @@ static void htb_destroy(struct Qdisc *sch)
 	struct htb_sched *q = qdisc_priv(sch);
 
 	qdisc_watchdog_cancel(&q->watchdog);
-#ifdef HTB_RATECM
-	del_timer_sync(&q->rttim);
-#endif
 	/* This line used to be after htb_destroy_class call below
 	   and surprisingly it worked in 2.4. But it must precede it
 	   because filter need its target class alive to be able to call
@@ -1403,6 +1344,20 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
 	if (!cl) {		/* new class */
 		struct Qdisc *new_q;
 		int prio;
+		struct {
+			struct rtattr		rta;
+			struct gnet_estimator	opt;
+		} est = {
+			.rta = {
+				.rta_len	= RTA_LENGTH(sizeof(est.opt)),
+				.rta_type	= TCA_RATE,
+			},
+			.opt = {
+				/* 4s interval, 16s averaging constant */
+				.interval	= 2,
+				.ewma_log	= 2,
+			},
+		};
 
 		/* check for valid classid */
 		if (!classid || TC_H_MAJ(classid ^ sch->handle)
@@ -1418,6 +1373,9 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
 		if ((cl = kzalloc(sizeof(*cl), GFP_KERNEL)) == NULL)
 			goto failure;
 
+		gen_new_estimator(&cl->bstats, &cl->rate_est,
+				  &sch->dev->queue_lock,
+				  tca[TCA_RATE-1] ? : &est.rta);
 		cl->refcnt = 1;
 		INIT_LIST_HEAD(&cl->sibling);
 		INIT_HLIST_NODE(&cl->hlist);
@@ -1469,8 +1427,13 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
 		hlist_add_head(&cl->hlist, q->hash + htb_hash(classid));
 		list_add_tail(&cl->sibling,
 			      parent ? &parent->children : &q->root);
-	} else
+	} else {
+		if (tca[TCA_RATE-1])
+			gen_replace_estimator(&cl->bstats, &cl->rate_est,
+					      &sch->dev->queue_lock,
+					      tca[TCA_RATE-1]);
 		sch_tree_lock(sch);
+	}
 
 	/* it used to be a nasty bug here, we have to check that node
 	   is really leaf before changing cl->un.leaf ! */
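With the private rate computer gone, the bps/pps figures that htb_dump_class_stats reports (and that tc -s class show prints) come from the shared estimator, and userspace can override the default 4s/16s settings by attaching its own TCA_RATE attribute when creating or changing a class. With an iproute2 that exposes the generic estimator keyword, that looks roughly like the following; the command is illustrative (device, classid, and rates are made up, and exact syntax depends on your tc version):

	# request a 1 s sampling interval and an 8 s time constant instead
	# of the 4 s / 16 s defaults this patch installs
	tc class add dev eth0 parent 1: classid 1:10 \
		estimator 1sec 8sec htb rate 1mbit ceil 2mbit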