net_sched: replace pr_warning with pr_warn
Prefer pr_warn(...) to pr_warning(...).

Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net>

commit c17988a90f
parent d4dd8aeefd
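As a quick illustration of the substitution applied below, here is a minimal kernel-style sketch (not part of this commit; the helper name and message are made up for this note) using the preferred pr_warn() spelling. Both pr_warn() and the older pr_warning() expand to a printk() at KERN_WARNING level, so format strings and arguments carry over unchanged:

	#include <linux/types.h>
	#include <linux/printk.h>

	/* Hypothetical helper, for illustration only: report a repaired
	 * quantum the way sch_cbq.c does, using pr_warn() rather than the
	 * pr_warning() alias being replaced by this commit.
	 */
	static void example_report_bad_quantum(u32 classid, long quantum)
	{
		pr_warn("example: class %08x has bad quantum==%ld, repaired.\n",
			classid, quantum);
	}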
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -1060,8 +1060,8 @@ static void cbq_normalize_quanta(struct cbq_sched_data *q, int prio)
 			}
 			if (cl->quantum <= 0 ||
 			    cl->quantum > 32*qdisc_dev(cl->qdisc)->mtu) {
-				pr_warning("CBQ: class %08x has bad quantum==%ld, repaired.\n",
-					   cl->common.classid, cl->quantum);
+				pr_warn("CBQ: class %08x has bad quantum==%ld, repaired.\n",
+					cl->common.classid, cl->quantum);
 				cl->quantum = qdisc_dev(cl->qdisc)->mtu/2 + 1;
 			}
 		}
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -303,8 +303,8 @@ static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
 		 * and don't need yet another qdisc as a bypass.
 		 */
 		if (p->mask[index] != 0xff || p->value[index])
-			pr_warning("dsmark_dequeue: unsupported protocol %d\n",
-				   ntohs(skb->protocol));
+			pr_warn("dsmark_dequeue: unsupported protocol %d\n",
+				ntohs(skb->protocol));
 		break;
 	}
 
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -370,8 +370,8 @@ static inline int gred_change_table_def(struct Qdisc *sch, struct nlattr *dps)
 
 	for (i = table->DPs; i < MAX_DPs; i++) {
 		if (table->tab[i]) {
-			pr_warning("GRED: Warning: Destroying "
-				   "shadowed VQ 0x%x\n", i);
+			pr_warn("GRED: Warning: Destroying shadowed VQ 0x%x\n",
+				i);
 			gred_destroy_vq(table->tab[i]);
 			table->tab[i] = NULL;
 		}
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -712,7 +712,7 @@ static s64 htb_do_events(struct htb_sched *q, const int level,
 
 	/* too much load - let's continue after a break for scheduling */
 	if (!(q->warned & HTB_WARN_TOOMANYEVENTS)) {
-		pr_warning("htb: too many events!\n");
+		pr_warn("htb: too many events!\n");
 		q->warned |= HTB_WARN_TOOMANYEVENTS;
 	}
 
@@ -1488,15 +1488,13 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
 		cl->quantum = min_t(u64, quantum, INT_MAX);
 
 		if (!hopt->quantum && cl->quantum < 1000) {
-			pr_warning(
-				"HTB: quantum of class %X is small. Consider r2q change.\n",
-				cl->common.classid);
+			pr_warn("HTB: quantum of class %X is small. Consider r2q change.\n",
+				cl->common.classid);
 			cl->quantum = 1000;
 		}
 		if (!hopt->quantum && cl->quantum > 200000) {
-			pr_warning(
-				"HTB: quantum of class %X is big. Consider r2q change.\n",
-				cl->common.classid);
+			pr_warn("HTB: quantum of class %X is big. Consider r2q change.\n",
+				cl->common.classid);
 			cl->quantum = 200000;
 		}
 		if (hopt->quantum)