diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index e25183333807143eafe86e4f0f55c94bcf8571b8..2f80d01d42a6d8b971345229d407f062df921b04 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -1060,8 +1060,8 @@ static void cbq_normalize_quanta(struct cbq_sched_data *q, int prio)
 			}
 			if (cl->quantum <= 0 ||
 			    cl->quantum > 32*qdisc_dev(cl->qdisc)->mtu) {
-				pr_warning("CBQ: class %08x has bad quantum==%ld, repaired.\n",
-					   cl->common.classid, cl->quantum);
+				pr_warn("CBQ: class %08x has bad quantum==%ld, repaired.\n",
+					cl->common.classid, cl->quantum);
 				cl->quantum = qdisc_dev(cl->qdisc)->mtu/2 + 1;
 			}
 		}
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index 0952fd2684e4894f3d5b11902d4e7e7f1fb3556d..7cd49e5f2dfe4548506a34af8f2302b7a7b0ec93 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -303,8 +303,8 @@ static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
 		 * and don't need yet another qdisc as a bypass.
 		 */
 		if (p->mask[index] != 0xff || p->value[index])
-			pr_warning("dsmark_dequeue: unsupported protocol %d\n",
-				   ntohs(skb->protocol));
+			pr_warn("dsmark_dequeue: unsupported protocol %d\n",
+				ntohs(skb->protocol));
 		break;
 	}
 
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index d42234c0f13bf4d4829930e0f6bcb30681ad4782..12cbc09157fcce67c314076e3cf0ed690f6f8818 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -370,8 +370,8 @@ static inline int gred_change_table_def(struct Qdisc *sch, struct nlattr *dps)
 
 	for (i = table->DPs; i < MAX_DPs; i++) {
 		if (table->tab[i]) {
-			pr_warning("GRED: Warning: Destroying "
-				   "shadowed VQ 0x%x\n", i);
+			pr_warn("GRED: Warning: Destroying shadowed VQ 0x%x\n",
+				i);
 			gred_destroy_vq(table->tab[i]);
 			table->tab[i] = NULL;
 		}
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 6b0e854b0115170a209a1d2a96a92ae867309716..e5988101e1fd3b4b3a3fd20c705a6912bcc886eb 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -712,7 +712,7 @@ static s64 htb_do_events(struct htb_sched *q, const int level,
 
 	/* too much load - let's continue after a break for scheduling */
 	if (!(q->warned & HTB_WARN_TOOMANYEVENTS)) {
-		pr_warning("htb: too many events!\n");
+		pr_warn("htb: too many events!\n");
 		q->warned |= HTB_WARN_TOOMANYEVENTS;
 	}
 
@@ -1488,15 +1488,13 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
 		cl->quantum = min_t(u64, quantum, INT_MAX);
 
 		if (!hopt->quantum && cl->quantum < 1000) {
-			pr_warning(
-				"HTB: quantum of class %X is small. Consider r2q change.\n",
-				cl->common.classid);
+			pr_warn("HTB: quantum of class %X is small. Consider r2q change.\n",
+				cl->common.classid);
 			cl->quantum = 1000;
 		}
 		if (!hopt->quantum && cl->quantum > 200000) {
-			pr_warning(
-				"HTB: quantum of class %X is big. Consider r2q change.\n",
-				cl->common.classid);
+			pr_warn("HTB: quantum of class %X is big. Consider r2q change.\n",
+				cl->common.classid);
 			cl->quantum = 200000;
 		}
 		if (hopt->quantum)