Commit c4df1bdd authored by David S. Miller

Merge branch 'net-sched-fix-stats-accounting-for-child-NOLOCK-qdiscs'

Paolo Abeni says:

====================
net: sched: fix stats accounting for child NOLOCK qdiscs

Currently, stats accounting for NOLOCK qdiscs enslaved to classful (locked)
qdiscs is buggy. Per-CPU values are ignored in most places, so a stats dump
in the above scenario always reports a zero-length backlog, and the parent
backlog length is not updated correctly on NOLOCK qdisc removal.

The first patch addresses stats dumping, and the second one child qdisc
removal. I'm targeting the net tree as this is a bugfix, but it could be
moved to net-next due to the relatively large diffstat.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
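
For context: the core of the fix is that a NOLOCK qdisc keeps its queue
counters per CPU, so any consumer must sum them (as the series does via
qdisc_qlen_sum()) instead of reading sch->q.qlen directly. Below is a minimal
user-space sketch of that summation; the types, the NR_CPUS constant, and the
sample values are illustrative stand-ins, not the kernel's.

/* Build: cc -std=c99 -o qlen_sum qlen_sum.c */
#include <stdio.h>

#define NR_CPUS 4   /* hypothetical CPU count, just for the sketch */

struct queue_stats { unsigned int qlen; unsigned int backlog; };

struct qdisc_sketch {
	int percpu_stats;                   /* set for NOLOCK qdiscs */
	struct queue_stats shared;          /* lock-protected counters */
	struct queue_stats percpu[NR_CPUS]; /* per-CPU counters */
};

/* Mirrors the idea of qdisc_qlen_sum(): fold the per-CPU qlen values
 * into a single total instead of trusting any one counter. */
static unsigned int qlen_sum(const struct qdisc_sketch *q)
{
	unsigned int qlen = q->shared.qlen;

	if (q->percpu_stats)
		for (int cpu = 0; cpu < NR_CPUS; cpu++)
			qlen += q->percpu[cpu].qlen;
	return qlen;
}

int main(void)
{
	struct qdisc_sketch q = {
		.percpu_stats = 1,
		.percpu = { { 2, 0 }, { 3, 0 }, { 0, 0 }, { 5, 0 } },
	};

	/* Reading only the shared counter would report 0 here, which is
	 * exactly the bug the series fixes in the stats-dump paths. */
	printf("summed qlen: %u\n", qlen_sum(&q)); /* prints 10 */
	return 0;
}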
@@ -923,6 +923,41 @@ static inline void qdisc_qstats_overlimit(struct Qdisc *sch)
 	sch->qstats.overlimits++;
 }
 
+static inline int qdisc_qstats_copy(struct gnet_dump *d, struct Qdisc *sch)
+{
+	__u32 qlen = qdisc_qlen_sum(sch);
+
+	return gnet_stats_copy_queue(d, sch->cpu_qstats, &sch->qstats, qlen);
+}
+
+static inline void qdisc_qstats_qlen_backlog(struct Qdisc *sch, __u32 *qlen,
+					     __u32 *backlog)
+{
+	struct gnet_stats_queue qstats = { 0 };
+	__u32 len = qdisc_qlen_sum(sch);
+
+	__gnet_stats_copy_queue(&qstats, sch->cpu_qstats, &sch->qstats, len);
+	*qlen = qstats.qlen;
+	*backlog = qstats.backlog;
+}
+
+static inline void qdisc_tree_flush_backlog(struct Qdisc *sch)
+{
+	__u32 qlen, backlog;
+
+	qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
+	qdisc_tree_reduce_backlog(sch, qlen, backlog);
+}
+
+static inline void qdisc_purge_queue(struct Qdisc *sch)
+{
+	__u32 qlen, backlog;
+
+	qdisc_qstats_qlen_backlog(sch, &qlen, &backlog);
+	qdisc_reset(sch);
+	qdisc_tree_reduce_backlog(sch, qlen, backlog);
+}
+
 static inline void qdisc_skb_head_init(struct qdisc_skb_head *qh)
 {
 	qh->head = NULL;
@@ -1106,13 +1141,8 @@ static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
 	sch_tree_lock(sch);
 	old = *pold;
 	*pold = new;
-	if (old != NULL) {
-		unsigned int qlen = old->q.qlen;
-		unsigned int backlog = old->qstats.backlog;
-
-		qdisc_reset(old);
-		qdisc_tree_reduce_backlog(old, qlen, backlog);
-	}
+	if (old != NULL)
+		qdisc_tree_flush_backlog(old);
 	sch_tree_unlock(sch);
 
 	return old;
......
@@ -1358,9 +1358,11 @@ cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
 {
 	struct cbq_sched_data *q = qdisc_priv(sch);
 	struct cbq_class *cl = (struct cbq_class *)arg;
+	__u32 qlen;
 
 	cl->xstats.avgidle = cl->avgidle;
 	cl->xstats.undertime = 0;
+	qdisc_qstats_qlen_backlog(cl->q, &qlen, &cl->qstats.backlog);
+
 	if (cl->undertime != PSCHED_PASTPERFECT)
 		cl->xstats.undertime = cl->undertime - q->now;
 
@@ -1368,7 +1370,7 @@ cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
 	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
 				  d, NULL, &cl->bstats) < 0 ||
 	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
-	    gnet_stats_copy_queue(d, NULL, &cl->qstats, cl->q->q.qlen) < 0)
+	    gnet_stats_copy_queue(d, NULL, &cl->qstats, qlen) < 0)
 		return -1;
 
 	return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
@@ -1665,17 +1667,13 @@ static int cbq_delete(struct Qdisc *sch, unsigned long arg)
 {
 	struct cbq_sched_data *q = qdisc_priv(sch);
 	struct cbq_class *cl = (struct cbq_class *)arg;
-	unsigned int qlen, backlog;
 
 	if (cl->filters || cl->children || cl == &q->link)
 		return -EBUSY;
 
 	sch_tree_lock(sch);
 
-	qlen = cl->q->q.qlen;
-	backlog = cl->q->qstats.backlog;
-	qdisc_reset(cl->q);
-	qdisc_tree_reduce_backlog(cl->q, qlen, backlog);
+	qdisc_purge_queue(cl->q);
 
 	if (cl->next_alive)
 		cbq_deactivate_class(cl);
......
@@ -50,15 +50,6 @@ static struct drr_class *drr_find_class(struct Qdisc *sch, u32 classid)
 	return container_of(clc, struct drr_class, common);
 }
 
-static void drr_purge_queue(struct drr_class *cl)
-{
-	unsigned int len = cl->qdisc->q.qlen;
-	unsigned int backlog = cl->qdisc->qstats.backlog;
-
-	qdisc_reset(cl->qdisc);
-	qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
-}
-
 static const struct nla_policy drr_policy[TCA_DRR_MAX + 1] = {
 	[TCA_DRR_QUANTUM] = { .type = NLA_U32 },
 };
@@ -167,7 +158,7 @@ static int drr_delete_class(struct Qdisc *sch, unsigned long arg)
 
 	sch_tree_lock(sch);
 
-	drr_purge_queue(cl);
+	qdisc_purge_queue(cl->qdisc);
 	qdisc_class_hash_remove(&q->clhash, &cl->common);
 
 	sch_tree_unlock(sch);
@@ -269,7 +260,8 @@ static int drr_dump_class_stats(struct Qdisc *sch, unsigned long arg,
 				struct gnet_dump *d)
 {
 	struct drr_class *cl = (struct drr_class *)arg;
-	__u32 qlen = cl->qdisc->q.qlen;
+	__u32 qlen = qdisc_qlen_sum(cl->qdisc);
+	struct Qdisc *cl_q = cl->qdisc;
 	struct tc_drr_stats xstats;
 
 	memset(&xstats, 0, sizeof(xstats));
@@ -279,7 +271,7 @@ static int drr_dump_class_stats(struct Qdisc *sch, unsigned long arg,
 	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
 				  d, NULL, &cl->bstats) < 0 ||
 	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
-	    gnet_stats_copy_queue(d, NULL, &cl->qdisc->qstats, qlen) < 0)
+	    gnet_stats_copy_queue(d, cl_q->cpu_qstats, &cl_q->qstats, qlen) < 0)
 		return -1;
 
 	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
......
@@ -844,16 +844,6 @@ qdisc_peek_len(struct Qdisc *sch)
 	return len;
 }
 
-static void
-hfsc_purge_queue(struct Qdisc *sch, struct hfsc_class *cl)
-{
-	unsigned int len = cl->qdisc->q.qlen;
-	unsigned int backlog = cl->qdisc->qstats.backlog;
-
-	qdisc_reset(cl->qdisc);
-	qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
-}
-
 static void
 hfsc_adjust_levels(struct hfsc_class *cl)
 {
@@ -1076,7 +1066,7 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
 	qdisc_class_hash_insert(&q->clhash, &cl->cl_common);
 	list_add_tail(&cl->siblings, &parent->children);
 	if (parent->level == 0)
-		hfsc_purge_queue(sch, parent);
+		qdisc_purge_queue(parent->qdisc);
 	hfsc_adjust_levels(parent);
 	sch_tree_unlock(sch);
@@ -1112,7 +1102,7 @@ hfsc_delete_class(struct Qdisc *sch, unsigned long arg)
 	list_del(&cl->siblings);
 	hfsc_adjust_levels(cl->cl_parent);
-	hfsc_purge_queue(sch, cl);
+	qdisc_purge_queue(cl->qdisc);
 	qdisc_class_hash_remove(&q->clhash, &cl->cl_common);
 
 	sch_tree_unlock(sch);
@@ -1328,8 +1318,9 @@ hfsc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
 {
 	struct hfsc_class *cl = (struct hfsc_class *)arg;
 	struct tc_hfsc_stats xstats;
+	__u32 qlen;
 
-	cl->qstats.backlog = cl->qdisc->qstats.backlog;
+	qdisc_qstats_qlen_backlog(cl->qdisc, &qlen, &cl->qstats.backlog);
 	xstats.level  = cl->level;
 	xstats.period = cl->cl_vtperiod;
 	xstats.work   = cl->cl_total;
@@ -1337,7 +1328,7 @@ hfsc_dump_class_stats(struct Qdisc *sch, unsigned long arg,
 	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch), d, NULL, &cl->bstats) < 0 ||
 	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
-	    gnet_stats_copy_queue(d, NULL, &cl->qstats, cl->qdisc->q.qlen) < 0)
+	    gnet_stats_copy_queue(d, NULL, &cl->qstats, qlen) < 0)
 		return -1;
 
 	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
......
@@ -1127,10 +1127,9 @@ htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
 	};
 	__u32 qlen = 0;
 
-	if (!cl->level && cl->leaf.q) {
-		qlen = cl->leaf.q->q.qlen;
-		qs.backlog = cl->leaf.q->qstats.backlog;
-	}
+	if (!cl->level && cl->leaf.q)
+		qdisc_qstats_qlen_backlog(cl->leaf.q, &qlen, &qs.backlog);
+
 	cl->xstats.tokens = clamp_t(s64, PSCHED_NS2TICKS(cl->tokens),
 				    INT_MIN, INT_MAX);
 	cl->xstats.ctokens = clamp_t(s64, PSCHED_NS2TICKS(cl->ctokens),
@@ -1270,13 +1269,8 @@ static int htb_delete(struct Qdisc *sch, unsigned long arg)
 
 	sch_tree_lock(sch);
 
-	if (!cl->level) {
-		unsigned int qlen = cl->leaf.q->q.qlen;
-		unsigned int backlog = cl->leaf.q->qstats.backlog;
-
-		qdisc_reset(cl->leaf.q);
-		qdisc_tree_reduce_backlog(cl->leaf.q, qlen, backlog);
-	}
+	if (!cl->level)
+		qdisc_purge_queue(cl->leaf.q);
 
 	/* delete from hash and active; remainder in destroy_class */
 	qdisc_class_hash_remove(&q->clhash, &cl->common);
@@ -1404,12 +1398,8 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
 					  classid, NULL);
 		sch_tree_lock(sch);
 		if (parent && !parent->level) {
-			unsigned int qlen = parent->leaf.q->q.qlen;
-			unsigned int backlog = parent->leaf.q->qstats.backlog;
-
 			/* turn parent into inner node */
-			qdisc_reset(parent->leaf.q);
-			qdisc_tree_reduce_backlog(parent->leaf.q, qlen, backlog);
+			qdisc_purge_queue(parent->leaf.q);
 			qdisc_put(parent->leaf.q);
 			if (parent->prio_activity)
 				htb_deactivate(q, parent);
......
@@ -249,7 +249,7 @@ static int mq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
 	sch = dev_queue->qdisc_sleeping;
 	if (gnet_stats_copy_basic(&sch->running, d, NULL, &sch->bstats) < 0 ||
-	    gnet_stats_copy_queue(d, NULL, &sch->qstats, sch->q.qlen) < 0)
+	    qdisc_qstats_copy(d, sch) < 0)
 		return -1;
 	return 0;
 }
......
@@ -561,8 +561,7 @@ static int mqprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
 		sch = dev_queue->qdisc_sleeping;
 		if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
 					  d, NULL, &sch->bstats) < 0 ||
-		    gnet_stats_copy_queue(d, NULL,
-					  &sch->qstats, sch->q.qlen) < 0)
+		    qdisc_qstats_copy(d, sch) < 0)
 			return -1;
 	}
 	return 0;
......
@@ -201,9 +201,9 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt,
 	for (i = q->bands; i < q->max_bands; i++) {
 		if (q->queues[i] != &noop_qdisc) {
 			struct Qdisc *child = q->queues[i];
+
 			q->queues[i] = &noop_qdisc;
-			qdisc_tree_reduce_backlog(child, child->q.qlen,
-						  child->qstats.backlog);
+			qdisc_tree_flush_backlog(child);
 			qdisc_put(child);
 		}
 	}
@@ -225,9 +225,7 @@ static int multiq_tune(struct Qdisc *sch, struct nlattr *opt,
 				qdisc_hash_add(child, true);
 
 				if (old != &noop_qdisc) {
-					qdisc_tree_reduce_backlog(old,
-								  old->q.qlen,
-								  old->qstats.backlog);
+					qdisc_tree_flush_backlog(old);
 					qdisc_put(old);
 				}
 				sch_tree_unlock(sch);
@@ -344,7 +342,7 @@ static int multiq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
 	cl_q = q->queues[cl - 1];
 	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
 				  d, NULL, &cl_q->bstats) < 0 ||
-	    gnet_stats_copy_queue(d, NULL, &cl_q->qstats, cl_q->q.qlen) < 0)
+	    qdisc_qstats_copy(d, cl_q) < 0)
 		return -1;
 
 	return 0;
......
@@ -216,12 +216,8 @@ static int prio_tune(struct Qdisc *sch, struct nlattr *opt,
 	q->bands = qopt->bands;
 	memcpy(q->prio2band, qopt->priomap, TC_PRIO_MAX+1);
 
-	for (i = q->bands; i < oldbands; i++) {
-		struct Qdisc *child = q->queues[i];
-
-		qdisc_tree_reduce_backlog(child, child->q.qlen,
-					  child->qstats.backlog);
-	}
+	for (i = q->bands; i < oldbands; i++)
+		qdisc_tree_flush_backlog(q->queues[i]);
 
 	for (i = oldbands; i < q->bands; i++) {
 		q->queues[i] = queues[i];
@@ -365,7 +361,7 @@ static int prio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
 	cl_q = q->queues[cl - 1];
 	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
 				  d, NULL, &cl_q->bstats) < 0 ||
-	    gnet_stats_copy_queue(d, NULL, &cl_q->qstats, cl_q->q.qlen) < 0)
+	    qdisc_qstats_copy(d, cl_q) < 0)
 		return -1;
 
 	return 0;
......
@@ -217,15 +217,6 @@ static struct qfq_class *qfq_find_class(struct Qdisc *sch, u32 classid)
 	return container_of(clc, struct qfq_class, common);
 }
 
-static void qfq_purge_queue(struct qfq_class *cl)
-{
-	unsigned int len = cl->qdisc->q.qlen;
-	unsigned int backlog = cl->qdisc->qstats.backlog;
-
-	qdisc_reset(cl->qdisc);
-	qdisc_tree_reduce_backlog(cl->qdisc, len, backlog);
-}
-
 static const struct nla_policy qfq_policy[TCA_QFQ_MAX + 1] = {
 	[TCA_QFQ_WEIGHT] = { .type = NLA_U32 },
 	[TCA_QFQ_LMAX] = { .type = NLA_U32 },
@@ -551,7 +542,7 @@ static int qfq_delete_class(struct Qdisc *sch, unsigned long arg)
 
 	sch_tree_lock(sch);
 
-	qfq_purge_queue(cl);
+	qdisc_purge_queue(cl->qdisc);
 	qdisc_class_hash_remove(&q->clhash, &cl->common);
 
 	sch_tree_unlock(sch);
@@ -655,8 +646,7 @@ static int qfq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
 	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
 				  d, NULL, &cl->bstats) < 0 ||
 	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
-	    gnet_stats_copy_queue(d, NULL,
-				  &cl->qdisc->qstats, cl->qdisc->q.qlen) < 0)
+	    qdisc_qstats_copy(d, cl->qdisc) < 0)
 		return -1;
 
 	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
......
@@ -233,8 +233,7 @@ static int red_change(struct Qdisc *sch, struct nlattr *opt,
 	q->flags = ctl->flags;
 	q->limit = ctl->limit;
 	if (child) {
-		qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
-					  q->qdisc->qstats.backlog);
+		qdisc_tree_flush_backlog(q->qdisc);
 		old_child = q->qdisc;
 		q->qdisc = child;
 	}
......
@@ -521,8 +521,7 @@ static int sfb_change(struct Qdisc *sch, struct nlattr *opt,
 		qdisc_hash_add(child, true);
 	sch_tree_lock(sch);
 
-	qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
-				  q->qdisc->qstats.backlog);
+	qdisc_tree_flush_backlog(q->qdisc);
 	qdisc_put(q->qdisc);
 	q->qdisc = child;
......
@@ -895,7 +895,7 @@ static int taprio_dump_class_stats(struct Qdisc *sch, unsigned long cl,
 	sch = dev_queue->qdisc_sleeping;
 	if (gnet_stats_copy_basic(&sch->running, d, NULL, &sch->bstats) < 0 ||
-	    gnet_stats_copy_queue(d, NULL, &sch->qstats, sch->q.qlen) < 0)
+	    qdisc_qstats_copy(d, sch) < 0)
 		return -1;
 	return 0;
 }
......
@@ -391,8 +391,7 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt,
 	sch_tree_lock(sch);
 	if (child) {
-		qdisc_tree_reduce_backlog(q->qdisc, q->qdisc->q.qlen,
-					  q->qdisc->qstats.backlog);
+		qdisc_tree_flush_backlog(q->qdisc);
 		qdisc_put(q->qdisc);
 		q->qdisc = child;
 	}
......