Commit bfe0d029 authored by Eric Dumazet, committed by David S. Miller

net_sched: factorize qdisc stats handling

HTB already takes into account whether an skb is segmented (GSO) when
updating its stats. Generalize this behaviour to all schedulers.

They should use the qdisc_bstats_update() helper instead of manipulating
bstats.bytes and bstats.packets directly.

Also add a bstats_update() helper for classes that update
gnet_stats_basic_packed fields.

Note: right now, the TCQ_F_CAN_BYPASS shortcut can be taken only if no
stab is set up on the qdisc.
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent f1593d22
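The conversion is mechanical across all schedulers. Below is a minimal sketch of the new pattern (toy_enqueue() is a hypothetical enqueue function used only for illustration; the helpers are the ones this patch adds to include/net/sch_generic.h):

static int toy_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	/* Before this patch, each scheduler open-coded the accounting
	 * and counted a GSO super-packet as a single packet:
	 *
	 *	sch->bstats.bytes += qdisc_pkt_len(skb);
	 *	sch->bstats.packets++;
	 */
	qdisc_bstats_update(sch, skb);	/* shared, GSO-aware accounting */
	sch->q.qlen++;
	return NET_XMIT_SUCCESS;
}

Classes keep their own counters and call bstats_update(&cl->bstats, skb) the same way, as the per-scheduler hunks below show.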
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
@@ -207,7 +207,7 @@ static inline int qdisc_qlen(struct Qdisc *q)
 	return q->q.qlen;
 }
 
-static inline struct qdisc_skb_cb *qdisc_skb_cb(struct sk_buff *skb)
+static inline struct qdisc_skb_cb *qdisc_skb_cb(const struct sk_buff *skb)
 {
 	return (struct qdisc_skb_cb *)skb->cb;
 }
@@ -394,7 +394,7 @@ static inline bool qdisc_tx_is_noop(const struct net_device *dev)
 	return true;
 }
 
-static inline unsigned int qdisc_pkt_len(struct sk_buff *skb)
+static inline unsigned int qdisc_pkt_len(const struct sk_buff *skb)
 {
 	return qdisc_skb_cb(skb)->pkt_len;
 }
@@ -426,10 +426,18 @@ static inline int qdisc_enqueue_root(struct sk_buff *skb, struct Qdisc *sch)
 	return qdisc_enqueue(skb, sch) & NET_XMIT_MASK;
 }
 
-static inline void __qdisc_update_bstats(struct Qdisc *sch, unsigned int len)
+static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
+				 const struct sk_buff *skb)
+{
+	bstats->bytes += qdisc_pkt_len(skb);
+	bstats->packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
+}
+
+static inline void qdisc_bstats_update(struct Qdisc *sch,
+				       const struct sk_buff *skb)
 {
-	sch->bstats.bytes += len;
-	sch->bstats.packets++;
+	bstats_update(&sch->bstats, skb);
 }
 
 static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
@@ -437,7 +445,7 @@ static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
 {
 	__skb_queue_tail(list, skb);
 	sch->qstats.backlog += qdisc_pkt_len(skb);
-	__qdisc_update_bstats(sch, qdisc_pkt_len(skb));
+	qdisc_bstats_update(sch, skb);
 
 	return NET_XMIT_SUCCESS;
 }
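A worked example of the GSO-aware accounting above (the numbers are illustrative, not from the patch): for a TSO skb with skb_shinfo(skb)->gso_segs == 3 and qdisc_pkt_len(skb) == 4344 (three MSS-sized segments of 1448 bytes), bstats_update() adds 4344 to bytes and 3 to packets, whereas the old open-coded update would have counted only 1 packet for the same skb. For a non-GSO skb the helper degenerates to the previous bytes += pkt_len / packets++ behaviour.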
diff --git a/net/core/dev.c b/net/core/dev.c
@@ -2297,7 +2297,10 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
 		 */
 		if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE))
 			skb_dst_force(skb);
-		__qdisc_update_bstats(q, skb->len);
+
+		qdisc_skb_cb(skb)->pkt_len = skb->len;
+		qdisc_bstats_update(q, skb);
+
 		if (sch_direct_xmit(skb, q, dev, txq, root_lock)) {
 			if (unlikely(contended)) {
 				spin_unlock(&q->busylock);
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
@@ -508,8 +508,7 @@ static int tcf_csum(struct sk_buff *skb,
 	spin_lock(&p->tcf_lock);
 	p->tcf_tm.lastuse = jiffies;
-	p->tcf_bstats.bytes += qdisc_pkt_len(skb);
-	p->tcf_bstats.packets++;
+	bstats_update(&p->tcf_bstats, skb);
 	action = p->tcf_action;
 	update_flags = p->update_flags;
 	spin_unlock(&p->tcf_lock);
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
@@ -209,8 +209,7 @@ static int tcf_ipt(struct sk_buff *skb, struct tc_action *a,
 	spin_lock(&ipt->tcf_lock);
 	ipt->tcf_tm.lastuse = jiffies;
-	ipt->tcf_bstats.bytes += qdisc_pkt_len(skb);
-	ipt->tcf_bstats.packets++;
+	bstats_update(&ipt->tcf_bstats, skb);
 
 	/* yes, we have to worry about both in and out dev
 	 worry later - danger - this API seems to have changed
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
@@ -165,8 +165,7 @@ static int tcf_mirred(struct sk_buff *skb, struct tc_action *a,
 	spin_lock(&m->tcf_lock);
 	m->tcf_tm.lastuse = jiffies;
-	m->tcf_bstats.bytes += qdisc_pkt_len(skb);
-	m->tcf_bstats.packets++;
+	bstats_update(&m->tcf_bstats, skb);
 
 	dev = m->tcfm_dev;
 	if (!dev) {
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
@@ -125,8 +125,7 @@ static int tcf_nat(struct sk_buff *skb, struct tc_action *a,
 	egress = p->flags & TCA_NAT_FLAG_EGRESS;
 	action = p->tcf_action;
 
-	p->tcf_bstats.bytes += qdisc_pkt_len(skb);
-	p->tcf_bstats.packets++;
+	bstats_update(&p->tcf_bstats, skb);
 
 	spin_unlock(&p->tcf_lock);
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
@@ -187,8 +187,7 @@ static int tcf_pedit(struct sk_buff *skb, struct tc_action *a,
 bad:
 	p->tcf_qstats.overlimits++;
 done:
-	p->tcf_bstats.bytes += qdisc_pkt_len(skb);
-	p->tcf_bstats.packets++;
+	bstats_update(&p->tcf_bstats, skb);
 	spin_unlock(&p->tcf_lock);
 	return p->tcf_action;
 }
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
@@ -298,8 +298,7 @@ static int tcf_act_police(struct sk_buff *skb, struct tc_action *a,
 	spin_lock(&police->tcf_lock);
 
-	police->tcf_bstats.bytes += qdisc_pkt_len(skb);
-	police->tcf_bstats.packets++;
+	bstats_update(&police->tcf_bstats, skb);
 
 	if (police->tcfp_ewma_rate &&
 	    police->tcf_rate_est.bps >= police->tcfp_ewma_rate) {
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
@@ -42,8 +42,7 @@ static int tcf_simp(struct sk_buff *skb, struct tc_action *a, struct tcf_result
 	spin_lock(&d->tcf_lock);
 	d->tcf_tm.lastuse = jiffies;
-	d->tcf_bstats.bytes += qdisc_pkt_len(skb);
-	d->tcf_bstats.packets++;
+	bstats_update(&d->tcf_bstats, skb);
 
 	/* print policy string followed by _ then packet count
 	 * Example if this was the 3rd packet and the string was "hello"
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
@@ -46,8 +46,7 @@ static int tcf_skbedit(struct sk_buff *skb, struct tc_action *a,
 	spin_lock(&d->tcf_lock);
 	d->tcf_tm.lastuse = jiffies;
-	d->tcf_bstats.bytes += qdisc_pkt_len(skb);
-	d->tcf_bstats.packets++;
+	bstats_update(&d->tcf_bstats, skb);
 
 	if (d->flags & SKBEDIT_F_PRIORITY)
 		skb->priority = d->priority;
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
@@ -422,10 +422,8 @@ drop: __maybe_unused
 		}
 		return ret;
 	}
-	sch->bstats.bytes += qdisc_pkt_len(skb);
-	sch->bstats.packets++;
-	flow->bstats.bytes += qdisc_pkt_len(skb);
-	flow->bstats.packets++;
+	qdisc_bstats_update(sch, skb);
+	bstats_update(&flow->bstats, skb);
 	/*
 	 * Okay, this may seem weird. We pretend we've dropped the packet if
 	 * it goes via ATM. The reason for this is that the outer qdisc
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
@@ -390,8 +390,7 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	ret = qdisc_enqueue(skb, cl->q);
 	if (ret == NET_XMIT_SUCCESS) {
 		sch->q.qlen++;
-		sch->bstats.packets++;
-		sch->bstats.bytes += qdisc_pkt_len(skb);
+		qdisc_bstats_update(sch, skb);
 		cbq_mark_toplevel(q, cl);
 		if (!cl->next_alive)
 			cbq_activate_class(cl);
@@ -650,8 +649,7 @@ static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
 	ret = qdisc_enqueue(skb, cl->q);
 	if (ret == NET_XMIT_SUCCESS) {
 		sch->q.qlen++;
-		sch->bstats.packets++;
-		sch->bstats.bytes += qdisc_pkt_len(skb);
+		qdisc_bstats_update(sch, skb);
 		if (!cl->next_alive)
 			cbq_activate_class(cl);
 		return 0;
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
@@ -351,7 +351,6 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
 	struct drr_sched *q = qdisc_priv(sch);
 	struct drr_class *cl;
-	unsigned int len;
 	int err;
 
 	cl = drr_classify(skb, sch, &err);
@@ -362,7 +361,6 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		return err;
 	}
 
-	len = qdisc_pkt_len(skb);
 	err = qdisc_enqueue(skb, cl->qdisc);
 	if (unlikely(err != NET_XMIT_SUCCESS)) {
 		if (net_xmit_drop_count(err)) {
@@ -377,10 +375,8 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		cl->deficit = cl->quantum;
 	}
 
-	cl->bstats.packets++;
-	cl->bstats.bytes += len;
-	sch->bstats.packets++;
-	sch->bstats.bytes += len;
+	bstats_update(&cl->bstats, skb);
+	qdisc_bstats_update(sch, skb);
 	sch->q.qlen++;
 	return err;
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
@@ -260,8 +260,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		return err;
 	}
 
-	sch->bstats.bytes += qdisc_pkt_len(skb);
-	sch->bstats.packets++;
+	qdisc_bstats_update(sch, skb);
 	sch->q.qlen++;
 
 	return NET_XMIT_SUCCESS;
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
@@ -1599,10 +1599,8 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	if (cl->qdisc->q.qlen == 1)
 		set_active(cl, qdisc_pkt_len(skb));
 
-	cl->bstats.packets++;
-	cl->bstats.bytes += qdisc_pkt_len(skb);
-	sch->bstats.packets++;
-	sch->bstats.bytes += qdisc_pkt_len(skb);
+	bstats_update(&cl->bstats, skb);
+	qdisc_bstats_update(sch, skb);
 	sch->q.qlen++;
 
 	return NET_XMIT_SUCCESS;
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
@@ -569,15 +569,12 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		}
 		return ret;
 	} else {
-		cl->bstats.packets +=
-			skb_is_gso(skb)?skb_shinfo(skb)->gso_segs:1;
-		cl->bstats.bytes += qdisc_pkt_len(skb);
+		bstats_update(&cl->bstats, skb);
 		htb_activate(q, cl);
 	}
 
 	sch->q.qlen++;
-	sch->bstats.packets += skb_is_gso(skb)?skb_shinfo(skb)->gso_segs:1;
-	sch->bstats.bytes += qdisc_pkt_len(skb);
+	qdisc_bstats_update(sch, skb);
 	return NET_XMIT_SUCCESS;
 }
@@ -648,12 +645,10 @@ static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
 			htb_add_to_wait_tree(q, cl, diff);
 	}
 
-	/* update byte stats except for leaves which are already updated */
-	if (cl->level) {
-		cl->bstats.bytes += bytes;
-		cl->bstats.packets += skb_is_gso(skb)?
-			skb_shinfo(skb)->gso_segs:1;
-	}
+	/* update basic stats except for leaves which are already updated */
+	if (cl->level)
+		bstats_update(&cl->bstats, skb);
+
 		cl = cl->parent;
 	}
 }
diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c
@@ -63,8 +63,7 @@ static int ingress_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	result = tc_classify(skb, p->filter_list, &res);
 
-	sch->bstats.packets++;
-	sch->bstats.bytes += qdisc_pkt_len(skb);
+	qdisc_bstats_update(sch, skb);
 	switch (result) {
 	case TC_ACT_SHOT:
 		result = TC_ACT_SHOT;
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
@@ -83,8 +83,7 @@ multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	ret = qdisc_enqueue(skb, qdisc);
 	if (ret == NET_XMIT_SUCCESS) {
-		sch->bstats.bytes += qdisc_pkt_len(skb);
-		sch->bstats.packets++;
+		qdisc_bstats_update(sch, skb);
 		sch->q.qlen++;
 		return NET_XMIT_SUCCESS;
 	}
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
@@ -240,8 +240,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	if (likely(ret == NET_XMIT_SUCCESS)) {
 		sch->q.qlen++;
-		sch->bstats.bytes += qdisc_pkt_len(skb);
-		sch->bstats.packets++;
+		qdisc_bstats_update(sch, skb);
 	} else if (net_xmit_drop_count(ret)) {
 		sch->qstats.drops++;
 	}
@@ -477,8 +476,7 @@ static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
 	__skb_queue_after(list, skb, nskb);
 
 	sch->qstats.backlog += qdisc_pkt_len(nskb);
-	sch->bstats.bytes += qdisc_pkt_len(nskb);
-	sch->bstats.packets++;
+	qdisc_bstats_update(sch, nskb);
 
 	return NET_XMIT_SUCCESS;
 }
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
@@ -84,8 +84,7 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	ret = qdisc_enqueue(skb, qdisc);
 	if (ret == NET_XMIT_SUCCESS) {
-		sch->bstats.bytes += qdisc_pkt_len(skb);
-		sch->bstats.packets++;
+		qdisc_bstats_update(sch, skb);
 		sch->q.qlen++;
 		return NET_XMIT_SUCCESS;
 	}
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
@@ -94,8 +94,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 	ret = qdisc_enqueue(skb, child);
 	if (likely(ret == NET_XMIT_SUCCESS)) {
-		sch->bstats.bytes += qdisc_pkt_len(skb);
-		sch->bstats.packets++;
+		qdisc_bstats_update(sch, skb);
 		sch->q.qlen++;
 	} else if (net_xmit_drop_count(ret)) {
 		q->stats.pdrop++;
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
@@ -403,8 +403,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		slot->allot = q->scaled_quantum;
 	}
 	if (++sch->q.qlen <= q->limit) {
-		sch->bstats.bytes += qdisc_pkt_len(skb);
-		sch->bstats.packets++;
+		qdisc_bstats_update(sch, skb);
 		return NET_XMIT_SUCCESS;
 	}
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
@@ -134,8 +134,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 	}
 
 	sch->q.qlen++;
-	sch->bstats.bytes += qdisc_pkt_len(skb);
-	sch->bstats.packets++;
+	qdisc_bstats_update(sch, skb);
 	return NET_XMIT_SUCCESS;
 }
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
@@ -83,8 +83,7 @@ teql_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 	if (q->q.qlen < dev->tx_queue_len) {
 		__skb_queue_tail(&q->q, skb);
-		sch->bstats.bytes += qdisc_pkt_len(skb);
-		sch->bstats.packets++;
+		qdisc_bstats_update(sch, skb);
 		return NET_XMIT_SUCCESS;
 	}