提交 80e22e96 编写于 作者: Jakub Kicinski 提交者: David S. Miller

net: sched: gred: provide a better structured dump and expose stats

Currently all GRED's virtual queue data is dumped in a single
array in a single attribute.  This makes it pretty much impossible
to add new fields.  In order to expose more detailed stats add a
new set of attributes.  We can now expose the 64 bit value of bytesin
and all the mark stats which were not part of the original design.
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Reviewed-by: John Hurley <john.hurley@netronome.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
上级 9f5cd0c8
...@@ -291,11 +291,37 @@ enum { ...@@ -291,11 +291,37 @@ enum {
TCA_GRED_DPS, TCA_GRED_DPS,
TCA_GRED_MAX_P, TCA_GRED_MAX_P,
TCA_GRED_LIMIT, TCA_GRED_LIMIT,
TCA_GRED_VQ_LIST, /* nested TCA_GRED_VQ_ENTRY */
__TCA_GRED_MAX, __TCA_GRED_MAX,
}; };
#define TCA_GRED_MAX (__TCA_GRED_MAX - 1) #define TCA_GRED_MAX (__TCA_GRED_MAX - 1)
/* Nesting container for the structured per-virtual-queue dump:
 * TCA_GRED_VQ_LIST holds one TCA_GRED_VQ_ENTRY nested attribute per
 * configured virtual queue; each entry in turn nests TCA_GRED_VQ_*
 * attributes (see the enum below).
 */
enum {
TCA_GRED_VQ_ENTRY_UNSPEC,
TCA_GRED_VQ_ENTRY, /* nested TCA_GRED_VQ_* */
__TCA_GRED_VQ_ENTRY_MAX,
};
#define TCA_GRED_VQ_ENTRY_MAX (__TCA_GRED_VQ_ENTRY_MAX - 1)
/* Attributes nested inside each TCA_GRED_VQ_ENTRY.  TCA_GRED_VQ_DP
 * identifies the virtual queue; the TCA_GRED_VQ_STAT_* attributes
 * expose the per-VQ counters (including the 64-bit byte count and the
 * ECN mark counters that did not fit in the legacy all-in-one dump).
 * TCA_GRED_VQ_PAD is the alignment attribute for the u64 stat
 * (used as the padattr of nla_put_u64_64bit()).
 */
enum {
TCA_GRED_VQ_UNSPEC,
TCA_GRED_VQ_PAD,
TCA_GRED_VQ_DP, /* u32 */
TCA_GRED_VQ_STAT_BYTES, /* u64 */
TCA_GRED_VQ_STAT_PACKETS, /* u32 */
TCA_GRED_VQ_STAT_BACKLOG, /* u32 */
TCA_GRED_VQ_STAT_PROB_DROP, /* u32 */
TCA_GRED_VQ_STAT_PROB_MARK, /* u32 */
TCA_GRED_VQ_STAT_FORCED_DROP, /* u32 */
TCA_GRED_VQ_STAT_FORCED_MARK, /* u32 */
TCA_GRED_VQ_STAT_PDROP, /* u32 */
TCA_GRED_VQ_STAT_OTHER, /* u32 */
__TCA_GRED_VQ_MAX
};
#define TCA_GRED_VQ_MAX (__TCA_GRED_VQ_MAX - 1)
struct tc_gred_qopt { struct tc_gred_qopt {
__u32 limit; /* HARD maximal queue length (bytes) */ __u32 limit; /* HARD maximal queue length (bytes) */
__u32 qth_min; /* Min average length threshold (bytes) */ __u32 qth_min; /* Min average length threshold (bytes) */
......
...@@ -404,6 +404,7 @@ static const struct nla_policy gred_policy[TCA_GRED_MAX + 1] = { ...@@ -404,6 +404,7 @@ static const struct nla_policy gred_policy[TCA_GRED_MAX + 1] = {
[TCA_GRED_DPS] = { .len = sizeof(struct tc_gred_sopt) }, [TCA_GRED_DPS] = { .len = sizeof(struct tc_gred_sopt) },
[TCA_GRED_MAX_P] = { .type = NLA_U32 }, [TCA_GRED_MAX_P] = { .type = NLA_U32 },
[TCA_GRED_LIMIT] = { .type = NLA_U32 }, [TCA_GRED_LIMIT] = { .type = NLA_U32 },
[TCA_GRED_VQ_LIST] = { .type = NLA_REJECT },
}; };
static int gred_change(struct Qdisc *sch, struct nlattr *opt, static int gred_change(struct Qdisc *sch, struct nlattr *opt,
...@@ -517,7 +518,7 @@ static int gred_init(struct Qdisc *sch, struct nlattr *opt, ...@@ -517,7 +518,7 @@ static int gred_init(struct Qdisc *sch, struct nlattr *opt,
static int gred_dump(struct Qdisc *sch, struct sk_buff *skb) static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
{ {
struct gred_sched *table = qdisc_priv(sch); struct gred_sched *table = qdisc_priv(sch);
struct nlattr *parms, *opts = NULL; struct nlattr *parms, *vqs, *opts = NULL;
int i; int i;
u32 max_p[MAX_DPs]; u32 max_p[MAX_DPs];
struct tc_gred_sopt sopt = { struct tc_gred_sopt sopt = {
...@@ -544,6 +545,7 @@ static int gred_dump(struct Qdisc *sch, struct sk_buff *skb) ...@@ -544,6 +545,7 @@ static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
if (nla_put_u32(skb, TCA_GRED_LIMIT, sch->limit)) if (nla_put_u32(skb, TCA_GRED_LIMIT, sch->limit))
goto nla_put_failure; goto nla_put_failure;
/* Old style all-in-one dump of VQs */
parms = nla_nest_start(skb, TCA_GRED_PARMS); parms = nla_nest_start(skb, TCA_GRED_PARMS);
if (parms == NULL) if (parms == NULL)
goto nla_put_failure; goto nla_put_failure;
...@@ -594,6 +596,55 @@ static int gred_dump(struct Qdisc *sch, struct sk_buff *skb) ...@@ -594,6 +596,55 @@ static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
nla_nest_end(skb, parms); nla_nest_end(skb, parms);
/* Dump the VQs again, in more structured way */
vqs = nla_nest_start(skb, TCA_GRED_VQ_LIST);
if (!vqs)
goto nla_put_failure;
for (i = 0; i < MAX_DPs; i++) {
struct gred_sched_data *q = table->tab[i];
struct nlattr *vq;
if (!q)
continue;
vq = nla_nest_start(skb, TCA_GRED_VQ_ENTRY);
if (!vq)
goto nla_put_failure;
if (nla_put_u32(skb, TCA_GRED_VQ_DP, q->DP))
goto nla_put_failure;
/* Stats */
if (nla_put_u64_64bit(skb, TCA_GRED_VQ_STAT_BYTES, q->bytesin,
TCA_GRED_VQ_PAD))
goto nla_put_failure;
if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PACKETS, q->packetsin))
goto nla_put_failure;
if (nla_put_u32(skb, TCA_GRED_VQ_STAT_BACKLOG,
gred_backlog(table, q, sch)))
goto nla_put_failure;
if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PROB_DROP,
q->stats.prob_drop))
goto nla_put_failure;
if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PROB_MARK,
q->stats.prob_mark))
goto nla_put_failure;
if (nla_put_u32(skb, TCA_GRED_VQ_STAT_FORCED_DROP,
q->stats.forced_drop))
goto nla_put_failure;
if (nla_put_u32(skb, TCA_GRED_VQ_STAT_FORCED_MARK,
q->stats.forced_mark))
goto nla_put_failure;
if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PDROP, q->stats.pdrop))
goto nla_put_failure;
if (nla_put_u32(skb, TCA_GRED_VQ_STAT_OTHER, q->stats.other))
goto nla_put_failure;
nla_nest_end(skb, vq);
}
nla_nest_end(skb, vqs);
return nla_nest_end(skb, opts); return nla_nest_end(skb, opts);
nla_put_failure: nla_put_failure:
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册