author	Jakub Kicinski <jakub.kicinski@netronome.com>	2018-11-14 22:23:49 -0800
committer	David S. Miller <davem@davemloft.net>	2018-11-16 23:08:51 -0800
commit	80e22e961dfd15530215f6f6dcd94cd8f65ba1ea (patch)
tree	9c2f8c1082a152c6a0bca4a790c042760a053c2c /net/sched/sch_gred.c
parent	net: sched: gred: store bytesin as a 64 bit value (diff)
net: sched: gred: provide a better structured dump and expose stats
Currently all of GRED's virtual queue data is dumped in a single array in a
single attribute. This makes it pretty much impossible to add new fields.
In order to expose more detailed stats, add a new set of attributes. We can
now expose the 64 bit value of bytesin and all the mark stats, which were
not part of the original design.

Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Reviewed-by: John Hurley <john.hurley@netronome.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
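For reference, this is the attribute tree the new dump produces, reconstructed
from the code added in the diff below (the top-level nest is the qdisc's
existing options attribute, which is unchanged by this patch):

/*
 * qdisc options nest (opts)
 *   TCA_GRED_PARMS                     old-style array covering all VQs
 *   TCA_GRED_VQ_LIST                   new nested list
 *     TCA_GRED_VQ_ENTRY                one nest per active virtual queue
 *       TCA_GRED_VQ_DP                 u32, virtual queue index (q->DP)
 *       TCA_GRED_VQ_STAT_BYTES         u64, bytesin (padded via TCA_GRED_VQ_PAD)
 *       TCA_GRED_VQ_STAT_PACKETS       u32, packetsin
 *       TCA_GRED_VQ_STAT_BACKLOG       u32
 *       TCA_GRED_VQ_STAT_PROB_DROP     u32
 *       TCA_GRED_VQ_STAT_PROB_MARK     u32
 *       TCA_GRED_VQ_STAT_FORCED_DROP   u32
 *       TCA_GRED_VQ_STAT_FORCED_MARK   u32
 *       TCA_GRED_VQ_STAT_PDROP         u32
 *       TCA_GRED_VQ_STAT_OTHER         u32
 */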
Diffstat (limited to 'net/sched/sch_gred.c')
-rw-r--r--	net/sched/sch_gred.c	| 53
1 file changed, 52 insertions(+), 1 deletion(-)
diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index 6f209c83ee7a..dc09a32c4b4f 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -404,6 +404,7 @@ static const struct nla_policy gred_policy[TCA_GRED_MAX + 1] = {
[TCA_GRED_DPS] = { .len = sizeof(struct tc_gred_sopt) },
[TCA_GRED_MAX_P] = { .type = NLA_U32 },
[TCA_GRED_LIMIT] = { .type = NLA_U32 },
+ [TCA_GRED_VQ_LIST] = { .type = NLA_REJECT },
};
static int gred_change(struct Qdisc *sch, struct nlattr *opt,
@@ -517,7 +518,7 @@ static int gred_init(struct Qdisc *sch, struct nlattr *opt,
static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
{
struct gred_sched *table = qdisc_priv(sch);
- struct nlattr *parms, *opts = NULL;
+ struct nlattr *parms, *vqs, *opts = NULL;
int i;
u32 max_p[MAX_DPs];
struct tc_gred_sopt sopt = {
@@ -544,6 +545,7 @@ static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
if (nla_put_u32(skb, TCA_GRED_LIMIT, sch->limit))
goto nla_put_failure;
+ /* Old style all-in-one dump of VQs */
parms = nla_nest_start(skb, TCA_GRED_PARMS);
if (parms == NULL)
goto nla_put_failure;
@@ -594,6 +596,55 @@ append_opt:
nla_nest_end(skb, parms);
+ /* Dump the VQs again, in more structured way */
+ vqs = nla_nest_start(skb, TCA_GRED_VQ_LIST);
+ if (!vqs)
+ goto nla_put_failure;
+
+ for (i = 0; i < MAX_DPs; i++) {
+ struct gred_sched_data *q = table->tab[i];
+ struct nlattr *vq;
+
+ if (!q)
+ continue;
+
+ vq = nla_nest_start(skb, TCA_GRED_VQ_ENTRY);
+ if (!vq)
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, TCA_GRED_VQ_DP, q->DP))
+ goto nla_put_failure;
+
+ /* Stats */
+ if (nla_put_u64_64bit(skb, TCA_GRED_VQ_STAT_BYTES, q->bytesin,
+ TCA_GRED_VQ_PAD))
+ goto nla_put_failure;
+ if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PACKETS, q->packetsin))
+ goto nla_put_failure;
+ if (nla_put_u32(skb, TCA_GRED_VQ_STAT_BACKLOG,
+ gred_backlog(table, q, sch)))
+ goto nla_put_failure;
+ if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PROB_DROP,
+ q->stats.prob_drop))
+ goto nla_put_failure;
+ if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PROB_MARK,
+ q->stats.prob_mark))
+ goto nla_put_failure;
+ if (nla_put_u32(skb, TCA_GRED_VQ_STAT_FORCED_DROP,
+ q->stats.forced_drop))
+ goto nla_put_failure;
+ if (nla_put_u32(skb, TCA_GRED_VQ_STAT_FORCED_MARK,
+ q->stats.forced_mark))
+ goto nla_put_failure;
+ if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PDROP, q->stats.pdrop))
+ goto nla_put_failure;
+ if (nla_put_u32(skb, TCA_GRED_VQ_STAT_OTHER, q->stats.other))
+ goto nla_put_failure;
+
+ nla_nest_end(skb, vq);
+ }
+ nla_nest_end(skb, vqs);
+
return nla_nest_end(skb, opts);
nla_put_failure:
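
On the receive side, a dump consumer has to walk two levels of nesting:
TCA_GRED_VQ_LIST, then one TCA_GRED_VQ_ENTRY per virtual queue. Below is a
minimal userspace sketch, not part of this patch, assuming libmnl and uapi
headers that already carry the TCA_GRED_VQ_* definitions; the function name
gred_print_vqs and the choice of counters printed are illustrative only.

#include <stdio.h>
#include <stdint.h>
#include <libmnl/libmnl.h>
#include <linux/pkt_sched.h>

/* vq_list is the TCA_GRED_VQ_LIST attribute found inside the qdisc options */
static void gred_print_vqs(const struct nlattr *vq_list)
{
	const struct nlattr *entry;

	mnl_attr_for_each_nested(entry, vq_list) {
		const struct nlattr *attr;
		uint64_t bytes = 0;
		uint32_t dp = 0, packets = 0, prob_mark = 0, forced_mark = 0;

		if (mnl_attr_get_type(entry) != TCA_GRED_VQ_ENTRY)
			continue;

		/* One nest per virtual queue; pick out a few of the stats */
		mnl_attr_for_each_nested(attr, entry) {
			switch (mnl_attr_get_type(attr)) {
			case TCA_GRED_VQ_DP:
				dp = mnl_attr_get_u32(attr);
				break;
			case TCA_GRED_VQ_STAT_BYTES:
				bytes = mnl_attr_get_u64(attr);
				break;
			case TCA_GRED_VQ_STAT_PACKETS:
				packets = mnl_attr_get_u32(attr);
				break;
			case TCA_GRED_VQ_STAT_PROB_MARK:
				prob_mark = mnl_attr_get_u32(attr);
				break;
			case TCA_GRED_VQ_STAT_FORCED_MARK:
				forced_mark = mnl_attr_get_u32(attr);
				break;
			}
		}

		printf("VQ %u: %llu bytes, %u packets, marked %u/%u (prob/forced)\n",
		       dp, (unsigned long long)bytes, packets,
		       prob_mark, forced_mark);
	}
}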