author    Ahmed S. Darwish <a.darwish@linutronix.de>  2021-10-16 10:49:07 +0200
committer David S. Miller <davem@davemloft.net>       2021-10-18 12:54:41 +0100
commit    67c9e6270f3013e4d86ec57c4e7f27459f2a0652 (patch)
tree      69cab1b15c62cd3dd449820ceebdd18107114c68 /net/sched/sch_htb.c
parent    u64_stats: Introduce u64_stats_set() (diff)
net: sched: Protect Qdisc::bstats with u64_stats
The not-per-CPU variant of qdisc tc (traffic control) statistics,
Qdisc::gnet_stats_basic_packed bstats, is protected with the
Qdisc::running sequence counter.

This sequence counter is used for reliably protecting bstats reads from
parallel writes. Meanwhile, the seqcount's write section covers a much
wider area than the bstats update: qdisc_run_begin() => qdisc_run_end().

That read/write section asymmetry can lead to needless retries of the
read section.

To prepare for removing the Qdisc::running sequence counter altogether,
introduce a u64_stats sync point inside bstats instead.

Modify _bstats_update() to start/end the bstats u64_stats write section.

For bisectability, and finer commit granularity, the bstats read section
is still protected with a Qdisc::running read/retry loop and
qdisc_run_begin/end() still starts/ends that seqcount write section.
Once all call sites are modified to use _bstats_update(), the
Qdisc::running seqcount will be removed and the bstats read/retry loop
will be modified to utilize the internal u64_stats sync point.

Note, using u64_stats implies no sequence counter protection for 64-bit
architectures. This can lead to the statistics "packets" vs. "bytes"
values getting out of sync on rare occasions. The individual values will
still be valid.

[bigeasy: Minor commit message edits, init all gnet_stats_basic_packed.]

Signed-off-by: Ahmed S. Darwish <a.darwish@linutronix.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
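For reference, the write section described above looks roughly as
follows. This is a sketch modeled on _bstats_update() in
include/net/sch_generic.h; the name of the u64_stats_sync member added
to gnet_stats_basic_packed (here "syncp") is an assumption based on the
description in this message, not taken from this diff:

/* Sketch: bstats update wrapped in a u64_stats write section.
 * On 64-bit kernels, u64_stats_update_begin/end() compile away,
 * which is why "packets" and "bytes" can be observed out of sync
 * there on rare occasions, as noted above.
 */
static inline void _bstats_update(struct gnet_stats_basic_packed *bstats,
				  __u64 bytes, __u32 packets)
{
	u64_stats_update_begin(&bstats->syncp);
	bstats->bytes += bytes;
	bstats->packets += packets;
	u64_stats_update_end(&bstats->syncp);
}

Keeping the write section this narrow is the point of the change: it
covers only the two counter increments instead of the whole
qdisc_run_begin()/qdisc_run_end() span.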
Diffstat (limited to 'net/sched/sch_htb.c')
-rw-r--r--  net/sched/sch_htb.c | 7
1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 5067a6e5d4fd..2e805b17efcf 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -1311,7 +1311,7 @@ static void htb_offload_aggregate_stats(struct htb_sched *q,
 	struct htb_class *c;
 	unsigned int i;
 
-	memset(&cl->bstats, 0, sizeof(cl->bstats));
+	gnet_stats_basic_packed_init(&cl->bstats);
 
 	for (i = 0; i < q->clhash.hashsize; i++) {
 		hlist_for_each_entry(c, &q->clhash.hash[i], common.hnode) {
@@ -1357,7 +1357,7 @@ htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
 		if (cl->leaf.q)
 			cl->bstats = cl->leaf.q->bstats;
 		else
-			memset(&cl->bstats, 0, sizeof(cl->bstats));
+			gnet_stats_basic_packed_init(&cl->bstats);
 		cl->bstats.bytes += cl->bstats_bias.bytes;
 		cl->bstats.packets += cl->bstats_bias.packets;
 	} else {
@@ -1849,6 +1849,9 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
 		if (!cl)
 			goto failure;
 
+		gnet_stats_basic_packed_init(&cl->bstats);
+		gnet_stats_basic_packed_init(&cl->bstats_bias);
+
 		err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack);
 		if (err) {
 			kfree(cl);
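The memset() calls are replaced because zeroing the struct is no longer
sufficient once bstats embeds a sync point: the u64_stats_sync member
must be initialized via u64_stats_init(), which is not a no-op on 32-bit
SMP kernels or when lockdep is enabled. A sketch of what the new helper
plausibly does, assuming the same "syncp" member as above:

/* Sketch: initialize counters and the embedded sync point.
 * A plain memset() would leave the seqcount uninitialized on
 * configurations where u64_stats_init() actually does work.
 */
static inline void
gnet_stats_basic_packed_init(struct gnet_stats_basic_packed *b)
{
	b->bytes = 0;
	b->packets = 0;
	u64_stats_init(&b->syncp);
}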