author     Eric Dumazet <edumazet@google.com>    2020-05-07 18:58:10 -0700
committer  Jakub Kicinski <kuba@kernel.org>      2020-05-08 21:33:33 -0700
commit     cf86a086a18095e33e0637cb78cda1fcf5280852 (patch)
tree       c6d0569026ce134c625680f2946b9bbcdb8b0765 /include/net
parent     net: relax SO_TXTIME CAP_NET_ADMIN check (diff)
net/dst: use a smaller percpu_counter batch for dst entries accounting
percpu_counter_add() uses a default batch size which is quite big on platforms with 256 cpus (2 * 256 -> 512).

This means dst_entries_get_fast() can be off by +/- 2*(nr_cpus^2) (131072 on servers with 256 cpus).

Reduce the batch size to something more reasonable, and add logic to ip6_dst_gc() to call dst_entries_get_slow() before calling the _very_ expensive fib6_run_gc() function.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
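To see where the 131072 figure comes from: the worst-case error of the fast read is roughly nr_cpus * batch, because every CPU may be holding up to a full batch of deltas that have not yet been folded into the shared count. The standalone userspace C sketch below (not kernel code; all names are local to the example) simply prints that bound for the old default batch of about 2 * nr_cpus versus the new DST_PERCPU_COUNTER_BATCH of 32:

#include <stdio.h>

/*
 * Userspace arithmetic only, not kernel code: the fast read of a batched
 * per-cpu counter can drift by roughly nr_cpus * batch, since each CPU
 * may hold up to a full batch of uncommitted deltas.
 */
int main(void)
{
	long nr_cpus = 256;
	long default_batch = 2 * nr_cpus;	/* old behaviour: ~2 * nr_cpus = 512 */
	long dst_batch = 32;			/* DST_PERCPU_COUNTER_BATCH from this patch */

	printf("batch %4ld -> fast count off by up to +/- %ld\n",
	       default_batch, nr_cpus * default_batch);	/* 131072 */
	printf("batch %4ld -> fast count off by up to +/- %ld\n",
	       dst_batch, nr_cpus * dst_batch);		/* 8192 */
	return 0;
}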
Diffstat (limited to 'include/net')
-rw-r--r--  include/net/dst_ops.h  |  4
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/include/net/dst_ops.h b/include/net/dst_ops.h
index 443863c7b8da..88ff7bb2bb9b 100644
--- a/include/net/dst_ops.h
+++ b/include/net/dst_ops.h
@@ -53,9 +53,11 @@ static inline int dst_entries_get_slow(struct dst_ops *dst)
 	return percpu_counter_sum_positive(&dst->pcpuc_entries);
 }
 
+#define DST_PERCPU_COUNTER_BATCH 32
 static inline void dst_entries_add(struct dst_ops *dst, int val)
 {
-	percpu_counter_add(&dst->pcpuc_entries, val);
+	percpu_counter_add_batch(&dst->pcpuc_entries, val,
+				 DST_PERCPU_COUNTER_BATCH);
 }
 
 static inline int dst_entries_init(struct dst_ops *dst)
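The ipv6 half of the change is not part of this diffstat (which is limited to include/net), but the commit message describes its shape: ip6_dst_gc() now confirms the cheap per-cpu estimate with dst_entries_get_slow() before invoking the very expensive fib6_run_gc(). The standalone C sketch below only illustrates that check-cheap-then-check-exact pattern; entries_fast(), entries_slow(), run_expensive_gc() and the numbers are stand-ins, not the kernel's code:

#include <stdio.h>

/*
 * Illustrative stand-ins only -- not the kernel's functions or data.
 * exact_entries models the true entry count, fast_skew models the
 * uncommitted per-cpu deltas, gc_thresh is an arbitrary threshold.
 */
static long exact_entries = 1000;	/* true count */
static long fast_skew     = 200;	/* drift in the fast estimate */
static long gc_thresh     = 1024;

static long entries_fast(void) { return exact_entries + fast_skew; }
static long entries_slow(void) { return exact_entries; }
static void run_expensive_gc(void) { puts("running expensive GC pass"); }

/*
 * Sketch of the pattern the commit message describes for ip6_dst_gc():
 * only pay for the exact sum -- and then the costly GC pass -- when the
 * cheap estimate suggests we might be over the threshold, so per-cpu
 * drift alone cannot trigger the expensive work.
 */
static void maybe_gc(void)
{
	if (entries_fast() <= gc_thresh) {
		puts("fast estimate under threshold, nothing to do");
		return;
	}
	if (entries_slow() <= gc_thresh) {
		puts("exact count under threshold, drift alone -> skip GC");
		return;
	}
	run_expensive_gc();
}

int main(void)
{
	maybe_gc();
	return 0;
}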