commit 9f2c6e96c65e6fa1aebef546be0c30a5895fcb37
Author:    Alexei Starovoitov <ast@kernel.org>  2022-09-02 14:10:58 -0700
Committer: Daniel Borkmann <daniel@iogearbox.net>  2022-09-05 15:33:07 +0200
Tree:      d657c02f101e19b20ed3b720a044606e99f870a3 /include
Parent:    bpf: Remove usage of kmem_cache from bpf_mem_cache.
bpf: Optimize rcu_barrier usage between hash map and bpf_mem_alloc.
User space might be creating and destroying a lot of hash maps. Synchronous rcu_barrier() calls in the hash map destruction path delay the freeing of hash buckets and other map memory and may cause an artificial OOM situation under stress.

Optimize rcu_barrier usage between the bpf hash map and bpf_mem_alloc:

- Remove rcu_barrier() from the hash map, since htab doesn't use call_rcu()
  directly and there are no callbacks to wait for.
- bpf_mem_alloc has a call_rcu_in_progress flag that indicates pending
  callbacks. Use it to avoid barriers in the fast path.
- When barriers are needed, copy bpf_mem_alloc into a temporary structure
  and wait for the rcu barriers in a worker, letting the rest of the hash
  map freeing proceed (see the sketch after this message).

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Link: https://lore.kernel.org/bpf/20220902211058.60789-17-alexei.starovoitov@gmail.com
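The deferred-free step in the last bullet can be illustrated with a short sketch. This is a minimal illustration of the pattern, not the actual kernel implementation: the helper names destroy_mem_alloc() and free_mem_alloc_deferred(), the fallback paths, and the elided per-cpu cache freeing are assumptions for this example. It relies on the work_struct member that this patch adds to struct bpf_mem_alloc.

/*
 * Minimal sketch of the deferred-free pattern described above.
 * Helper names and fallback paths are illustrative assumptions;
 * the actual freeing of the per-cpu caches is elided.
 */
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/rcupdate.h>
#include <linux/bpf_mem_alloc.h>

static void free_mem_alloc_deferred(struct work_struct *work)
{
	/* The work item is embedded in the temporary copy, so the
	 * copy can be recovered with container_of().
	 */
	struct bpf_mem_alloc *copy = container_of(work, struct bpf_mem_alloc, work);

	/* Wait for pending call_rcu() callbacks here, in the worker,
	 * instead of blocking the map destruction path.
	 */
	rcu_barrier();
	/* ... free copy->caches / copy->cache here ... */
	kfree(copy);
}

static void destroy_mem_alloc(struct bpf_mem_alloc *ma, bool rcu_in_progress)
{
	struct bpf_mem_alloc *copy;

	if (!rcu_in_progress) {
		/* Fast path: no callbacks pending, no barrier needed. */
		/* ... free ma->caches / ma->cache directly ... */
		return;
	}

	copy = kmalloc(sizeof(*copy), GFP_KERNEL);
	if (!copy) {
		/* Allocation failed: fall back to a synchronous barrier. */
		rcu_barrier();
		/* ... free ma->caches / ma->cache directly ... */
		return;
	}

	/* Transfer ownership of the per-cpu pointers to the copy, so
	 * the rest of the hash map memory can be freed immediately.
	 */
	copy->cache = ma->cache;
	ma->cache = NULL;
	copy->caches = ma->caches;
	ma->caches = NULL;
	INIT_WORK(&copy->work, free_mem_alloc_deferred);
	queue_work(system_unbound_wq, &copy->work);
}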
Diffstat (limited to 'include')
 include/linux/bpf_mem_alloc.h | 2 ++
 1 file changed, 2 insertions(+)
diff --git a/include/linux/bpf_mem_alloc.h b/include/linux/bpf_mem_alloc.h
index 653ed1584a03..3e164b8efaa9 100644
--- a/include/linux/bpf_mem_alloc.h
+++ b/include/linux/bpf_mem_alloc.h
@@ -3,6 +3,7 @@
 #ifndef _BPF_MEM_ALLOC_H
 #define _BPF_MEM_ALLOC_H
 #include <linux/compiler_types.h>
+#include <linux/workqueue.h>
 
 struct bpf_mem_cache;
 struct bpf_mem_caches;
@@ -10,6 +11,7 @@ struct bpf_mem_caches;
 struct bpf_mem_alloc {
 	struct bpf_mem_caches __percpu *caches;
 	struct bpf_mem_cache __percpu *cache;
+	struct work_struct work;
 };
 
 int bpf_mem_alloc_init(struct bpf_mem_alloc *ma, int size, bool percpu);