author     Alexei Starovoitov <ast@kernel.org>      2022-09-02 14:10:58 -0700
committer  Daniel Borkmann <daniel@iogearbox.net>   2022-09-05 15:33:07 +0200
commit     9f2c6e96c65e6fa1aebef546be0c30a5895fcb37 (patch)
tree       d657c02f101e19b20ed3b720a044606e99f870a3 /kernel/bpf/hashtab.c
parent     bpf: Remove usage of kmem_cache from bpf_mem_cache. (diff)
download   linux-dev-9f2c6e96c65e6fa1aebef546be0c30a5895fcb37.tar.xz
           linux-dev-9f2c6e96c65e6fa1aebef546be0c30a5895fcb37.zip
bpf: Optimize rcu_barrier usage between hash map and bpf_mem_alloc.
User space might be creating and destroying a lot of hash maps. Synchronous
rcu_barrier-s in the destruction path of a hash map delay freeing of hash
buckets and other map memory and may cause an artificial OOM situation under
stress. Optimize rcu_barrier usage between the bpf hash map and bpf_mem_alloc:

- remove rcu_barrier from the hash map, since htab doesn't use call_rcu
  directly and there are no callbacks to wait for;
- bpf_mem_alloc has a call_rcu_in_progress flag that indicates pending
  callbacks. Use it to avoid barriers in the fast path;
- when barriers are needed, copy bpf_mem_alloc into a temp structure and wait
  for the rcu barrier-s in the worker, letting the rest of the hash map
  freeing proceed.

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Link: https://lore.kernel.org/bpf/20220902211058.60789-17-alexei.starovoitov@gmail.com
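As an illustration of the last point, here is a minimal, hedged sketch of the
"copy state into a temp structure and wait in a worker" pattern in plain
kernel C. It is not the actual bpf_mem_alloc code; the names defer_free_ctx,
defer_free_work() and defer_destroy() are assumptions made for this example.

/* Hedged sketch of the deferred-barrier pattern described above;
 * not the real bpf_mem_alloc implementation.
 */
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/rcupdate.h>

struct defer_free_ctx {                 /* temp structure holding copied state */
	struct work_struct work;
	void *objs;                     /* objects whose free must wait for RCU */
};

static void defer_free_work(struct work_struct *work)
{
	struct defer_free_ctx *ctx = container_of(work, struct defer_free_ctx, work);

	/* Wait for pending call_rcu() callbacks in the worker, not in the
	 * caller's destruction path.
	 */
	rcu_barrier();
	kfree(ctx->objs);
	kfree(ctx);
}

static void defer_destroy(void *objs, bool call_rcu_in_progress)
{
	struct defer_free_ctx *ctx;

	/* Fast path: no call_rcu() callbacks pending, nothing to wait for. */
	if (!call_rcu_in_progress) {
		kfree(objs);
		return;
	}

	ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx) {
		/* Cannot defer; fall back to waiting synchronously. */
		rcu_barrier();
		kfree(objs);
		return;
	}
	ctx->objs = objs;
	INIT_WORK(&ctx->work, defer_free_work);
	queue_work(system_unbound_wq, &ctx->work);
}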
Diffstat (limited to 'kernel/bpf/hashtab.c')
-rw-r--r--  kernel/bpf/hashtab.c  6
1 file changed, 3 insertions, 3 deletions
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index a77b9c4a4e48..0fe3f136cbbe 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -1546,10 +1546,10 @@ static void htab_map_free(struct bpf_map *map)
 	 * There is no need to synchronize_rcu() here to protect map elements.
 	 */
 
-	/* some of free_htab_elem() callbacks for elements of this map may
-	 * not have executed. Wait for them.
+	/* htab no longer uses call_rcu() directly. bpf_mem_alloc does it
+	 * underneath and is responsible for waiting for callbacks to finish
+	 * during bpf_mem_alloc_destroy().
 	 */
-	rcu_barrier();
 	if (!htab_is_prealloc(htab)) {
 		delete_all_elements(htab);
 	} else {
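With the rcu_barrier() removed from htab_map_free(), the waiting moves into
the allocator's destroy path. Below is a rough, hedged sketch of how a destroy
routine can use a call_rcu_in_progress flag to skip the barrier when no
callbacks are in flight; mem_cache_sketch and its fields are assumptions for
illustration, not the real struct bpf_mem_alloc layout.

#include <linux/rcupdate.h>
#include <linux/compiler.h>

struct mem_cache_sketch {
	bool call_rcu_in_progress;      /* set whenever call_rcu() was used to defer a free */
	/* ... freelists, per-CPU caches, etc. ... */
};

static void mem_cache_destroy_sketch(struct mem_cache_sketch *c)
{
	/* Only pay for an rcu_barrier() when callbacks may still be in flight. */
	if (READ_ONCE(c->call_rcu_in_progress))
		rcu_barrier();

	/* ... now it is safe to free the remaining objects and the cache ... */
}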