author		Hou Tao <houtao1@huawei.com>			2022-08-31 12:26:28 +0800
committer	Martin KaFai Lau <martin.lau@kernel.org>	2022-08-31 14:10:01 -0700
commit		66a7a92e4d0d091e79148a4c6ec15d1da65f4280 (patch)
tree		98ec0e1fb6c9359d6b586aa44ea44b4309f74f23 /kernel/bpf/hashtab.c
parent		bpf: Disable preemption when increasing per-cpu map_locked (diff)
download	linux-dev-66a7a92e4d0d091e79148a4c6ec15d1da65f4280.tar.xz
		linux-dev-66a7a92e4d0d091e79148a4c6ec15d1da65f4280.zip
bpf: Propagate error from htab_lock_bucket() to userspace
In __htab_map_lookup_and_delete_batch(), if htab_lock_bucket() returns -EBUSY, it moves on to the next bucket. Going to the next bucket not only silently skips the elements in the current bucket, it may also incur an out-of-bounds memory access or expose kernel memory to userspace if the current bucket_cnt is greater than bucket_size or zero.

Fix it by stopping the batch operation and returning -EBUSY when htab_lock_bucket() fails; the application can then retry or skip the busy batch as needed.

Fixes: 20b6cc34ea74 ("bpf: Avoid hashtab deadlock with map_locked")
Reported-by: Hao Sun <sunhao.th@gmail.com>
Signed-off-by: Hou Tao <houtao1@huawei.com>
Link: https://lore.kernel.org/r/20220831042629.130006-3-houtao@huaweicloud.com
Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
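To illustrate the new userspace-visible behavior, here is a minimal sketch (not part of the patch) of draining a hash map via libbpf's bpf_map_lookup_and_delete_batch() while handling the -EBUSY that this fix can now surface. The map layout (4-byte keys and values), BATCH_SZ, consume() and drain_map() are assumptions for the example; only the batch API itself is real.

#include <errno.h>
#include <bpf/bpf.h>

#define BATCH_SZ 64

static void consume(__u32 key, __u32 val);	/* process one drained pair */

static int drain_map(int map_fd)
{
	__u32 keys[BATCH_SZ], vals[BATCH_SZ];
	__u32 in_batch, out_batch, count;
	void *in = NULL;	/* NULL: start from the first bucket */
	int err;

	for (;;) {
		count = BATCH_SZ;
		err = bpf_map_lookup_and_delete_batch(map_fd, in, &out_batch,
						      keys, vals, &count, NULL);

		/* Elements copied out before an error are still reported
		 * via count, so consume them even on failure. */
		for (__u32 i = 0; i < count; i++)
			consume(keys[i], vals[i]);

		/* The err/errno double check covers both libbpf error
		 * conventions (-1 + errno vs. direct -errno returns). */
		if (err && (err == -EBUSY || errno == EBUSY)) {
			/* Contended bucket: out_batch points at it, so
			 * retry from there (or advance to skip it). */
			in_batch = out_batch;
			in = &in_batch;
			continue;
		}
		if (err && (err == -ENOENT || errno == ENOENT))
			return 0;	/* whole map drained */
		if (err)
			return -errno;

		in_batch = out_batch;	/* move on to the next batch */
		in = &in_batch;
	}
}

Before this fix, the -EBUSY case never reached userspace: the kernel silently moved past the busy bucket, so a loop like the one above had no way to know elements had been skipped.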
Diffstat (limited to 'kernel/bpf/hashtab.c')
-rw-r--r--	kernel/bpf/hashtab.c | 7 +++++--
1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 6fb3b7fd1622..eb1263f03e9b 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -1704,8 +1704,11 @@ again_nocopy:
 	/* do not grab the lock unless need it (bucket_cnt > 0). */
 	if (locked) {
 		ret = htab_lock_bucket(htab, b, batch, &flags);
-		if (ret)
-			goto next_batch;
+		if (ret) {
+			rcu_read_unlock();
+			bpf_enable_instrumentation();
+			goto after_loop;
+		}
 	}
 
 	bucket_cnt = 0;
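For context on where the -EBUSY originates: since commit 20b6cc34ea74 ("bpf: Avoid hashtab deadlock with map_locked"), htab_lock_bucket() fails fast instead of spinning when the same CPU is already inside one of this map's bucket-lock sections (e.g. a tracing program re-entering the map), which is what prevents the deadlock. A simplified sketch of that shape follows; it mirrors the kernel source of this era but elides details (such as the non-raw spinlock variant) and is not verbatim.

/* Simplified sketch of htab_lock_bucket(), not the verbatim kernel
 * source. map_locked is a per-cpu counter per lock "slot"; a nested
 * lock attempt on the same CPU returns -EBUSY instead of deadlocking
 * on the bucket spinlock. */
static int htab_lock_bucket(struct bpf_htab *htab, struct bucket *b,
			    u32 hash, unsigned long *pflags)
{
	unsigned long flags;

	hash &= HASHTAB_MAP_LOCK_MASK;

	/* Preemption is disabled around the map_locked update; see the
	 * parent commit in this series. */
	preempt_disable();
	if (__this_cpu_inc_return(*(htab->map_locked[hash])) != 1) {
		/* Re-entered on this CPU: spinning could deadlock. */
		__this_cpu_dec(*(htab->map_locked[hash]));
		preempt_enable();
		return -EBUSY;
	}

	raw_spin_lock_irqsave(&b->raw_lock, flags);
	*pflags = flags;
	return 0;
}

The batch-lookup path above is the caller that previously swallowed this -EBUSY; with the patch applied it unwinds (RCU unlock, re-enable instrumentation) and propagates the error to the syscall return value.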