path: root/kernel/bpf
author     Alexei Starovoitov <ast@kernel.org>	2022-09-02 14:10:47 -0700
committer  Daniel Borkmann <daniel@iogearbox.net>	2022-09-05 15:33:05 +0200
commit     34dd3bad1a6f1dc7d18ee8dd53f1d31bffd2aee8 (patch)
tree       583d488e537f0938fb920c938bfecf5ef181bf85 /kernel/bpf
parent     samples/bpf: Reduce syscall overhead in map_perf_test. (diff)
bpf: Relax the requirement to use preallocated hash maps in tracing progs.
Since the bpf hash map was converted to use bpf_mem_alloc, it is safe to use from tracing programs and in RT kernels. But the per-cpu hash map is still using dynamic allocation for per-cpu map values, hence keep the warning for this map type. In the future alloc_percpu_gfp can be front-ended with bpf_mem_cache and this restriction will be completely lifted.

perf_event (NMI) bpf programs have to use preallocated hash maps, because free_htab_elem() is using call_rcu which might crash if re-entered.

Sleepable bpf programs have to use preallocated hash maps, because the lifetime of the map elements is not protected by rcu_read_lock/unlock. This restriction can be lifted in the future as well.

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
Acked-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20220902211058.60789-6-alexei.starovoitov@gmail.com
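
[Editor's illustration, not part of the commit] A minimal sketch of the combination this change now accepts: a non-preallocated (BPF_F_NO_PREALLOC) hash map used from a kprobe. Map, section, and function names are hypothetical; it assumes libbpf-style BTF map definitions and a generated vmlinux.h.

    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>

    /* Hypothetical non-preallocated hash map: elements now come from
     * bpf_mem_alloc, so the verifier no longer warns when a tracing
     * program (kprobe/fentry) uses it, even on PREEMPT_RT.
     */
    struct {
    	__uint(type, BPF_MAP_TYPE_HASH);
    	__uint(max_entries, 1024);
    	__uint(map_flags, BPF_F_NO_PREALLOC);
    	__type(key, u32);
    	__type(value, u64);
    } dyn_hash SEC(".maps");

    SEC("kprobe/do_sys_openat2")
    int count_openat(struct pt_regs *ctx)
    {
    	u32 key = 0;
    	u64 one = 1, *val;

    	val = bpf_map_lookup_elem(&dyn_hash, &key);
    	if (val)
    		__sync_fetch_and_add(val, 1);
    	else
    		/* May allocate at run time; safe now that htab uses bpf_mem_alloc. */
    		bpf_map_update_elem(&dyn_hash, &key, &one, BPF_NOEXIST);
    	return 0;
    }

    char LICENSE[] SEC("license") = "GPL";

A BPF_MAP_TYPE_PERCPU_HASH map with the same BPF_F_NO_PREALLOC flag would still be rejected on PREEMPT_RT (and warned about otherwise), as the diff below shows.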
Diffstat (limited to 'kernel/bpf')
-rw-r--r--  kernel/bpf/verifier.c  31
1 file changed, 22 insertions(+), 9 deletions(-)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 0194a36d0b36..3dce3166855f 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -12629,10 +12629,12 @@ static int check_map_prog_compatibility(struct bpf_verifier_env *env,
* For programs attached to PERF events this is mandatory as the
* perf NMI can hit any arbitrary code sequence.
*
- * All other trace types using preallocated hash maps are unsafe as
- * well because tracepoint or kprobes can be inside locked regions
- * of the memory allocator or at a place where a recursion into the
- * memory allocator would see inconsistent state.
+ * All other trace types using non-preallocated per-cpu hash maps are
+ * unsafe as well because tracepoint or kprobes can be inside locked
+ * regions of the per-cpu memory allocator or at a place where a
+ * recursion into the per-cpu memory allocator would see inconsistent
+ * state. Non per-cpu hash maps are using bpf_mem_alloc-tor which is
+ * safe to use from kprobe/fentry and in RT.
*
* On RT enabled kernels run-time allocation of all trace type
* programs is strictly prohibited due to lock type constraints. On
@@ -12642,15 +12644,26 @@ static int check_map_prog_compatibility(struct bpf_verifier_env *env,
*/
if (is_tracing_prog_type(prog_type) && !is_preallocated_map(map)) {
if (prog_type == BPF_PROG_TYPE_PERF_EVENT) {
+ /* perf_event bpf progs have to use preallocated hash maps
+ * because non-prealloc is still relying on call_rcu to free
+ * elements.
+ */
verbose(env, "perf_event programs can only use preallocated hash map\n");
return -EINVAL;
}
- if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
- verbose(env, "trace type programs can only use preallocated hash map\n");
- return -EINVAL;
+ if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
+ (map->inner_map_meta &&
+ map->inner_map_meta->map_type == BPF_MAP_TYPE_PERCPU_HASH)) {
+ if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
+ verbose(env,
+ "trace type programs can only use preallocated per-cpu hash map\n");
+ return -EINVAL;
+ }
+ WARN_ONCE(1, "trace type BPF program uses run-time allocation\n");
+ verbose(env,
+ "trace type programs with run-time allocated per-cpu hash maps are unsafe."
+ " Switch to preallocated hash maps.\n");
}
- WARN_ONCE(1, "trace type BPF program uses run-time allocation\n");
- verbose(env, "trace type programs with run-time allocated hash maps are unsafe. Switch to preallocated hash maps.\n");
}
if (map_value_has_spin_lock(map)) {
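
[Editor's illustration, not part of the patch] The inner_map_meta branch added above also catches a per-cpu hash map reached through a map-in-map. A hedged sketch with illustrative names, assuming libbpf BTF map-in-map definitions:

    /* Inner per-cpu hash map: kept preallocated (no BPF_F_NO_PREALLOC),
     * which is what tracing programs still require, since per-cpu values
     * come from alloc_percpu_gfp() rather than bpf_mem_alloc.
     */
    struct pcpu_inner {
    	__uint(type, BPF_MAP_TYPE_PERCPU_HASH);
    	__uint(max_entries, 256);
    	__type(key, u32);
    	__type(value, u64);
    };

    struct {
    	__uint(type, BPF_MAP_TYPE_HASH_OF_MAPS);
    	__uint(max_entries, 8);
    	__type(key, u32);
    	__array(values, struct pcpu_inner);
    } outer SEC(".maps");

    /* Adding __uint(map_flags, BPF_F_NO_PREALLOC) to pcpu_inner would make
     * the outer map's inner_map_meta a non-preallocated per-cpu hash map,
     * which the new check rejects on PREEMPT_RT and warns about otherwise
     * for tracing program types.
     */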