author    Alexei Starovoitov <ast@kernel.org>    2021-02-09 19:36:31 -0800
committer Daniel Borkmann <daniel@iogearbox.net> 2021-02-11 16:19:20 +0100
commit    9ed9e9ba2337205311398a312796c213737bac35 (patch)
tree      361082a8f4e91f8d2891a77168843bdce88a9958 /kernel/bpf/trampoline.c
parent    selftest/bpf: Add a recursion test (diff)
bpf: Count the number of times recursion was prevented
Add per-program counter for number of times recursion prevention mechanism
was triggered and expose it via show_fdinfo and bpf_prog_info.
Teach bpftool to print it.

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Andrii Nakryiko <andrii@kernel.org>
Link: https://lore.kernel.org/bpf/20210210033634.62081-7-alexei.starovoitov@gmail.com
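The counter reaches userspace as the recursion_misses field of struct
bpf_prog_info (and as a recursion_misses line in the program's /proc fdinfo).
A minimal userspace sketch of querying it via libbpf's bpf_obj_get_info_by_fd(),
assuming uapi headers that carry this series; print_recursion_misses() and its
error handling are illustrative, not part of the patch:

#include <stdio.h>
#include <bpf/bpf.h>            /* bpf_obj_get_info_by_fd(), struct bpf_prog_info */

/* Illustrative only: prog_fd is assumed to be the fd of an already
 * loaded BPF program on a kernel with this patch applied.
 */
static int print_recursion_misses(int prog_fd)
{
        struct bpf_prog_info info = {};
        __u32 len = sizeof(info);

        if (bpf_obj_get_info_by_fd(prog_fd, &info, &len))
                return -1;
        printf("recursion_misses: %llu\n",
               (unsigned long long)info.recursion_misses);
        return 0;
}

bpftool prog show reports the same value once taught to by this series.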
Diffstat (limited to 'kernel/bpf/trampoline.c')
-rw-r--r--	kernel/bpf/trampoline.c | 18 ++++++++++++++++--
1 file changed, 16 insertions(+), 2 deletions(-)
diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c
index 89ef6320d19b..7bc3b3209224 100644
--- a/kernel/bpf/trampoline.c
+++ b/kernel/bpf/trampoline.c
@@ -394,6 +394,16 @@ static u64 notrace bpf_prog_start_time(void)
 	return start;
 }
 
+static void notrace inc_misses_counter(struct bpf_prog *prog)
+{
+	struct bpf_prog_stats *stats;
+
+	stats = this_cpu_ptr(prog->stats);
+	u64_stats_update_begin(&stats->syncp);
+	stats->misses++;
+	u64_stats_update_end(&stats->syncp);
+}
+
 /* The logic is similar to BPF_PROG_RUN, but with an explicit
  * rcu_read_lock() and migrate_disable() which are required
  * for the trampoline. The macro is split into
@@ -412,8 +422,10 @@ u64 notrace __bpf_prog_enter(struct bpf_prog *prog)
 	__acquires(RCU)
 {
 	rcu_read_lock();
 	migrate_disable();
-	if (unlikely(__this_cpu_inc_return(*(prog->active)) != 1))
+	if (unlikely(__this_cpu_inc_return(*(prog->active)) != 1)) {
+		inc_misses_counter(prog);
 		return 0;
+	}
 	return bpf_prog_start_time();
 }
@@ -451,8 +463,10 @@ u64 notrace __bpf_prog_enter_sleepable(struct bpf_prog *prog)
 {
 	rcu_read_lock_trace();
 	migrate_disable();
 	might_fault();
-	if (unlikely(__this_cpu_inc_return(*(prog->active)) != 1))
+	if (unlikely(__this_cpu_inc_return(*(prog->active)) != 1)) {
+		inc_misses_counter(prog);
 		return 0;
+	}
 	return bpf_prog_start_time();
 }
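inc_misses_counter() runs with migration disabled, so each CPU only ever
touches its own bpf_prog_stats slot; the u64_stats_update_begin()/end() pair
makes the 64-bit increment readable without tearing on 32-bit kernels. A
reader then has to sum the per-CPU slots under the matching fetch/retry loop.
A sketch of that read side, modeled on bpf_prog_get_stats() in
kernel/bpf/syscall.c (sum_recursion_misses() is illustrative, not verbatim
kernel code, and the _irq fetch helper names are the ones from this era of
the u64_stats API):

#include <linux/filter.h>       /* struct bpf_prog, struct bpf_prog_stats */
#include <linux/percpu.h>
#include <linux/u64_stats_sync.h>

/* Sketch: fold every CPU's misses slot into one total. */
static u64 sum_recursion_misses(const struct bpf_prog *prog)
{
        u64 misses = 0;
        int cpu;

        for_each_possible_cpu(cpu) {
                const struct bpf_prog_stats *st = per_cpu_ptr(prog->stats, cpu);
                unsigned int start;
                u64 tmisses;

                do {
                        /* Re-read if this CPU's writer ran concurrently. */
                        start = u64_stats_fetch_begin_irq(&st->syncp);
                        tmisses = st->misses;
                } while (u64_stats_fetch_retry_irq(&st->syncp, start));
                misses += tmisses;
        }
        return misses;
}

On 64-bit kernels the seqcount inside syncp compiles away and the loop
degenerates to a plain load; the begin/end pairs only cost anything where a
64-bit read could tear.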