author    Thomas Gleixner <tglx@linutronix.de>	2020-02-24 15:01:38 +0100
committer Alexei Starovoitov <ast@kernel.org>	2020-02-24 16:18:20 -0800
commit    1d7bf6b7d3e8353c3fac648f3f9b3010458570c2
tree      78fa90b9a7793a15078ea197fc19afb96539e356 /kernel/events
parent    bpf/trace: Remove redundant preempt_disable from trace_call_bpf()
perf/bpf: Remove preempt disable around BPF invocation
The BPF invocation from the perf event overflow handler does not need to
disable preemption: it is called from NMI or at least hard interrupt
context, which is already non-preemptible.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20200224145643.151953573@linutronix.de
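A minimal sketch of the context invariants this change relies on. The
helper below is hypothetical and not part of the patch; in_nmi(), in_irq()
and preemptible() are the standard checks from <linux/preempt.h>:

	#include <linux/preempt.h>	/* in_nmi(), in_irq(), preemptible() */
	#include <linux/bug.h>		/* WARN_ON_ONCE() */

	/*
	 * Hypothetical assertion helper: on entry to the perf overflow
	 * path we are in NMI or hard interrupt context, hence already
	 * non-preemptible, so the per-CPU bpf_prog_active accounting
	 * needs no extra preempt_disable()/preempt_enable() pair.
	 */
	static inline void assert_overflow_context(void)
	{
		WARN_ON_ONCE(!in_nmi() && !in_irq());	/* NMI or hardirq */
		WARN_ON_ONCE(preemptible());		/* so not preemptible */
	}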
Diffstat (limited to 'kernel/events')
-rw-r--r--	kernel/events/core.c	| 2 --
1 file changed, 0 insertions(+), 2 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index e453589da97c..bbdfac0182f4 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -9206,7 +9206,6 @@ static void bpf_overflow_handler(struct perf_event *event,
 	int ret = 0;
 
 	ctx.regs = perf_arch_bpf_user_pt_regs(regs);
-	preempt_disable();
 	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1))
 		goto out;
 	rcu_read_lock();
@@ -9214,7 +9213,6 @@ static void bpf_overflow_handler(struct perf_event *event,
 	rcu_read_unlock();
 out:
 	__this_cpu_dec(bpf_prog_active);
-	preempt_enable();
 	if (!ret)
 		return;
 
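For reference, a condensed view of bpf_overflow_handler() after this
change, reconstructed from the hunks above plus the surrounding code of
kernel/events/core.c from this era. The BPF_PROG_RUN() call and the
orig_overflow_handler tail are not shown in the diff itself, so treat
this as a sketch rather than a verbatim quote:

	static void bpf_overflow_handler(struct perf_event *event,
					 struct perf_sample_data *data,
					 struct pt_regs *regs)
	{
		struct bpf_perf_event_data_kern ctx = {
			.data = data,
			.event = event,
		};
		int ret = 0;

		ctx.regs = perf_arch_bpf_user_pt_regs(regs);
		/*
		 * Recursion protection: the per-CPU counter is safe
		 * without preempt_disable() because this runs in
		 * NMI or hard interrupt context.
		 */
		if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1))
			goto out;
		rcu_read_lock();
		ret = BPF_PROG_RUN(event->prog, &ctx);
		rcu_read_unlock();
	out:
		__this_cpu_dec(bpf_prog_active);
		if (!ret)
			return;

		event->orig_overflow_handler(event, data, regs);
	}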