author    David S. Miller <davem@davemloft.net>  2019-09-27 16:23:32 +0200
committer David S. Miller <davem@davemloft.net>  2019-09-27 16:23:32 +0200
commit    3c30819dc68ab9498216421b6846123900c0a6d3 (patch)
tree      2da4d8143b0a9f171cb38ba70ca9c1f339b24cef  /kernel/trace/bpf_trace.c
parent    Merge branch 'qdisc-destroy' (diff)
parent    bpf: Fix bpf_event_output re-entry issue (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf
Daniel Borkmann says:

====================
pull-request: bpf 2019-09-27

The following pull-request contains BPF updates for your *net* tree.

The main changes are:

1) Fix libbpf's BTF dumper to not skip anonymous enum definitions, from Andrii.

2) Fix BTF verifier issues when handling the BTF of vmlinux, from Alexei.

3) Fix nested calls into bpf_event_output() from TCP sockops BPF programs, from Allan.

4) Fix NULL pointer dereference in AF_XDP's xsk map creation when allocation fails, from Jonathan.

5) Remove unneeded 64 byte alignment requirement of the AF_XDP UMEM headroom, from Bjorn.

6) Remove unused XDP_OPTIONS getsockopt() call which results in an error on older kernels, from Toke.

7) Fix a client/server race in tcp_rtt BPF kselftest case, from Stanislav.

8) Fix indentation issue in BTF's btf_enum_check_kflag_member(), from Colin.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'kernel/trace/bpf_trace.c')
-rw-r--r--  kernel/trace/bpf_trace.c | 26
1 file changed, 21 insertions(+), 5 deletions(-)
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index ca1255d14576..3e38a010003c 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -500,14 +500,17 @@ static const struct bpf_func_proto bpf_perf_event_output_proto = {
.arg5_type = ARG_CONST_SIZE_OR_ZERO,
};
-static DEFINE_PER_CPU(struct pt_regs, bpf_pt_regs);
-static DEFINE_PER_CPU(struct perf_sample_data, bpf_misc_sd);
+static DEFINE_PER_CPU(int, bpf_event_output_nest_level);
+struct bpf_nested_pt_regs {
+ struct pt_regs regs[3];
+};
+static DEFINE_PER_CPU(struct bpf_nested_pt_regs, bpf_pt_regs);
+static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_misc_sds);
u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
{
- struct perf_sample_data *sd = this_cpu_ptr(&bpf_misc_sd);
- struct pt_regs *regs = this_cpu_ptr(&bpf_pt_regs);
+ int nest_level = this_cpu_inc_return(bpf_event_output_nest_level);
struct perf_raw_frag frag = {
.copy = ctx_copy,
.size = ctx_size,
@@ -522,12 +525,25 @@ u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
.data = meta,
},
};
+ struct perf_sample_data *sd;
+ struct pt_regs *regs;
+ u64 ret;
+
+ if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(bpf_misc_sds.sds))) {
+ ret = -EBUSY;
+ goto out;
+ }
+ sd = this_cpu_ptr(&bpf_misc_sds.sds[nest_level - 1]);
+ regs = this_cpu_ptr(&bpf_pt_regs.regs[nest_level - 1]);
perf_fetch_caller_regs(regs);
perf_sample_data_init(sd, 0, 0);
sd->raw = &raw;
- return __bpf_perf_event_output(regs, map, flags, sd);
+ ret = __bpf_perf_event_output(regs, map, flags, sd);
+out:
+ this_cpu_dec(bpf_event_output_nest_level);
+ return ret;
}
BPF_CALL_0(bpf_get_current_task)
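
The diff above guards bpf_event_output() against re-entry on the same CPU (for example, a second BPF program firing while an earlier invocation is still using the per-CPU scratch buffers): a per-CPU nesting counter selects one slot out of a small fixed-size array, and once the supported depth is exceeded the call bails out with -EBUSY instead of clobbering a slot an outer call still owns. Below is a minimal standalone sketch of that pattern, not kernel code: the names (emit_event, sample_slot, MAX_NEST_LEVEL) are made up for illustration, and thread-local storage stands in for the kernel's per-CPU variables.

#include <errno.h>
#include <stdio.h>

#define MAX_NEST_LEVEL 3

struct sample_slot {
	int scratch;		/* stands in for struct perf_sample_data */
};

static _Thread_local int nest_level;
static _Thread_local struct sample_slot slots[MAX_NEST_LEVEL];

static int emit_event(int value)
{
	struct sample_slot *sd;
	int ret = 0;

	/* Claim the next nesting slot; bail out if we are already too deep. */
	if (++nest_level > MAX_NEST_LEVEL) {
		ret = -EBUSY;
		goto out;
	}
	sd = &slots[nest_level - 1];
	sd->scratch = value;

	/* Real work would run here and may re-enter emit_event(). */
	printf("level %d uses slot %d (value %d)\n",
	       nest_level, nest_level - 1, sd->scratch);
out:
	/* Release the level on every path, mirroring this_cpu_dec() above. */
	nest_level--;
	return ret;
}

int main(void)
{
	return emit_event(42) ? 1 : 0;
}

Because each nesting level indexes a distinct array entry, an interrupted outer call finds its own slot untouched when it resumes; beyond MAX_NEST_LEVEL the innermost caller fails fast rather than corrupting in-use state.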