author		Daniel Borkmann <daniel@iogearbox.net>	2017-06-11 00:50:40 +0200
committer	David S. Miller <davem@davemloft.net>	2017-06-10 19:05:45 -0400
commit		20b9d7ac48526ce9a14106241e76e8382d126a60 (patch)
tree		8e5e133552c45aaf6eddf0e61c88939f2df57695 /kernel/trace
parent		net: fec: Add a fec_enet_clear_ethtool_stats() stub for CONFIG_M5272 (diff)
bpf: avoid excessive stack usage for perf_sample_data
perf_sample_data consumes 386 bytes on stack, reduce excessive stack usage and
move it to per cpu buffer. It's allowed due to preemption being disabled for
tracing, xdp and tc programs, thus at all times only one program can run on a
specific CPU and programs cannot run from interrupt. We similarly also handle
bpf_pt_regs.

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
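The change relies on the standard kernel per-CPU scratch buffer idiom. Below is
a minimal, illustrative sketch of that idiom, not code from the patch: the names
scratch_buf and use_scratch are made up for the example, and the explicit
preempt_disable()/preempt_enable() pair stands in for the tracing, tc and xdp
entry points, which already run with preemption disabled.

#include <linux/percpu.h>
#include <linux/preempt.h>

/* Illustrative stand-in for a large object such as struct perf_sample_data
 * that would otherwise land on the kernel stack.
 */
struct scratch_buf {
	unsigned char data[384];
};

/* One buffer per CPU instead of ~400 bytes per call frame. */
static DEFINE_PER_CPU(struct scratch_buf, scratch_buf);

static int use_scratch(void)
{
	struct scratch_buf *buf;

	preempt_disable();		/* pin to this CPU */
	buf = this_cpu_ptr(&scratch_buf);
	/* ... fill and consume buf; no other user can touch this CPU's copy,
	 * since nothing can preempt us and this path never runs from IRQ ...
	 */
	preempt_enable();

	return 0;
}

Reusing one per-CPU object like this is only correct under exactly the
conditions the commit message states: the code cannot be preempted while the
buffer is in use and cannot be re-entered from interrupt context on the same CPU.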
Diffstat (limited to 'kernel/trace')
-rw-r--r--	kernel/trace/bpf_trace.c | 10
1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 08eb072430b9..051d7fca0c09 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -266,14 +266,16 @@ static const struct bpf_func_proto bpf_perf_event_read_proto = {
 	.arg2_type	= ARG_ANYTHING,
 };
 
+static DEFINE_PER_CPU(struct perf_sample_data, bpf_sd);
+
 static __always_inline u64
 __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
 			u64 flags, struct perf_raw_record *raw)
 {
 	struct bpf_array *array = container_of(map, struct bpf_array, map);
+	struct perf_sample_data *sd = this_cpu_ptr(&bpf_sd);
 	unsigned int cpu = smp_processor_id();
 	u64 index = flags & BPF_F_INDEX_MASK;
-	struct perf_sample_data sample_data;
 	struct bpf_event_entry *ee;
 	struct perf_event *event;
 
@@ -294,9 +296,9 @@ __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
 	if (unlikely(event->oncpu != cpu))
 		return -EOPNOTSUPP;
 
-	perf_sample_data_init(&sample_data, 0, 0);
-	sample_data.raw = raw;
-	perf_event_output(event, &sample_data, regs);
+	perf_sample_data_init(sd, 0, 0);
+	sd->raw = raw;
+	perf_event_output(event, sd, regs);
 	return 0;
 }
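
For context, the per-CPU bpf_sd buffer is only safe because the program
invocation sites in the tracing, tc and xdp paths disable preemption around
program execution. The sketch below illustrates that caller-side invariant;
run_prog_pinned is a made-up name for illustration, not the kernel's actual
entry point, and the real call sites perform additional bookkeeping.

#include <linux/filter.h>
#include <linux/preempt.h>
#include <linux/rcupdate.h>

static unsigned int run_prog_pinned(const struct bpf_prog *prog, void *ctx)
{
	unsigned int ret;

	preempt_disable();	/* no other program can run on this CPU meanwhile */
	rcu_read_lock();	/* programs and maps are RCU-protected */
	ret = BPF_PROG_RUN(prog, ctx);	/* helpers may use per-CPU scratch such as bpf_sd */
	rcu_read_unlock();
	preempt_enable();

	return ret;
}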