path: root/samples/bpf/trace_event_kern.c
author    Daniel T. Lee <danieltimlee@gmail.com>    2020-05-16 13:06:08 +0900
committer Daniel Borkmann <daniel@iogearbox.net>    2020-05-19 17:13:03 +0200
commit    59929cd1fec508a48ea2a04d8f2e4fdef907a2cd (patch)
tree      2daef9645a717e2ad5ac0dbdfc89e92cd0738adf /samples/bpf/trace_event_kern.c
parent    samples, bpf: Add tracex7 test file to .gitignore (diff)
samples, bpf: Refactor kprobe, tail call kern progs map definition
Because the previous two commits replaced the bpf_load implementation of the user programs with libbpf, the corresponding kernel programs' map definitions can be replaced with the new BTF-defined map syntax.

This commit only updates the samples that use the libbpf API for loading the bpf program, not bpf_load.

Signed-off-by: Daniel T. Lee <danieltimlee@gmail.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Yonghong Song <yhs@fb.com>
Link: https://lore.kernel.org/bpf/20200516040608.1377876-6-danieltimlee@gmail.com
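For context, the user-space side that this series moves to libbpf typically opens the compiled object and resolves the BTF-defined maps by name. The loader sketch below is illustrative only and is not code from this series: the object file name and the error handling are assumptions, while bpf_object__open_file(), bpf_object__load(), bpf_object__find_map_fd_by_name() and bpf_object__close() are existing libbpf calls.

/* Illustrative libbpf loader sketch (not part of this patch).
 * Assumes the compiled object is "trace_event_kern.o" and that the
 * maps carry the same C identifiers as in the kernel program below.
 */
#include <stdio.h>
#include <bpf/libbpf.h>

int main(void)
{
	struct bpf_object *obj;
	int counts_fd, stackmap_fd;

	obj = bpf_object__open_file("trace_event_kern.o", NULL);
	if (libbpf_get_error(obj))
		return 1;

	if (bpf_object__load(obj))
		return 1;

	/* BTF-defined maps are looked up by their C identifier. */
	counts_fd = bpf_object__find_map_fd_by_name(obj, "counts");
	stackmap_fd = bpf_object__find_map_fd_by_name(obj, "stackmap");
	if (counts_fd < 0 || stackmap_fd < 0)
		return 1;

	printf("counts fd=%d, stackmap fd=%d\n", counts_fd, stackmap_fd);

	bpf_object__close(obj);
	return 0;
}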
Diffstat (limited to 'samples/bpf/trace_event_kern.c')
-rw-r--r--	samples/bpf/trace_event_kern.c	24
1 file changed, 12 insertions, 12 deletions
diff --git a/samples/bpf/trace_event_kern.c b/samples/bpf/trace_event_kern.c
index da1d69e20645..7d3c66fb3f88 100644
--- a/samples/bpf/trace_event_kern.c
+++ b/samples/bpf/trace_event_kern.c
@@ -18,19 +18,19 @@ struct key_t {
u32 userstack;
};
-struct bpf_map_def SEC("maps") counts = {
- .type = BPF_MAP_TYPE_HASH,
- .key_size = sizeof(struct key_t),
- .value_size = sizeof(u64),
- .max_entries = 10000,
-};
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __type(key, struct key_t);
+ __type(value, u64);
+ __uint(max_entries, 10000);
+} counts SEC(".maps");
-struct bpf_map_def SEC("maps") stackmap = {
- .type = BPF_MAP_TYPE_STACK_TRACE,
- .key_size = sizeof(u32),
- .value_size = PERF_MAX_STACK_DEPTH * sizeof(u64),
- .max_entries = 10000,
-};
+struct {
+ __uint(type, BPF_MAP_TYPE_STACK_TRACE);
+ __uint(key_size, sizeof(u32));
+ __uint(value_size, PERF_MAX_STACK_DEPTH * sizeof(u64));
+ __uint(max_entries, 10000);
+} stackmap SEC(".maps");
#define KERN_STACKID_FLAGS (0 | BPF_F_FAST_STACK_CMP)
#define USER_STACKID_FLAGS (0 | BPF_F_FAST_STACK_CMP | BPF_F_USER_STACK)
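Elsewhere in this file (not shown in this hunk), the two maps defined above are used from the perf_event program via the bpf_get_stackid() and bpf_map_lookup_elem()/bpf_map_update_elem() helpers. A rough sketch of that pattern follows; the program name and the kernstack field are assumptions inferred from the userstack field visible in the hunk, not the literal file contents.

/* Rough usage sketch only, assuming a key_t with a kernstack field
 * alongside the userstack field shown in the hunk above. */
SEC("perf_event")
int bpf_prog1(struct bpf_perf_event_data *ctx)
{
	struct key_t key = {};
	u64 one = 1, *val;

	/* Store kernel and user stack traces in stackmap; keep their ids in the key. */
	key.kernstack = bpf_get_stackid(ctx, &stackmap, KERN_STACKID_FLAGS);
	key.userstack = bpf_get_stackid(ctx, &stackmap, USER_STACKID_FLAGS);

	/* Count samples per unique stack pair in the counts hash map. */
	val = bpf_map_lookup_elem(&counts, &key);
	if (val)
		(*val)++;
	else
		bpf_map_update_elem(&counts, &key, &one, BPF_NOEXIST);

	return 0;
}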