author		Namhyung Kim <namhyung@kernel.org>	2023-03-14 16:42:29 -0700
committer	Arnaldo Carvalho de Melo <acme@redhat.com>	2023-03-15 10:34:33 -0300
commit		56ec9457a4a20c5e07ad94bfb6e23077d54cb28e (patch)
tree		cafe1144949eb53e95bfbbfc4e2ce9fb87e04c1f /tools/perf/util/bpf_skel/sample_filter.bpf.c
parent		perf bpf filter: Introduce basic BPF filter expression (diff)
perf bpf filter: Implement event sample filtering
The BPF program will be attached to a perf_event and be triggered when it
overflows. It iterates the filters map and compares the sample value
according to each expression; if any of them fails, the sample is dropped.

The program also needs the corresponding sample data for the expression, so
it checks data->sample_flags against the given value. To access the sample
data, it uses the bpf_cast_to_kern_ctx() kfunc, which was added in the v6.2
kernel.

Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Acked-by: Jiri Olsa <jolsa@kernel.org>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Hao Luo <haoluo@google.com>
Cc: Ian Rogers <irogers@google.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: James Clark <james.clark@arm.com>
Cc: Kan Liang <kan.liang@linux.intel.com>
Cc: Leo Yan <leo.yan@linaro.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ravi Bangoria <ravi.bangoria@amd.com>
Cc: Song Liu <song@kernel.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: bpf@vger.kernel.org
Link: https://lore.kernel.org/r/20230314234237.3008956-2-namhyung@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
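For reference, a user-space counterpart has to populate the filters map and attach the program before any of this runs; that side lands in a separate patch. Below is a minimal sketch of it, not part of this diff: it assumes the bpftool-generated skeleton header (sample_filter.skel.h), the struct perf_bpf_filter_entry and PBF_OP_* definitions from the parent patch's sample-filter.h, and a hypothetical attach_sample_filter() helper taking an already-opened perf_event fd.

/* Hypothetical user-space loader sketch; not part of this patch. */
#include <bpf/libbpf.h>
#include <linux/perf_event.h>
#include "sample-filter.h"
#include "sample_filter.skel.h"

static struct bpf_link *attach_sample_filter(int perf_fd)
{
	struct perf_bpf_filter_entry entry = {
		.op    = PBF_OP_GT,		/* keep samples with period > 1000 */
		.flags = PERF_SAMPLE_PERIOD,
		.value = 1000,
	};
	struct sample_filter_bpf *skel;
	struct bpf_link *link;
	int key = 0;

	skel = sample_filter_bpf__open();
	if (!skel)
		return NULL;

	/* size the array to the number of filters so the BPF loop's
	 * bpf_map_lookup_elem() fails once it walks past the last entry */
	bpf_map__set_max_entries(skel->maps.filters, 1);

	if (sample_filter_bpf__load(skel) < 0)
		goto err;

	if (bpf_map_update_elem(bpf_map__fd(skel->maps.filters), &key,
				&entry, BPF_ANY) < 0)
		goto err;

	link = bpf_program__attach_perf_event(skel->progs.perf_sample_filter,
					      perf_fd);
	if (!link)
		goto err;
	return link;

err:
	sample_filter_bpf__destroy(skel);
	return NULL;
}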
Diffstat (limited to 'tools/perf/util/bpf_skel/sample_filter.bpf.c')
-rw-r--r--	tools/perf/util/bpf_skel/sample_filter.bpf.c	126
1 file changed, 126 insertions(+), 0 deletions(-)
diff --git a/tools/perf/util/bpf_skel/sample_filter.bpf.c b/tools/perf/util/bpf_skel/sample_filter.bpf.c
new file mode 100644
index 000000000000..c07256279c3e
--- /dev/null
+++ b/tools/perf/util/bpf_skel/sample_filter.bpf.c
@@ -0,0 +1,126 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+// Copyright (c) 2023 Google
+#include "vmlinux.h"
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include <bpf/bpf_core_read.h>
+
+#include "sample-filter.h"
+
+/* BPF map that will be filled by user space */
+struct filters {
+ __uint(type, BPF_MAP_TYPE_ARRAY);
+ __type(key, int);
+ __type(value, struct perf_bpf_filter_entry);
+ __uint(max_entries, MAX_FILTERS);
+} filters SEC(".maps");
+
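+/* count of samples dropped by the filter, readable from user space */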
+int dropped;
+
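+/* kfunc (kernel v6.2+) casting the BPF perf_event context to the kernel-side struct */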
+void *bpf_cast_to_kern_ctx(void *) __ksym;
+
+/* new kernel perf_sample_data definition */
+struct perf_sample_data___new {
+ __u64 sample_flags;
+} __attribute__((preserve_access_index));
+
+/* helper function to return the given perf sample data */
+static inline __u64 perf_get_sample(struct bpf_perf_event_data_kern *kctx,
+ struct perf_bpf_filter_entry *entry)
+{
+ struct perf_sample_data___new *data = (void *)kctx->data;
+
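+	/* return 0 if the kernel lacks sample_flags or the requested data was not recorded */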
+ if (!bpf_core_field_exists(data->sample_flags) ||
+ (data->sample_flags & entry->flags) == 0)
+ return 0;
+
+ switch (entry->flags) {
+ case PERF_SAMPLE_IP:
+ return kctx->data->ip;
+ case PERF_SAMPLE_ID:
+ return kctx->data->id;
+ case PERF_SAMPLE_TID:
+ return kctx->data->tid_entry.tid;
+ case PERF_SAMPLE_CPU:
+ return kctx->data->cpu_entry.cpu;
+ case PERF_SAMPLE_TIME:
+ return kctx->data->time;
+ case PERF_SAMPLE_ADDR:
+ return kctx->data->addr;
+ case PERF_SAMPLE_PERIOD:
+ return kctx->data->period;
+ case PERF_SAMPLE_TRANSACTION:
+ return kctx->data->txn;
+ case PERF_SAMPLE_WEIGHT:
+ return kctx->data->weight.full;
+ case PERF_SAMPLE_PHYS_ADDR:
+ return kctx->data->phys_addr;
+ case PERF_SAMPLE_CODE_PAGE_SIZE:
+ return kctx->data->code_page_size;
+ case PERF_SAMPLE_DATA_PAGE_SIZE:
+ return kctx->data->data_page_size;
+ default:
+ break;
+ }
+ return 0;
+}
+
+/* BPF program to be called from perf event overflow handler */
+SEC("perf_event")
+int perf_sample_filter(void *ctx)
+{
+ struct bpf_perf_event_data_kern *kctx;
+ struct perf_bpf_filter_entry *entry;
+ __u64 sample_data;
+ int i;
+
+ kctx = bpf_cast_to_kern_ctx(ctx);
+
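+	/* the loop stops at the first out-of-range key (past the map's max_entries) */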
+ for (i = 0; i < MAX_FILTERS; i++) {
+ int key = i; /* needed for verifier :( */
+
+ entry = bpf_map_lookup_elem(&filters, &key);
+ if (entry == NULL)
+ break;
+ sample_data = perf_get_sample(kctx, entry);
+
+ switch (entry->op) {
+ case PBF_OP_EQ:
+ if (!(sample_data == entry->value))
+ goto drop;
+ break;
+ case PBF_OP_NEQ:
+ if (!(sample_data != entry->value))
+ goto drop;
+ break;
+ case PBF_OP_GT:
+ if (!(sample_data > entry->value))
+ goto drop;
+ break;
+ case PBF_OP_GE:
+ if (!(sample_data >= entry->value))
+ goto drop;
+ break;
+ case PBF_OP_LT:
+ if (!(sample_data < entry->value))
+ goto drop;
+ break;
+ case PBF_OP_LE:
+ if (!(sample_data <= entry->value))
+ goto drop;
+ break;
+ case PBF_OP_AND:
+ if (!(sample_data & entry->value))
+ goto drop;
+ break;
+ }
+ }
+ /* generate sample data */
+ return 1;
+
+drop:
+ __sync_fetch_and_add(&dropped, 1);
+ return 0;
+}
+
+char LICENSE[] SEC("license") = "Dual BSD/GPL";
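A note on the return values above: for a perf_event BPF program, returning 1 lets the overflow handler go on to generate the sample, while returning 0 suppresses it. That is why the fall-through path returns 1 and the drop path returns 0 after bumping the dropped counter. Since dropped is a plain global, user space can read it back through the skeleton's .bss (e.g. skel->bss->dropped with a bpftool-generated skeleton) to report how many samples the filter discarded.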