path: root/tools/perf/util/bpf_counter.h
author     Namhyung Kim <namhyung@kernel.org>  2021-06-25 00:18:25 -0700
committer  Arnaldo Carvalho de Melo <acme@redhat.com>  2021-07-01 16:14:19 -0300
commit     d6a735ef3277c45f48c911b98669174bc4b650d9 (patch)
tree       e0038ac2f047c4a753485631f77c83c5ae094d22 /tools/perf/util/bpf_counter.h
parent     perf tools: Add cgroup_is_v2() helper (diff)
download   linux-dev-d6a735ef3277c45f48c911b98669174bc4b650d9.tar.xz
           linux-dev-d6a735ef3277c45f48c911b98669174bc4b650d9.zip
perf bpf_counter: Move common functions to bpf_counter.h
Some helper functions will be used for cgroup counting too.  Move them
to a header file for sharing.

Committer notes:

Fix the build on older systems with:

  -       struct bpf_map_info map_info = {0};
  +       struct bpf_map_info map_info = { .id = 0, };

This wasn't breaking the build in such systems as bpf_counter.c isn't
built due to:

  tools/perf/util/Build:

    perf-$(CONFIG_PERF_BPF_SKEL) += bpf_counter.o

The bpf_counter.h file, on the other hand, is included from places that
are built everywhere.

Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Acked-by: Song Liu <songliubraving@fb.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Ian Rogers <irogers@google.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Link: http://lore.kernel.org/lkml/20210625071826.608504-4-namhyung@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
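[Editor's sketch, not part of the patch: the committer fix above in isolation. The exact diagnostic on those older systems is an assumption; the commit only says the "{0}" form broke the build there, and the designated initializer avoids it while still zero-initializing the struct.]

  #include <bpf/bpf.h>

  /* Sketch: read a map id from an fd, using the initialization style the
   * committer switched to.  Some older compilers warn on "= {0}" for
   * structs with nested aggregates (assumed cause of the build break). */
  static __u32 example_map_id(int fd)
  {
          struct bpf_map_info map_info = { .id = 0, };   /* not "= {0}" */
          __u32 info_len = sizeof(map_info);

          if (bpf_obj_get_info_by_fd(fd, &map_info, &info_len) < 0)
                  return 0;
          return map_info.id;
  }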
Diffstat (limited to 'tools/perf/util/bpf_counter.h')
-rw-r--r--  tools/perf/util/bpf_counter.h | 52
1 file changed, 52 insertions, 0 deletions
diff --git a/tools/perf/util/bpf_counter.h b/tools/perf/util/bpf_counter.h
index d6d907c3dcf9..65ebaa6694fb 100644
--- a/tools/perf/util/bpf_counter.h
+++ b/tools/perf/util/bpf_counter.h
@@ -3,6 +3,10 @@
#define __PERF_BPF_COUNTER_H 1
#include <linux/list.h>
+#include <sys/resource.h>
+#include <bpf/bpf.h>
+#include <bpf/btf.h>
+#include <bpf/libbpf.h>
struct evsel;
struct target;
@@ -76,4 +80,52 @@ static inline int bpf_counter__install_pe(struct evsel *evsel __maybe_unused,
#endif /* HAVE_BPF_SKEL */
+static inline void set_max_rlimit(void)
+{
+ struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };
+
+ setrlimit(RLIMIT_MEMLOCK, &rinf);
+}
+
+static inline __u32 bpf_link_get_id(int fd)
+{
+ struct bpf_link_info link_info = { .id = 0, };
+ __u32 link_info_len = sizeof(link_info);
+
+ bpf_obj_get_info_by_fd(fd, &link_info, &link_info_len);
+ return link_info.id;
+}
+
+static inline __u32 bpf_link_get_prog_id(int fd)
+{
+ struct bpf_link_info link_info = { .id = 0, };
+ __u32 link_info_len = sizeof(link_info);
+
+ bpf_obj_get_info_by_fd(fd, &link_info, &link_info_len);
+ return link_info.prog_id;
+}
+
+static inline __u32 bpf_map_get_id(int fd)
+{
+ struct bpf_map_info map_info = { .id = 0, };
+ __u32 map_info_len = sizeof(map_info);
+
+ bpf_obj_get_info_by_fd(fd, &map_info, &map_info_len);
+ return map_info.id;
+}
+
+/* trigger the leader program on a cpu */
+static inline int bperf_trigger_reading(int prog_fd, int cpu)
+{
+ DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
+ .ctx_in = NULL,
+ .ctx_size_in = 0,
+ .flags = BPF_F_TEST_RUN_ON_CPU,
+ .cpu = cpu,
+ .retval = 0,
+ );
+
+ return bpf_prog_test_run_opts(prog_fd, &opts);
+}
+
#endif /* __PERF_BPF_COUNTER_H */
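
[Editor's sketch, not part of the patch: a rough illustration of how the now-shared helpers could be consumed by other code, such as the cgroup counting the commit message refers to. The function names and include path below are invented for illustration.]

  #include <stdio.h>
  #include "util/bpf_counter.h"

  /* Hypothetical setup step: raise RLIMIT_MEMLOCK once before any BPF
   * objects are loaded, so map/program creation is not rejected. */
  static void example_setup(void)
  {
          set_max_rlimit();
  }

  /* Hypothetical read step: poke the leader program on one CPU so it
   * refreshes its per-event values before they are collected. */
  static int example_refresh_on_cpu(int leader_prog_fd, int cpu)
  {
          int err = bperf_trigger_reading(leader_prog_fd, cpu);

          if (err)
                  fprintf(stderr, "trigger reading failed on cpu %d: %d\n",
                          cpu, err);
          return err;
  }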