author	Namhyung Kim <namhyung@kernel.org>	2022-10-10 22:28:08 -0700
committer	Arnaldo Carvalho de Melo <acme@redhat.com>	2022-10-14 10:29:05 -0300
commit	f21cb52036373a108acde5853931facfea727a7b (patch)
tree	42fc3816e66fe6d0d268ca190acfcbeef14b3188	/tools/perf/util/bpf_skel/bperf_cgroup.bpf.c
parent	Merge tag 'drm-next-2022-10-14' of git://anongit.freedesktop.org/drm/drm (diff)
perf stat: Support old kernels for bperf cgroup counting
The recent change in the cgroup code breaks backward compatibility in the
BPF program.  It should support both old and new kernels using the BPF
CO-RE technique.  Like the task_struct->__state handling in the offcpu
analysis, we can check the field name in the cgroup struct.

Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Acked-by: Andrii Nakryiko <andrii@kernel.org>
Acked-by: Jiri Olsa <jolsa@kernel.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Song Liu <songliubraving@fb.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: bpf@vger.kernel.org
Cc: cgroups@vger.kernel.org
Cc: zefan li <lizefan.x@bytedance.com>
Link: http://lore.kernel.org/lkml/20221011052808.282394-1-namhyung@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
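
For reference, the task_struct->__state handling mentioned above uses the
same CO-RE "struct flavor" pattern that this patch applies to struct cgroup.
The sketch below is illustrative only and is not part of this patch; the
flavor structs and get_task_state() are a rough, simplified mirror of what
tools/perf/util/bpf_skel/off_cpu.bpf.c does, and the exact names here are
assumptions made for the example:

#include "vmlinux.h"
#include <bpf/bpf_core_read.h>

/* flavor for newer kernels, where the field is named __state */
struct task_struct___new {
	long __state;
} __attribute__((preserve_access_index));

/* flavor for older kernels, where the field is named state */
struct task_struct___old {
	long state;
} __attribute__((preserve_access_index));

static inline int get_task_state(struct task_struct *t)
{
	/* recast pointer to capture the new type for the compiler */
	struct task_struct___new *t_new = (void *)t;

	if (bpf_core_field_exists(t_new->__state))
		return BPF_CORE_READ(t_new, __state);

	/* fall back to the old field name */
	struct task_struct___old *t_old = (void *)t;

	return BPF_CORE_READ(t_old, state);
}

The ___new/___old suffixes are flavor markers that libbpf strips before
matching the types against the running kernel's BTF, so both structs resolve
to the kernel's own definition at load time and only the field access that
actually exists is relocated.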
Diffstat (limited to '')
-rw-r--r--	tools/perf/util/bpf_skel/bperf_cgroup.bpf.c	29
1 file changed, 28 insertions, 1 deletion
diff --git a/tools/perf/util/bpf_skel/bperf_cgroup.bpf.c b/tools/perf/util/bpf_skel/bperf_cgroup.bpf.c
index 435a87556688..6a438e0102c5 100644
--- a/tools/perf/util/bpf_skel/bperf_cgroup.bpf.c
+++ b/tools/perf/util/bpf_skel/bperf_cgroup.bpf.c
@@ -43,6 +43,18 @@ struct {
__uint(value_size, sizeof(struct bpf_perf_event_value));
} cgrp_readings SEC(".maps");
+/* new kernel cgroup definition */
+struct cgroup___new {
+ int level;
+ struct cgroup *ancestors[];
+} __attribute__((preserve_access_index));
+
+/* old kernel cgroup definition */
+struct cgroup___old {
+ int level;
+ u64 ancestor_ids[];
+} __attribute__((preserve_access_index));
+
const volatile __u32 num_events = 1;
const volatile __u32 num_cpus = 1;
@@ -50,6 +62,21 @@ int enabled = 0;
int use_cgroup_v2 = 0;
int perf_subsys_id = -1;
+static inline __u64 get_cgroup_v1_ancestor_id(struct cgroup *cgrp, int level)
+{
+ /* recast pointer to capture new type for compiler */
+ struct cgroup___new *cgrp_new = (void *)cgrp;
+
+ if (bpf_core_field_exists(cgrp_new->ancestors)) {
+ return BPF_CORE_READ(cgrp_new, ancestors[level], kn, id);
+ } else {
+ /* recast pointer to capture old type for compiler */
+ struct cgroup___old *cgrp_old = (void *)cgrp;
+
+ return BPF_CORE_READ(cgrp_old, ancestor_ids[level]);
+ }
+}
+
static inline int get_cgroup_v1_idx(__u32 *cgrps, int size)
{
struct task_struct *p = (void *)bpf_get_current_task();
@@ -77,7 +104,7 @@ static inline int get_cgroup_v1_idx(__u32 *cgrps, int size)
break;
// convert cgroup-id to a map index
- cgrp_id = BPF_CORE_READ(cgrp, ancestors[i], kn, id);
+ cgrp_id = get_cgroup_v1_ancestor_id(cgrp, i);
elem = bpf_map_lookup_elem(&cgrp_idx, &cgrp_id);
if (!elem)
continue;