path: root/tools/perf/util/bpf_counter.c
author		Ian Rogers <irogers@google.com>	2022-05-18 20:20:04 -0700
committer	Arnaldo Carvalho de Melo <acme@redhat.com>	2022-05-23 09:53:06 -0300
commit		54668a4ea03e317049b801c419c78e3ec4f366e6 (patch)
tree		e342790a21b40c3ccb756df172410498b1b3c8e0 /tools/perf/util/bpf_counter.c
parent		perf cpumap: Add perf_cpu_map__for_each_idx() (diff)
perf bpf_counter: Tidy use of CPU map index
BPF counters typically run across all CPUs, so the CPU map index and the CPU
number are the same. There may be cases with offline CPUs where this doesn't
hold, so ensure the CPU map index passed to perf_counts() is valid by
explicitly iterating over the CPU map. This also makes it clearer that users
of perf_counts() are passing an index. Collapse repeated perf_counts() calls
into single uses.

Signed-off-by: Ian Rogers <irogers@google.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Andrii Nakryiko <andrii@kernel.org>
Cc: Daniel Borkmann <daniel@iogearbox.net>
Cc: Dave Marchevsky <davemarchevsky@fb.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: James Clark <james.clark@arm.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: John Fastabend <john.fastabend@gmail.com>
Cc: KP Singh <kpsingh@kernel.org>
Cc: Kan Liang <kan.liang@linux.intel.com>
Cc: Lv Ruyi <lv.ruyi@zte.com.cn>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Martin KaFai Lau <kafai@fb.com>
Cc: Michael Petlan <mpetlan@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Quentin Monnet <quentin@isovalent.com>
Cc: Song Liu <songliubraving@fb.com>
Cc: Stephane Eranian <eranian@google.com>
Cc: Xing Zhengjun <zhengjun.xing@linux.intel.com>
Cc: Yonghong Song <yhs@fb.com>
Cc: bpf@vger.kernel.org
Cc: netdev@vger.kernel.org
Link: https://lore.kernel.org/r/20220519032005.1273691-5-irogers@google.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Diffstat (limited to 'tools/perf/util/bpf_counter.c')
-rw-r--r--	tools/perf/util/bpf_counter.c | 61
1 file changed, 35 insertions(+), 26 deletions(-)
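The whole change turns on one distinction: a perf_cpu_map is a compact, sorted
array of the CPUs an evsel counts on, so a map index only equals a CPU number
when the map covers CPU 0..N with no holes. A minimal sketch of both directions
of the mapping, assuming the tools/lib/perf cpumap API used by this patch
(perf_cpu_map__idx() is declared in the library's internal header); the CPU
list is illustrative:

	#include <stdio.h>
	#include <perf/cpumap.h>	/* tools/lib/perf public API */
	#include <internal/cpumap.h>	/* perf_cpu_map__idx() */

	int main(void)
	{
		/* Pretend CPUs 1 and 3 are offline: the map holds {0, 2, 4, 5}. */
		struct perf_cpu_map *map = perf_cpu_map__new("0,2,4-5");
		struct perf_cpu cpu;
		int idx;

		/* Forward: index -> CPU number. Prints 0->0, 1->2, 2->4, 3->5. */
		perf_cpu_map__for_each_cpu(cpu, idx, map)
			printf("idx=%d -> cpu=%d\n", idx, cpu.cpu);

		/* Reverse: CPU number -> index; -1 when the CPU is not in the map. */
		printf("cpu 4 -> idx %d\n",
		       perf_cpu_map__idx(map, (struct perf_cpu){ .cpu = 4 })); /* 2 */
		printf("cpu 3 -> idx %d\n",
		       perf_cpu_map__idx(map, (struct perf_cpu){ .cpu = 3 })); /* -1 */

		perf_cpu_map__put(map);
		return 0;
	}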
diff --git a/tools/perf/util/bpf_counter.c b/tools/perf/util/bpf_counter.c
index 3ce8d03cb7ec..d4931f54e1dd 100644
--- a/tools/perf/util/bpf_counter.c
+++ b/tools/perf/util/bpf_counter.c
@@ -224,25 +224,25 @@ static int bpf_program_profiler__disable(struct evsel *evsel)
 static int bpf_program_profiler__read(struct evsel *evsel)
 {
-	// perf_cpu_map uses /sys/devices/system/cpu/online
-	int num_cpu = evsel__nr_cpus(evsel);
 	// BPF_MAP_TYPE_PERCPU_ARRAY uses /sys/devices/system/cpu/possible
 	// Sometimes possible > online, like on a Ryzen 3900X that has 24
 	// threads but its possible showed 0-31 -acme
 	int num_cpu_bpf = libbpf_num_possible_cpus();
 	struct bpf_perf_event_value values[num_cpu_bpf];
 	struct bpf_counter *counter;
+	struct perf_counts_values *counts;
 	int reading_map_fd;
 	__u32 key = 0;
-	int err, cpu;
+	int err, idx, bpf_cpu;

 	if (list_empty(&evsel->bpf_counter_list))
 		return -EAGAIN;

-	for (cpu = 0; cpu < num_cpu; cpu++) {
-		perf_counts(evsel->counts, cpu, 0)->val = 0;
-		perf_counts(evsel->counts, cpu, 0)->ena = 0;
-		perf_counts(evsel->counts, cpu, 0)->run = 0;
+	perf_cpu_map__for_each_idx(idx, evsel__cpus(evsel)) {
+		counts = perf_counts(evsel->counts, idx, 0);
+		counts->val = 0;
+		counts->ena = 0;
+		counts->run = 0;
 	}
 	list_for_each_entry(counter, &evsel->bpf_counter_list, list) {
 		struct bpf_prog_profiler_bpf *skel = counter->skel;
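perf_cpu_map__for_each_idx() is introduced by the parent commit of this series;
it visits each valid index of the map, so the zeroing loop above can no longer
step past the bounds of the perf_counts storage. A plausible definition, for
orientation only (the real macro lives in tools/lib/perf/include/perf/cpumap.h):

	#define perf_cpu_map__for_each_idx(idx, cpus)	\
		for ((idx) = 0; (idx) < perf_cpu_map__nr(cpus); (idx)++)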
@@ -256,10 +256,15 @@ static int bpf_program_profiler__read(struct evsel *evsel)
 			return err;
 		}

-		for (cpu = 0; cpu < num_cpu; cpu++) {
-			perf_counts(evsel->counts, cpu, 0)->val += values[cpu].counter;
-			perf_counts(evsel->counts, cpu, 0)->ena += values[cpu].enabled;
-			perf_counts(evsel->counts, cpu, 0)->run += values[cpu].running;
+		for (bpf_cpu = 0; bpf_cpu < num_cpu_bpf; bpf_cpu++) {
+			idx = perf_cpu_map__idx(evsel__cpus(evsel),
+						(struct perf_cpu){.cpu = bpf_cpu});
+			if (idx == -1)
+				continue;
+			counts = perf_counts(evsel->counts, idx, 0);
+			counts->val += values[bpf_cpu].counter;
+			counts->ena += values[bpf_cpu].enabled;
+			counts->run += values[bpf_cpu].running;
 		}
 	}
 	return 0;
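The rewritten accumulation inverts the old loop: values[] is indexed by CPU
number and sized by libbpf_num_possible_cpus() (the BPF_MAP_TYPE_PERCPU_ARRAY
convention), while perf_counts() wants a map index. The old code used the CPU
number as the index, which could run past the counts storage when possible >
online; the new loop translates each possible CPU and skips those without an
index. The guard pattern in isolation, as a sketch with the surrounding
declarations elided:

	for (bpf_cpu = 0; bpf_cpu < num_cpu_bpf; bpf_cpu++) {
		/* Translate CPU number -> map index; unmapped CPUs get -1. */
		idx = perf_cpu_map__idx(evsel__cpus(evsel),
					(struct perf_cpu){ .cpu = bpf_cpu });
		if (idx == -1)
			continue;	/* possible CPU, but not one we count on */
		/* Safe: idx is a valid perf_counts index by construction. */
		counts = perf_counts(evsel->counts, idx, 0);
		counts->val += values[bpf_cpu].counter;
	}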
@@ -621,6 +626,7 @@ static int bperf__read(struct evsel *evsel)
 	struct bperf_follower_bpf *skel = evsel->follower_skel;
 	__u32 num_cpu_bpf = cpu__max_cpu().cpu;
 	struct bpf_perf_event_value values[num_cpu_bpf];
+	struct perf_counts_values *counts;
 	int reading_map_fd, err = 0;
 	__u32 i;
 	int j;
@@ -639,29 +645,32 @@ static int bperf__read(struct evsel *evsel)
 		case BPERF_FILTER_GLOBAL:
 			assert(i == 0);

-			perf_cpu_map__for_each_cpu(entry, j, all_cpu_map) {
-				cpu = entry.cpu;
-				perf_counts(evsel->counts, cpu, 0)->val = values[cpu].counter;
-				perf_counts(evsel->counts, cpu, 0)->ena = values[cpu].enabled;
-				perf_counts(evsel->counts, cpu, 0)->run = values[cpu].running;
+			perf_cpu_map__for_each_cpu(entry, j, evsel__cpus(evsel)) {
+				counts = perf_counts(evsel->counts, j, 0);
+				counts->val = values[entry.cpu].counter;
+				counts->ena = values[entry.cpu].enabled;
+				counts->run = values[entry.cpu].running;
 			}
 			break;
 		case BPERF_FILTER_CPU:
-			cpu = evsel->core.cpus->map[i].cpu;
-			perf_counts(evsel->counts, i, 0)->val = values[cpu].counter;
-			perf_counts(evsel->counts, i, 0)->ena = values[cpu].enabled;
-			perf_counts(evsel->counts, i, 0)->run = values[cpu].running;
+			cpu = perf_cpu_map__cpu(evsel__cpus(evsel), i).cpu;
+			assert(cpu >= 0);
+			counts = perf_counts(evsel->counts, i, 0);
+			counts->val = values[cpu].counter;
+			counts->ena = values[cpu].enabled;
+			counts->run = values[cpu].running;
 			break;
 		case BPERF_FILTER_PID:
 		case BPERF_FILTER_TGID:
-			perf_counts(evsel->counts, 0, i)->val = 0;
-			perf_counts(evsel->counts, 0, i)->ena = 0;
-			perf_counts(evsel->counts, 0, i)->run = 0;
+			counts = perf_counts(evsel->counts, 0, i);
+			counts->val = 0;
+			counts->ena = 0;
+			counts->run = 0;

 			for (cpu = 0; cpu < num_cpu_bpf; cpu++) {
-				perf_counts(evsel->counts, 0, i)->val += values[cpu].counter;
-				perf_counts(evsel->counts, 0, i)->ena += values[cpu].enabled;
-				perf_counts(evsel->counts, 0, i)->run += values[cpu].running;
+				counts->val += values[cpu].counter;
+				counts->ena += values[cpu].enabled;
+				counts->run += values[cpu].running;
 			}
 			break;
 		default:
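In bperf__read() the same index discipline shows up three ways:
BPERF_FILTER_GLOBAL iterates the evsel's own map and uses the iteration index
j directly, BPERF_FILTER_CPU converts index i to a CPU number with
perf_cpu_map__cpu(), and the PID/TGID filters are not per CPU at all, so every
possible CPU's slice of values[] folds into the single slot for thread i. The
shape of that last aggregation, as a sketch with illustrative surrounding
names, hoisting the perf_counts() lookup out of the loop exactly as the patch
does:

	/* One slot per thread: CPU index 0, thread index i. */
	struct perf_counts_values *counts = perf_counts(evsel->counts, 0, i);

	counts->val = counts->ena = counts->run = 0;
	for (cpu = 0; cpu < num_cpu_bpf; cpu++) {
		counts->val += values[cpu].counter;	/* sum across possible CPUs */
		counts->ena += values[cpu].enabled;
		counts->run += values[cpu].running;
	}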