Diffstat:
 -rw-r--r--  tools/perf/builtin-stat.c | 1779
 1 file changed, 1145 insertions, 634 deletions
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index b01af171d94f..35f79b48e8dc 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -41,10 +41,10 @@ */ #include "builtin.h" -#include "perf.h" #include "util/cgroup.h" #include <subcmd/parse-options.h> #include "util/parse-events.h" +#include "util/pmus.h" #include "util/pmu.h" #include "util/event.h" #include "util/evlist.h" @@ -67,6 +67,9 @@ #include "util/top.h" #include "util/affinity.h" #include "util/pfm.h" +#include "util/bpf_counter.h" +#include "util/iostat.h" +#include "util/util.h" #include "asm/bug.h" #include <linux/time64.h> @@ -89,63 +92,19 @@ #include <linux/ctype.h> #include <perf/evlist.h> +#include <internal/threadmap.h> #define DEFAULT_SEPARATOR " " #define FREEZE_ON_SMI_PATH "devices/cpu/freeze_on_smi" static void print_counters(struct timespec *ts, int argc, const char **argv); -/* Default events used for perf stat -T */ -static const char *transaction_attrs = { - "task-clock," - "{" - "instructions," - "cycles," - "cpu/cycles-t/," - "cpu/tx-start/," - "cpu/el-start/," - "cpu/cycles-ct/" - "}" -}; - -/* More limited version when the CPU does not have all events. */ -static const char * transaction_limited_attrs = { - "task-clock," - "{" - "instructions," - "cycles," - "cpu/cycles-t/," - "cpu/tx-start/" - "}" -}; - -static const char * topdown_attrs[] = { - "topdown-total-slots", - "topdown-slots-retired", - "topdown-recovery-bubbles", - "topdown-fetch-bubbles", - "topdown-slots-issued", - NULL, -}; - -static const char *topdown_metric_attrs[] = { - "slots", - "topdown-retiring", - "topdown-bad-spec", - "topdown-fe-bound", - "topdown-be-bound", - NULL, -}; - -static const char *smi_cost_attrs = { - "{" - "msr/aperf/," - "msr/smi/," - "cycles" - "}" +static struct evlist *evsel_list; +static struct parse_events_option_args parse_events_option_args = { + .evlistp = &evsel_list, }; -static struct evlist *evsel_list; +static bool all_counters_use_bpf = true; static struct target target = { .uid = UINT_MAX, @@ -153,14 +112,13 @@ static struct target target = { #define METRIC_ONLY_LEN 20 -static volatile pid_t child_pid = -1; +static volatile sig_atomic_t child_pid = -1; static int detailed_run = 0; static bool transaction_run; static bool topdown_run = false; static bool smi_cost = false; static bool smi_reset = false; static int big_num_opt = -1; -static bool group = false; static const char *pre_cmd = NULL; static const char *post_cmd = NULL; static bool sync_run = false; @@ -171,6 +129,7 @@ static bool append_file; static bool interval_count; static const char *output_name; static int output_fd; +static char *metrics; struct perf_stat { bool record; @@ -182,75 +141,63 @@ struct perf_stat { struct perf_cpu_map *cpus; struct perf_thread_map *threads; enum aggr_mode aggr_mode; + u32 aggr_level; }; static struct perf_stat perf_stat; #define STAT_RECORD perf_stat.record -static volatile int done = 0; +static volatile sig_atomic_t done = 0; static struct perf_stat_config stat_config = { .aggr_mode = AGGR_GLOBAL, + .aggr_level = MAX_CACHE_LVL + 1, .scale = true, .unit_width = 4, /* strlen("unit") */ .run_count = 1, .metric_only_len = METRIC_ONLY_LEN, .walltime_nsecs_stats = &walltime_nsecs_stats, + .ru_stats = &ru_stats, .big_num = true, .ctl_fd = -1, - .ctl_fd_ack = -1 + .ctl_fd_ack = -1, + .iostat_run = false, }; -static bool cpus_map_matched(struct evsel *a, struct evsel *b) -{ - if (!a->core.cpus && !b->core.cpus) - return true; - - if (!a->core.cpus || !b->core.cpus) - return false; 
- - if (a->core.cpus->nr != b->core.cpus->nr) - return false; - - for (int i = 0; i < a->core.cpus->nr; i++) { - if (a->core.cpus->map[i] != b->core.cpus->map[i]) - return false; - } - - return true; -} - static void evlist__check_cpu_maps(struct evlist *evlist) { - struct evsel *evsel, *pos, *leader; - char buf[1024]; + struct evsel *evsel, *warned_leader = NULL; evlist__for_each_entry(evlist, evsel) { - leader = evsel->leader; + struct evsel *leader = evsel__leader(evsel); /* Check that leader matches cpus with each member. */ if (leader == evsel) continue; - if (cpus_map_matched(leader, evsel)) + if (perf_cpu_map__equal(leader->core.cpus, evsel->core.cpus)) continue; /* If there's mismatch disable the group and warn user. */ - WARN_ONCE(1, "WARNING: grouped events cpus do not match, disabling group:\n"); - evsel__group_desc(leader, buf, sizeof(buf)); - pr_warning(" %s\n", buf); + if (warned_leader != leader) { + char buf[200]; + + pr_warning("WARNING: grouped events cpus do not match.\n" + "Events with CPUs not matching the leader will " + "be removed from the group.\n"); + evsel__group_desc(leader, buf, sizeof(buf)); + pr_warning(" %s\n", buf); + warned_leader = leader; + } + if (verbose > 0) { + char buf[200]; - if (verbose) { cpu_map__snprint(leader->core.cpus, buf, sizeof(buf)); pr_warning(" %s: %s\n", leader->name, buf); cpu_map__snprint(evsel->core.cpus, buf, sizeof(buf)); pr_warning(" %s: %s\n", evsel->name, buf); } - for_each_group_evsel(pos, leader) { - pos->leader = pos; - pos->core.nr_members = 0; - } - evsel->leader->core.nr_members = 0; + evsel__remove_from_group(evsel, leader); } } @@ -268,13 +215,8 @@ static inline void diff_timespec(struct timespec *r, struct timespec *a, static void perf_stat__reset_stats(void) { - int i; - - perf_evlist__reset_stats(evsel_list); + evlist__reset_stats(evsel_list); perf_stat__reset_shadow_stats(); - - for (i = 0; i < stat_config.stats_num; i++) - perf_stat__reset_shadow_per_stat(&stat_config.stats[i]); } static int process_synthesized_event(struct perf_tool *tool __maybe_unused, @@ -303,34 +245,55 @@ static int write_stat_round_event(u64 tm, u64 type) #define SID(e, x, y) xyarray__entry(e->core.sample_id, x, y) -static int evsel__write_stat_event(struct evsel *counter, u32 cpu, u32 thread, +static int evsel__write_stat_event(struct evsel *counter, int cpu_map_idx, u32 thread, struct perf_counts_values *count) { - struct perf_sample_id *sid = SID(counter, cpu, thread); + struct perf_sample_id *sid = SID(counter, cpu_map_idx, thread); + struct perf_cpu cpu = perf_cpu_map__cpu(evsel__cpus(counter), cpu_map_idx); return perf_event__synthesize_stat(NULL, cpu, thread, sid->id, count, process_synthesized_event, NULL); } -static int read_single_counter(struct evsel *counter, int cpu, +static int read_single_counter(struct evsel *counter, int cpu_map_idx, int thread, struct timespec *rs) { - if (counter->tool_event == PERF_TOOL_DURATION_TIME) { - u64 val = rs->tv_nsec + rs->tv_sec*1000000000ULL; - struct perf_counts_values *count = - perf_counts(counter->counts, cpu, thread); - count->ena = count->run = val; - count->val = val; - return 0; + switch(counter->tool_event) { + case PERF_TOOL_DURATION_TIME: { + u64 val = rs->tv_nsec + rs->tv_sec*1000000000ULL; + struct perf_counts_values *count = + perf_counts(counter->counts, cpu_map_idx, thread); + count->ena = count->run = val; + count->val = val; + return 0; + } + case PERF_TOOL_USER_TIME: + case PERF_TOOL_SYSTEM_TIME: { + u64 val; + struct perf_counts_values *count = + 
perf_counts(counter->counts, cpu_map_idx, thread); + if (counter->tool_event == PERF_TOOL_USER_TIME) + val = ru_stats.ru_utime_usec_stat.mean; + else + val = ru_stats.ru_stime_usec_stat.mean; + count->ena = count->run = val; + count->val = val; + return 0; + } + default: + case PERF_TOOL_NONE: + return evsel__read_counter(counter, cpu_map_idx, thread); + case PERF_TOOL_MAX: + /* This should never be reached */ + return 0; } - return evsel__read_counter(counter, cpu, thread); } /* * Read out the results of a single counter: * do not aggregate counts across CPUs in system-wide mode */ -static int read_counter_cpu(struct evsel *counter, struct timespec *rs, int cpu) +static int read_counter_cpu(struct evsel *counter, struct timespec *rs, int cpu_map_idx) { int nthreads = perf_thread_map__nr(evsel_list->core.threads); int thread; @@ -338,30 +301,27 @@ static int read_counter_cpu(struct evsel *counter, struct timespec *rs, int cpu) if (!counter->supported) return -ENOENT; - if (counter->core.system_wide) - nthreads = 1; - for (thread = 0; thread < nthreads; thread++) { struct perf_counts_values *count; - count = perf_counts(counter->counts, cpu, thread); + count = perf_counts(counter->counts, cpu_map_idx, thread); /* * The leader's group read loads data into its group members * (via evsel__read_counter()) and sets their count->loaded. */ - if (!perf_counts__is_loaded(counter->counts, cpu, thread) && - read_single_counter(counter, cpu, thread, rs)) { + if (!perf_counts__is_loaded(counter->counts, cpu_map_idx, thread) && + read_single_counter(counter, cpu_map_idx, thread, rs)) { counter->counts->scaled = -1; - perf_counts(counter->counts, cpu, thread)->ena = 0; - perf_counts(counter->counts, cpu, thread)->run = 0; + perf_counts(counter->counts, cpu_map_idx, thread)->ena = 0; + perf_counts(counter->counts, cpu_map_idx, thread)->run = 0; return -1; } - perf_counts__set_loaded(counter->counts, cpu, thread, false); + perf_counts__set_loaded(counter->counts, cpu_map_idx, thread, false); if (STAT_RECORD) { - if (evsel__write_stat_event(counter, cpu, thread, count)) { + if (evsel__write_stat_event(counter, cpu_map_idx, thread, count)) { pr_err("failed to write stat event\n"); return -1; } @@ -371,7 +331,8 @@ static int read_counter_cpu(struct evsel *counter, struct timespec *rs, int cpu) fprintf(stat_config.output, "%s: %d: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n", evsel__name(counter), - cpu, + perf_cpu_map__cpu(evsel__cpus(counter), + cpu_map_idx).cpu, count->val, count->ena, count->run); } } @@ -381,88 +342,76 @@ static int read_counter_cpu(struct evsel *counter, struct timespec *rs, int cpu) static int read_affinity_counters(struct timespec *rs) { - struct evsel *counter; - struct affinity affinity; - int i, ncpus, cpu; + struct evlist_cpu_iterator evlist_cpu_itr; + struct affinity saved_affinity, *affinity; - if (affinity__setup(&affinity) < 0) - return -1; + if (all_counters_use_bpf) + return 0; - ncpus = perf_cpu_map__nr(evsel_list->core.all_cpus); if (!target__has_cpu(&target) || target__has_per_thread(&target)) - ncpus = 1; - evlist__for_each_cpu(evsel_list, i, cpu) { - if (i >= ncpus) - break; - affinity__set(&affinity, cpu); + affinity = NULL; + else if (affinity__setup(&saved_affinity) < 0) + return -1; + else + affinity = &saved_affinity; - evlist__for_each_entry(evsel_list, counter) { - if (evsel__cpu_iter_skip(counter, cpu)) - continue; - if (!counter->err) { - counter->err = read_counter_cpu(counter, rs, - counter->cpu_iter - 1); - } + evlist__for_each_cpu(evlist_cpu_itr, evsel_list, 
affinity) { + struct evsel *counter = evlist_cpu_itr.evsel; + + if (evsel__is_bpf(counter)) + continue; + + if (!counter->err) { + counter->err = read_counter_cpu(counter, rs, + evlist_cpu_itr.cpu_map_idx); } } - affinity__cleanup(&affinity); + if (affinity) + affinity__cleanup(&saved_affinity); + return 0; } -static void read_counters(struct timespec *rs) +static int read_bpf_map_counters(void) { struct evsel *counter; - - if (!stat_config.stop_read_counter && (read_affinity_counters(rs) < 0)) - return; + int err; evlist__for_each_entry(evsel_list, counter) { - if (counter->err) - pr_debug("failed to read counter %s\n", counter->name); - if (counter->err == 0 && perf_stat_process_counter(&stat_config, counter)) - pr_warning("failed to process counter %s\n", counter->name); - counter->err = 0; - } -} - -static int runtime_stat_new(struct perf_stat_config *config, int nthreads) -{ - int i; - - config->stats = calloc(nthreads, sizeof(struct runtime_stat)); - if (!config->stats) - return -1; - - config->stats_num = nthreads; - - for (i = 0; i < nthreads; i++) - runtime_stat__init(&config->stats[i]); + if (!evsel__is_bpf(counter)) + continue; + err = bpf_counter__read(counter); + if (err) + return err; + } return 0; } -static void runtime_stat_delete(struct perf_stat_config *config) +static int read_counters(struct timespec *rs) { - int i; - - if (!config->stats) - return; - - for (i = 0; i < config->stats_num; i++) - runtime_stat__exit(&config->stats[i]); - - zfree(&config->stats); + if (!stat_config.stop_read_counter) { + if (read_bpf_map_counters() || + read_affinity_counters(rs)) + return -1; + } + return 0; } -static void runtime_stat_reset(struct perf_stat_config *config) +static void process_counters(void) { - int i; + struct evsel *counter; - if (!config->stats) - return; + evlist__for_each_entry(evsel_list, counter) { + if (counter->err) + pr_debug("failed to read counter %s\n", counter->name); + if (counter->err == 0 && perf_stat_process_counter(&stat_config, counter)) + pr_warning("failed to process counter %s\n", counter->name); + counter->err = 0; + } - for (i = 0; i < config->stats_num; i++) - perf_stat__reset_shadow_per_stat(&config->stats[i]); + perf_stat_merge_counters(&stat_config, evsel_list); + perf_stat_process_percore(&stat_config, evsel_list); } static void process_interval(void) @@ -472,9 +421,10 @@ static void process_interval(void) clock_gettime(CLOCK_MONOTONIC, &ts); diff_timespec(&rs, &ts, &ref_time); - perf_stat__reset_shadow_per_stat(&rt_stat); - runtime_stat_reset(&stat_config); - read_counters(&rs); + evlist__reset_aggr_stats(evsel_list); + + if (read_counters(&rs) == 0) + process_counters(); if (STAT_RECORD) { if (WRITE_STAT_ROUND_EVENT(rs.tv_sec * NSEC_PER_SEC + rs.tv_nsec, INTERVAL)) @@ -496,45 +446,48 @@ static bool handle_interval(unsigned int interval, int *times) return false; } -static void enable_counters(void) +static int enable_counters(void) { - if (stat_config.initial_delay < 0) { - pr_info(EVLIST_DISABLED_MSG); - return; - } + struct evsel *evsel; + int err; - if (stat_config.initial_delay > 0) { - pr_info(EVLIST_DISABLED_MSG); - usleep(stat_config.initial_delay * USEC_PER_MSEC); + evlist__for_each_entry(evsel_list, evsel) { + if (!evsel__is_bpf(evsel)) + continue; + + err = bpf_counter__enable(evsel); + if (err) + return err; } - /* - * We need to enable counters only if: - * - we don't have tracee (attaching to task or cpu) - * - we have initial delay configured - */ - if (!target__none(&target) || stat_config.initial_delay) { - 
evlist__enable(evsel_list); - if (stat_config.initial_delay > 0) - pr_info(EVLIST_ENABLED_MSG); + if (!target__enable_on_exec(&target)) { + if (!all_counters_use_bpf) + evlist__enable(evsel_list); } + return 0; } static void disable_counters(void) { + struct evsel *counter; + /* * If we don't have tracee (attaching to task or cpu), counters may * still be running. To get accurate group ratios, we must stop groups * from counting before reading their constituent counters. */ - if (!target__none(&target)) - evlist__disable(evsel_list); + if (!target__none(&target)) { + evlist__for_each_entry(evsel_list, counter) + bpf_counter__disable(counter); + if (!all_counters_use_bpf) + evlist__disable(evsel_list); + } } -static volatile int workload_exec_errno; +static volatile sig_atomic_t workload_exec_errno; /* - * perf_evlist__prepare_workload will send a SIGUSR1 + * evlist__prepare_workload will send a SIGUSR1 * if the fork fails, since we asked by setting its * want_signal to true. */ @@ -578,18 +531,17 @@ static void process_evlist(struct evlist *evlist, unsigned int interval) if (evlist__ctlfd_process(evlist, &cmd) > 0) { switch (cmd) { case EVLIST_CTL_CMD_ENABLE: - pr_info(EVLIST_ENABLED_MSG); - if (interval) - process_interval(); - break; + fallthrough; case EVLIST_CTL_CMD_DISABLE: if (interval) process_interval(); - pr_info(EVLIST_DISABLED_MSG); break; case EVLIST_CTL_CMD_SNAPSHOT: case EVLIST_CTL_CMD_ACK: case EVLIST_CTL_CMD_UNSUPPORTED: + case EVLIST_CTL_CMD_EVLIST: + case EVLIST_CTL_CMD_STOP: + case EVLIST_CTL_CMD_PING: default: break; } @@ -678,10 +630,10 @@ static enum counter_recovery stat_handle_error(struct evsel *counter) */ counter->errored = true; - if ((counter->leader != counter) || - !(counter->leader->core.nr_members > 1)) + if ((evsel__leader(counter) != counter) || + !(counter->core.leader->nr_members > 1)) return COUNTER_SKIP; - } else if (evsel__fallback(counter, errno, msg, sizeof(msg))) { + } else if (evsel__fallback(counter, &target, errno, msg, sizeof(msg))) { if (verbose > 0) ui__warning("%s\n", msg); return COUNTER_RETRY; @@ -697,6 +649,13 @@ static enum counter_recovery stat_handle_error(struct evsel *counter) evsel_list->core.threads->err_thread = -1; return COUNTER_RETRY; } + } else if (counter->skippable) { + if (verbose > 0) + ui__warning("skipping event %s that kernel failed to open .\n", + evsel__name(counter)); + counter->supported = false; + counter->errored = true; + return COUNTER_SKIP; } evsel__open_strerror(counter, &target, errno, msg, sizeof(msg)); @@ -719,67 +678,82 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx) int status = 0; const bool forks = (argc > 0); bool is_pipe = STAT_RECORD ? 
perf_stat.data.is_pipe : false; - struct affinity affinity; - int i, cpu; + struct evlist_cpu_iterator evlist_cpu_itr; + struct affinity saved_affinity, *affinity = NULL; + int err; bool second_pass = false; if (forks) { - if (perf_evlist__prepare_workload(evsel_list, &target, argv, is_pipe, - workload_exec_failed_signal) < 0) { + if (evlist__prepare_workload(evsel_list, &target, argv, is_pipe, workload_exec_failed_signal) < 0) { perror("failed to prepare workload"); return -1; } child_pid = evsel_list->workload.pid; } - if (group) - perf_evlist__set_leader(evsel_list); + if (!cpu_map__is_dummy(evsel_list->core.user_requested_cpus)) { + if (affinity__setup(&saved_affinity) < 0) + return -1; + affinity = &saved_affinity; + } - if (affinity__setup(&affinity) < 0) - return -1; + evlist__for_each_entry(evsel_list, counter) { + counter->reset_group = false; + if (bpf_counter__load(counter, &target)) + return -1; + if (!(evsel__is_bperf(counter))) + all_counters_use_bpf = false; + } - evlist__for_each_cpu (evsel_list, i, cpu) { - affinity__set(&affinity, cpu); + evlist__reset_aggr_stats(evsel_list); - evlist__for_each_entry(evsel_list, counter) { - if (evsel__cpu_iter_skip(counter, cpu)) - continue; - if (counter->reset_group || counter->errored) - continue; -try_again: - if (create_perf_stat_counter(counter, &stat_config, &target, - counter->cpu_iter - 1) < 0) { - - /* - * Weak group failed. We cannot just undo this here - * because earlier CPUs might be in group mode, and the kernel - * doesn't support mixing group and non group reads. Defer - * it to later. - * Don't close here because we're in the wrong affinity. - */ - if ((errno == EINVAL || errno == EBADF) && - counter->leader != counter && - counter->weak_group) { - perf_evlist__reset_weak_group(evsel_list, counter, false); - assert(counter->reset_group); - second_pass = true; - continue; - } + evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) { + counter = evlist_cpu_itr.evsel; - switch (stat_handle_error(counter)) { - case COUNTER_FATAL: - return -1; - case COUNTER_RETRY: - goto try_again; - case COUNTER_SKIP: - continue; - default: - break; - } + /* + * bperf calls evsel__open_per_cpu() in bperf__load(), so + * no need to call it again here. + */ + if (target.use_bpf) + break; + if (counter->reset_group || counter->errored) + continue; + if (evsel__is_bperf(counter)) + continue; +try_again: + if (create_perf_stat_counter(counter, &stat_config, &target, + evlist_cpu_itr.cpu_map_idx) < 0) { + + /* + * Weak group failed. We cannot just undo this here + * because earlier CPUs might be in group mode, and the kernel + * doesn't support mixing group and non group reads. Defer + * it to later. + * Don't close here because we're in the wrong affinity. + */ + if ((errno == EINVAL || errno == EBADF) && + evsel__leader(counter) != counter && + counter->weak_group) { + evlist__reset_weak_group(evsel_list, counter, false); + assert(counter->reset_group); + second_pass = true; + continue; } - counter->supported = true; + + switch (stat_handle_error(counter)) { + case COUNTER_FATAL: + return -1; + case COUNTER_RETRY: + goto try_again; + case COUNTER_SKIP: + continue; + default: + break; + } + } + counter->supported = true; } if (second_pass) { @@ -788,45 +762,41 @@ try_again: * and also close errored counters. 
*/ - evlist__for_each_cpu(evsel_list, i, cpu) { - affinity__set(&affinity, cpu); - /* First close errored or weak retry */ - evlist__for_each_entry(evsel_list, counter) { - if (!counter->reset_group && !counter->errored) - continue; - if (evsel__cpu_iter_skip_no_inc(counter, cpu)) - continue; - perf_evsel__close_cpu(&counter->core, counter->cpu_iter); - } - /* Now reopen weak */ - evlist__for_each_entry(evsel_list, counter) { - if (!counter->reset_group && !counter->errored) - continue; - if (evsel__cpu_iter_skip(counter, cpu)) - continue; - if (!counter->reset_group) - continue; + /* First close errored or weak retry */ + evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) { + counter = evlist_cpu_itr.evsel; + + if (!counter->reset_group && !counter->errored) + continue; + + perf_evsel__close_cpu(&counter->core, evlist_cpu_itr.cpu_map_idx); + } + /* Now reopen weak */ + evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) { + counter = evlist_cpu_itr.evsel; + + if (!counter->reset_group) + continue; try_again_reset: - pr_debug2("reopening weak %s\n", evsel__name(counter)); - if (create_perf_stat_counter(counter, &stat_config, &target, - counter->cpu_iter - 1) < 0) { - - switch (stat_handle_error(counter)) { - case COUNTER_FATAL: - return -1; - case COUNTER_RETRY: - goto try_again_reset; - case COUNTER_SKIP: - continue; - default: - break; - } + pr_debug2("reopening weak %s\n", evsel__name(counter)); + if (create_perf_stat_counter(counter, &stat_config, &target, + evlist_cpu_itr.cpu_map_idx) < 0) { + + switch (stat_handle_error(counter)) { + case COUNTER_FATAL: + return -1; + case COUNTER_RETRY: + goto try_again_reset; + case COUNTER_SKIP: + continue; + default: + break; } - counter->supported = true; } + counter->supported = true; } } - affinity__cleanup(&affinity); + affinity__cleanup(affinity); evlist__for_each_entry(evsel_list, counter) { if (!counter->supported) { @@ -843,7 +813,7 @@ try_again_reset: return -1; } - if (perf_evlist__apply_filters(evsel_list, &counter)) { + if (evlist__apply_filters(evsel_list, &counter)) { pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n", counter->filter, evsel__name(counter), errno, str_error_r(errno, msg, sizeof(msg))); @@ -851,7 +821,7 @@ try_again_reset: } if (STAT_RECORD) { - int err, fd = perf_data__fd(&perf_stat.data); + int fd = perf_data__fd(&perf_stat.data); if (is_pipe) { err = perf_header__write_pipe(perf_data__fd(&perf_stat.data)); @@ -869,16 +839,31 @@ try_again_reset: return err; } - /* - * Enable counters and exec the command: - */ + if (target.initial_delay) { + pr_info(EVLIST_DISABLED_MSG); + } else { + err = enable_counters(); + if (err) + return -1; + } + + /* Exec the command, if any */ + if (forks) + evlist__start_workload(evsel_list); + + if (target.initial_delay > 0) { + usleep(target.initial_delay * USEC_PER_MSEC); + err = enable_counters(); + if (err) + return -1; + + pr_info(EVLIST_ENABLED_MSG); + } + t0 = rdclock(); clock_gettime(CLOCK_MONOTONIC, &ref_time); if (forks) { - perf_evlist__start_workload(evsel_list); - enable_counters(); - if (interval || timeout || evlist__ctlfd_initialized(evsel_list)) status = dispatch_events(forks, timeout, interval, ×); if (child_pid != -1) { @@ -896,7 +881,6 @@ try_again_reset: if (WIFSIGNALED(status)) psignal(WTERMSIG(status), argv[0]); } else { - enable_counters(); status = dispatch_events(forks, timeout, interval, ×); } @@ -913,15 +897,13 @@ try_again_reset: init_stats(&walltime_nsecs_stats); update_stats(&walltime_nsecs_stats, t1 - t0); - if 
(stat_config.aggr_mode == AGGR_GLOBAL) - perf_evlist__save_aggr_prev_raw_counts(evsel_list); - - perf_evlist__copy_prev_raw_counts(evsel_list); - perf_evlist__reset_prev_raw_counts(evsel_list); - runtime_stat_reset(&stat_config); - perf_stat__reset_shadow_per_stat(&rt_stat); - } else + evlist__copy_prev_raw_counts(evsel_list); + evlist__reset_prev_raw_counts(evsel_list); + evlist__reset_aggr_stats(evsel_list); + } else { update_stats(&walltime_nsecs_stats, t1 - t0); + update_rusage_stats(&ru_stats, &stat_config.ru_data); + } /* * Closing a group leader splits the group, and as we only disable @@ -929,7 +911,8 @@ try_again_reset: * avoid arbitrary skew, we must read all counters before closing any * group leaders. */ - read_counters(&(struct timespec) { .tv_nsec = t1-t0 }); + if (read_counters(&(struct timespec) { .tv_nsec = t1-t0 }) == 0) + process_counters(); /* * We need to keep evsel_list alive, because it's processed @@ -972,12 +955,13 @@ static void print_counters(struct timespec *ts, int argc, const char **argv) /* Do not print anything if we record to the pipe. */ if (STAT_RECORD && perf_stat.data.is_pipe) return; + if (quiet) + return; - perf_evlist__print_counters(evsel_list, &stat_config, &target, - ts, argc, argv); + evlist__print_counters(evsel_list, &stat_config, &target, ts, argc, argv); } -static volatile int signr = -1; +static volatile sig_atomic_t signr = -1; static void skip_signal(int signo) { @@ -1025,6 +1009,11 @@ void perf_stat__set_big_num(int set) stat_config.big_num = (set != 0); } +void perf_stat__set_no_csv_summary(int set) +{ + stat_config.no_csv_summary = (set != 0); +} + static int stat__set_big_num(const struct option *opt __maybe_unused, const char *s __maybe_unused, int unset) { @@ -1041,14 +1030,23 @@ static int enable_metric_only(const struct option *opt __maybe_unused, return 0; } -static int parse_metric_groups(const struct option *opt, +static int append_metric_groups(const struct option *opt __maybe_unused, const char *str, int unset __maybe_unused) { - return metricgroup__parse_groups(opt, str, - stat_config.metric_no_group, - stat_config.metric_no_merge, - &stat_config.metric_events); + if (metrics) { + char *tmp; + + if (asprintf(&tmp, "%s,%s", metrics, str) < 0) + return -ENOMEM; + free(metrics); + metrics = tmp; + } else { + metrics = strdup(str); + if (!metrics) + return -ENOMEM; + } + return 0; } static int parse_control_option(const struct option *opt, @@ -1071,10 +1069,81 @@ static int parse_stat_cgroups(const struct option *opt, return parse_cgroups(opt, str, unset); } +static int parse_cputype(const struct option *opt, + const char *str, + int unset __maybe_unused) +{ + const struct perf_pmu *pmu; + struct evlist *evlist = *(struct evlist **)opt->value; + + if (!list_empty(&evlist->core.entries)) { + fprintf(stderr, "Must define cputype before events/metrics\n"); + return -1; + } + + pmu = perf_pmus__pmu_for_pmu_filter(str); + if (!pmu) { + fprintf(stderr, "--cputype %s is not supported!\n", str); + return -1; + } + parse_events_option_args.pmu_filter = pmu->name; + + return 0; +} + +static int parse_cache_level(const struct option *opt, + const char *str, + int unset __maybe_unused) +{ + int level; + u32 *aggr_mode = (u32 *)opt->value; + u32 *aggr_level = (u32 *)opt->data; + + /* + * If no string is specified, aggregate based on the topology of + * Last Level Cache (LLC). Since the LLC level can change from + * architecture to architecture, set level greater than + * MAX_CACHE_LVL which will be interpreted as LLC. 
+ */ + if (str == NULL) { + level = MAX_CACHE_LVL + 1; + goto out; + } + + /* + * The format to specify cache level is LX or lX where X is the + * cache level. + */ + if (strlen(str) != 2 || (str[0] != 'l' && str[0] != 'L')) { + pr_err("Cache level must be of form L[1-%d], or l[1-%d]\n", + MAX_CACHE_LVL, + MAX_CACHE_LVL); + return -EINVAL; + } + + level = atoi(&str[1]); + if (level < 1) { + pr_err("Cache level must be of form L[1-%d], or l[1-%d]\n", + MAX_CACHE_LVL, + MAX_CACHE_LVL); + return -EINVAL; + } + + if (level > MAX_CACHE_LVL) { + pr_err("perf only supports max cache level of %d.\n" + "Consider increasing MAX_CACHE_LVL\n", MAX_CACHE_LVL); + return -EINVAL; + } +out: + *aggr_mode = AGGR_CACHE; + *aggr_level = level; + return 0; +} + static struct option stat_options[] = { OPT_BOOLEAN('T', "transaction", &transaction_run, "hardware transaction statistics"), - OPT_CALLBACK('e', "event", &evsel_list, "event", + OPT_CALLBACK('e', "event", &parse_events_option_args, "event", "event selector. use 'perf list' to list available events", parse_events_option), OPT_CALLBACK(0, "filter", &evsel_list, "filter", @@ -1085,10 +1154,16 @@ static struct option stat_options[] = { "stat events on existing process id"), OPT_STRING('t', "tid", &target.tid, "tid", "stat events on existing thread id"), +#ifdef HAVE_BPF_SKEL + OPT_STRING('b', "bpf-prog", &target.bpf_str, "bpf-prog-id", + "stat events on existing bpf program id"), + OPT_BOOLEAN(0, "bpf-counters", &target.use_bpf, + "use bpf program to count events"), + OPT_STRING(0, "bpf-attr-map", &target.attr_map, "attr-map-path", + "path to perf_event_attr map"), +#endif OPT_BOOLEAN('a', "all-cpus", &target.system_wide, "system-wide collection from all CPUs"), - OPT_BOOLEAN('g', "group", &group, - "put the counters into a counter group"), OPT_BOOLEAN(0, "scale", &stat_config.scale, "Use --no-scale to disable counter scaling for multiplexing"), OPT_INCR('v', "verbose", &verbose, @@ -1109,10 +1184,15 @@ static struct option stat_options[] = { OPT_STRING('C', "cpu", &target.cpu_list, "cpu", "list of cpus to monitor in system-wide"), OPT_SET_UINT('A', "no-aggr", &stat_config.aggr_mode, - "disable CPU count aggregation", AGGR_NONE), - OPT_BOOLEAN(0, "no-merge", &stat_config.no_merge, "Do not merge identical named events"), + "disable aggregation across CPUs or PMUs", AGGR_NONE), + OPT_SET_UINT(0, "no-merge", &stat_config.aggr_mode, + "disable aggregation the same as -A or -no-aggr", AGGR_NONE), + OPT_BOOLEAN(0, "hybrid-merge", &stat_config.hybrid_merge, + "Merge identical named hybrid events"), OPT_STRING('x', "field-separator", &stat_config.csv_sep, "separator", "print counts with custom separator"), + OPT_BOOLEAN('j', "json-output", &stat_config.json_output, + "print counts in JSON format"), OPT_CALLBACK('G', "cgroup", &evsel_list, "name", "monitor event in cgroup name only", parse_stat_cgroups), OPT_STRING(0, "for-each-cgroup", &stat_config.cgroup_list, "name", @@ -1138,13 +1218,18 @@ static struct option stat_options[] = { "aggregate counts per processor socket", AGGR_SOCKET), OPT_SET_UINT(0, "per-die", &stat_config.aggr_mode, "aggregate counts per processor die", AGGR_DIE), + OPT_SET_UINT(0, "per-cluster", &stat_config.aggr_mode, + "aggregate counts per processor cluster", AGGR_CLUSTER), + OPT_CALLBACK_OPTARG(0, "per-cache", &stat_config.aggr_mode, &stat_config.aggr_level, + "cache level", "aggregate count at this cache level (Default: LLC)", + parse_cache_level), OPT_SET_UINT(0, "per-core", &stat_config.aggr_mode, "aggregate counts per physical 
processor core", AGGR_CORE), OPT_SET_UINT(0, "per-thread", &stat_config.aggr_mode, "aggregate counts per thread", AGGR_THREAD), OPT_SET_UINT(0, "per-node", &stat_config.aggr_mode, "aggregate counts per numa node", AGGR_NODE), - OPT_INTEGER('D', "delay", &stat_config.initial_delay, + OPT_INTEGER('D', "delay", &target.initial_delay, "ms to wait before starting measurement after program start (-1: start with events disabled)"), OPT_CALLBACK_NOOPT(0, "metric-only", &stat_config.metric_only, NULL, "Only print computed metrics. No raw values", enable_metric_only), @@ -1152,13 +1237,17 @@ static struct option stat_options[] = { "don't group metric events, impacts multiplexing"), OPT_BOOLEAN(0, "metric-no-merge", &stat_config.metric_no_merge, "don't try to share events between metrics in a group"), + OPT_BOOLEAN(0, "metric-no-threshold", &stat_config.metric_no_threshold, + "disable adding events for the metric threshold calculation"), OPT_BOOLEAN(0, "topdown", &topdown_run, - "measure topdown level 1 statistics"), + "measure top-down statistics"), + OPT_UINTEGER(0, "td-level", &stat_config.topdown_level, + "Set the metrics level for the top-down statistics (0: max level)"), OPT_BOOLEAN(0, "smi-cost", &smi_cost, "measure SMI cost"), OPT_CALLBACK('M', "metrics", &evsel_list, "metric/metric group list", "monitor specified metrics or metric groups (separated by ,)", - parse_metric_groups), + append_metric_groups), OPT_BOOLEAN_FLAG(0, "all-kernel", &stat_config.all_kernel, "Configure all used events to run in kernel space.", PARSE_OPT_EXCLUSIVE), @@ -1171,6 +1260,14 @@ static struct option stat_options[] = { "threads of same physical core"), OPT_BOOLEAN(0, "summary", &stat_config.summary, "print summary for interval mode"), + OPT_BOOLEAN(0, "no-csv-summary", &stat_config.no_csv_summary, + "don't print 'summary' for CSV summary output"), + OPT_BOOLEAN(0, "quiet", &quiet, + "don't print any output, messages or warnings (useful with record)"), + OPT_CALLBACK(0, "cputype", &evsel_list, "hybrid cpu type", + "Only enable events on applying cpu with this type " + "for hybrid platform (e.g. core or atom)", + parse_cputype), #ifdef HAVE_LIBPFM OPT_CALLBACK(0, "pfm-events", &evsel_list, "event", "libpfm4 event selector. use 'perf list' to list available events", @@ -1181,133 +1278,342 @@ static struct option stat_options[] = { "\t\t\t Optionally send control command completion ('ack\\n') to ack-fd descriptor.\n" "\t\t\t Alternatively, ctl-fifo / ack-fifo will be opened and used as ctl-fd / ack-fd.", parse_control_option), + OPT_CALLBACK_OPTARG(0, "iostat", &evsel_list, &stat_config, "default", + "measure I/O performance metrics provided by arch/platform", + iostat_parse), OPT_END() }; -static int perf_stat__get_socket(struct perf_stat_config *config __maybe_unused, - struct perf_cpu_map *map, int cpu) +/** + * Calculate the cache instance ID from the map in + * /sys/devices/system/cpu/cpuX/cache/indexY/shared_cpu_list + * Cache instance ID is the first CPU reported in the shared_cpu_list file. + */ +static int cpu__get_cache_id_from_map(struct perf_cpu cpu, char *map) { - return cpu_map__get_socket(map, cpu, NULL); + int id; + struct perf_cpu_map *cpu_map = perf_cpu_map__new(map); + + /* + * If the map contains no CPU, consider the current CPU to + * be the first online CPU in the cache domain else use the + * first online CPU of the cache domain as the ID. 
+ */ + id = perf_cpu_map__min(cpu_map).cpu; + if (id == -1) + id = cpu.cpu; + + /* Free the perf_cpu_map used to find the cache ID */ + perf_cpu_map__put(cpu_map); + + return id; } -static int perf_stat__get_die(struct perf_stat_config *config __maybe_unused, - struct perf_cpu_map *map, int cpu) +/** + * cpu__get_cache_id - Returns 0 if successful in populating the + * cache level and cache id. Cache level is read from + * /sys/devices/system/cpu/cpuX/cache/indexY/level where as cache instance ID + * is the first CPU reported by + * /sys/devices/system/cpu/cpuX/cache/indexY/shared_cpu_list + */ +static int cpu__get_cache_details(struct perf_cpu cpu, struct perf_cache *cache) { - return cpu_map__get_die(map, cpu, NULL); + int ret = 0; + u32 cache_level = stat_config.aggr_level; + struct cpu_cache_level caches[MAX_CACHE_LVL]; + u32 i = 0, caches_cnt = 0; + + cache->cache_lvl = (cache_level > MAX_CACHE_LVL) ? 0 : cache_level; + cache->cache = -1; + + ret = build_caches_for_cpu(cpu.cpu, caches, &caches_cnt); + if (ret) { + /* + * If caches_cnt is not 0, cpu_cache_level data + * was allocated when building the topology. + * Free the allocated data before returning. + */ + if (caches_cnt) + goto free_caches; + + return ret; + } + + if (!caches_cnt) + return -1; + + /* + * Save the data for the highest level if no + * level was specified by the user. + */ + if (cache_level > MAX_CACHE_LVL) { + int max_level_index = 0; + + for (i = 1; i < caches_cnt; ++i) { + if (caches[i].level > caches[max_level_index].level) + max_level_index = i; + } + + cache->cache_lvl = caches[max_level_index].level; + cache->cache = cpu__get_cache_id_from_map(cpu, caches[max_level_index].map); + + /* Reset i to 0 to free entire caches[] */ + i = 0; + goto free_caches; + } + + for (i = 0; i < caches_cnt; ++i) { + if (caches[i].level == cache_level) { + cache->cache_lvl = cache_level; + cache->cache = cpu__get_cache_id_from_map(cpu, caches[i].map); + } + + cpu_cache_level__free(&caches[i]); + } + +free_caches: + /* + * Free all the allocated cpu_cache_level data. + */ + while (i < caches_cnt) + cpu_cache_level__free(&caches[i++]); + + return ret; } -static int perf_stat__get_core(struct perf_stat_config *config __maybe_unused, - struct perf_cpu_map *map, int cpu) +/** + * aggr_cpu_id__cache - Create an aggr_cpu_id with cache instache ID, cache + * level, die and socket populated with the cache instache ID, cache level, + * die and socket for cpu. The function signature is compatible with + * aggr_cpu_id_get_t. 
+ */ +static struct aggr_cpu_id aggr_cpu_id__cache(struct perf_cpu cpu, void *data) { - return cpu_map__get_core(map, cpu, NULL); + int ret; + struct aggr_cpu_id id; + struct perf_cache cache; + + id = aggr_cpu_id__die(cpu, data); + if (aggr_cpu_id__is_empty(&id)) + return id; + + ret = cpu__get_cache_details(cpu, &cache); + if (ret) + return id; + + id.cache_lvl = cache.cache_lvl; + id.cache = cache.cache; + return id; +} + +static const char *const aggr_mode__string[] = { + [AGGR_CORE] = "core", + [AGGR_CACHE] = "cache", + [AGGR_CLUSTER] = "cluster", + [AGGR_DIE] = "die", + [AGGR_GLOBAL] = "global", + [AGGR_NODE] = "node", + [AGGR_NONE] = "none", + [AGGR_SOCKET] = "socket", + [AGGR_THREAD] = "thread", + [AGGR_UNSET] = "unset", +}; + +static struct aggr_cpu_id perf_stat__get_socket(struct perf_stat_config *config __maybe_unused, + struct perf_cpu cpu) +{ + return aggr_cpu_id__socket(cpu, /*data=*/NULL); } -static int perf_stat__get_node(struct perf_stat_config *config __maybe_unused, - struct perf_cpu_map *map, int cpu) +static struct aggr_cpu_id perf_stat__get_die(struct perf_stat_config *config __maybe_unused, + struct perf_cpu cpu) { - return cpu_map__get_node(map, cpu, NULL); + return aggr_cpu_id__die(cpu, /*data=*/NULL); } -static int perf_stat__get_aggr(struct perf_stat_config *config, - aggr_get_id_t get_id, struct perf_cpu_map *map, int idx) +static struct aggr_cpu_id perf_stat__get_cache_id(struct perf_stat_config *config __maybe_unused, + struct perf_cpu cpu) { - int cpu; + return aggr_cpu_id__cache(cpu, /*data=*/NULL); +} - if (idx >= map->nr) - return -1; +static struct aggr_cpu_id perf_stat__get_cluster(struct perf_stat_config *config __maybe_unused, + struct perf_cpu cpu) +{ + return aggr_cpu_id__cluster(cpu, /*data=*/NULL); +} - cpu = map->map[idx]; +static struct aggr_cpu_id perf_stat__get_core(struct perf_stat_config *config __maybe_unused, + struct perf_cpu cpu) +{ + return aggr_cpu_id__core(cpu, /*data=*/NULL); +} - if (config->cpus_aggr_map->map[cpu] == -1) - config->cpus_aggr_map->map[cpu] = get_id(config, map, idx); +static struct aggr_cpu_id perf_stat__get_node(struct perf_stat_config *config __maybe_unused, + struct perf_cpu cpu) +{ + return aggr_cpu_id__node(cpu, /*data=*/NULL); +} - return config->cpus_aggr_map->map[cpu]; +static struct aggr_cpu_id perf_stat__get_global(struct perf_stat_config *config __maybe_unused, + struct perf_cpu cpu) +{ + return aggr_cpu_id__global(cpu, /*data=*/NULL); } -static int perf_stat__get_socket_cached(struct perf_stat_config *config, - struct perf_cpu_map *map, int idx) +static struct aggr_cpu_id perf_stat__get_cpu(struct perf_stat_config *config __maybe_unused, + struct perf_cpu cpu) { - return perf_stat__get_aggr(config, perf_stat__get_socket, map, idx); + return aggr_cpu_id__cpu(cpu, /*data=*/NULL); } -static int perf_stat__get_die_cached(struct perf_stat_config *config, - struct perf_cpu_map *map, int idx) +static struct aggr_cpu_id perf_stat__get_aggr(struct perf_stat_config *config, + aggr_get_id_t get_id, struct perf_cpu cpu) { - return perf_stat__get_aggr(config, perf_stat__get_die, map, idx); + struct aggr_cpu_id id; + + /* per-process mode - should use global aggr mode */ + if (cpu.cpu == -1) + return get_id(config, cpu); + + if (aggr_cpu_id__is_empty(&config->cpus_aggr_map->map[cpu.cpu])) + config->cpus_aggr_map->map[cpu.cpu] = get_id(config, cpu); + + id = config->cpus_aggr_map->map[cpu.cpu]; + return id; } -static int perf_stat__get_core_cached(struct perf_stat_config *config, - struct perf_cpu_map *map, int idx) 
+static struct aggr_cpu_id perf_stat__get_socket_cached(struct perf_stat_config *config, + struct perf_cpu cpu) { - return perf_stat__get_aggr(config, perf_stat__get_core, map, idx); + return perf_stat__get_aggr(config, perf_stat__get_socket, cpu); } -static int perf_stat__get_node_cached(struct perf_stat_config *config, - struct perf_cpu_map *map, int idx) +static struct aggr_cpu_id perf_stat__get_die_cached(struct perf_stat_config *config, + struct perf_cpu cpu) { - return perf_stat__get_aggr(config, perf_stat__get_node, map, idx); + return perf_stat__get_aggr(config, perf_stat__get_die, cpu); } -static bool term_percore_set(void) +static struct aggr_cpu_id perf_stat__get_cluster_cached(struct perf_stat_config *config, + struct perf_cpu cpu) { - struct evsel *counter; + return perf_stat__get_aggr(config, perf_stat__get_cluster, cpu); +} - evlist__for_each_entry(evsel_list, counter) { - if (counter->percore) - return true; - } +static struct aggr_cpu_id perf_stat__get_cache_id_cached(struct perf_stat_config *config, + struct perf_cpu cpu) +{ + return perf_stat__get_aggr(config, perf_stat__get_cache_id, cpu); +} - return false; +static struct aggr_cpu_id perf_stat__get_core_cached(struct perf_stat_config *config, + struct perf_cpu cpu) +{ + return perf_stat__get_aggr(config, perf_stat__get_core, cpu); } -static int perf_stat_init_aggr_mode(void) +static struct aggr_cpu_id perf_stat__get_node_cached(struct perf_stat_config *config, + struct perf_cpu cpu) { - int nr; + return perf_stat__get_aggr(config, perf_stat__get_node, cpu); +} + +static struct aggr_cpu_id perf_stat__get_global_cached(struct perf_stat_config *config, + struct perf_cpu cpu) +{ + return perf_stat__get_aggr(config, perf_stat__get_global, cpu); +} - switch (stat_config.aggr_mode) { +static struct aggr_cpu_id perf_stat__get_cpu_cached(struct perf_stat_config *config, + struct perf_cpu cpu) +{ + return perf_stat__get_aggr(config, perf_stat__get_cpu, cpu); +} + +static aggr_cpu_id_get_t aggr_mode__get_aggr(enum aggr_mode aggr_mode) +{ + switch (aggr_mode) { case AGGR_SOCKET: - if (cpu_map__build_socket_map(evsel_list->core.cpus, &stat_config.aggr_map)) { - perror("cannot build socket map"); - return -1; - } - stat_config.aggr_get_id = perf_stat__get_socket_cached; - break; + return aggr_cpu_id__socket; case AGGR_DIE: - if (cpu_map__build_die_map(evsel_list->core.cpus, &stat_config.aggr_map)) { - perror("cannot build die map"); - return -1; - } - stat_config.aggr_get_id = perf_stat__get_die_cached; - break; + return aggr_cpu_id__die; + case AGGR_CLUSTER: + return aggr_cpu_id__cluster; + case AGGR_CACHE: + return aggr_cpu_id__cache; case AGGR_CORE: - if (cpu_map__build_core_map(evsel_list->core.cpus, &stat_config.aggr_map)) { - perror("cannot build core map"); - return -1; - } - stat_config.aggr_get_id = perf_stat__get_core_cached; - break; + return aggr_cpu_id__core; case AGGR_NODE: - if (cpu_map__build_node_map(evsel_list->core.cpus, &stat_config.aggr_map)) { - perror("cannot build core map"); - return -1; - } - stat_config.aggr_get_id = perf_stat__get_node_cached; - break; + return aggr_cpu_id__node; case AGGR_NONE: - if (term_percore_set()) { - if (cpu_map__build_core_map(evsel_list->core.cpus, - &stat_config.aggr_map)) { - perror("cannot build core map"); - return -1; - } - stat_config.aggr_get_id = perf_stat__get_core_cached; - } - break; + return aggr_cpu_id__cpu; + case AGGR_GLOBAL: + return aggr_cpu_id__global; + case AGGR_THREAD: + case AGGR_UNSET: + case AGGR_MAX: + default: + return NULL; + } +} + +static 
aggr_get_id_t aggr_mode__get_id(enum aggr_mode aggr_mode) +{ + switch (aggr_mode) { + case AGGR_SOCKET: + return perf_stat__get_socket_cached; + case AGGR_DIE: + return perf_stat__get_die_cached; + case AGGR_CLUSTER: + return perf_stat__get_cluster_cached; + case AGGR_CACHE: + return perf_stat__get_cache_id_cached; + case AGGR_CORE: + return perf_stat__get_core_cached; + case AGGR_NODE: + return perf_stat__get_node_cached; + case AGGR_NONE: + return perf_stat__get_cpu_cached; case AGGR_GLOBAL: + return perf_stat__get_global_cached; case AGGR_THREAD: case AGGR_UNSET: + case AGGR_MAX: default: - break; + return NULL; + } +} + +static int perf_stat_init_aggr_mode(void) +{ + int nr; + aggr_cpu_id_get_t get_id = aggr_mode__get_aggr(stat_config.aggr_mode); + + if (get_id) { + bool needs_sort = stat_config.aggr_mode != AGGR_NONE; + stat_config.aggr_map = cpu_aggr_map__new(evsel_list->core.user_requested_cpus, + get_id, /*data=*/NULL, needs_sort); + if (!stat_config.aggr_map) { + pr_err("cannot build %s map\n", aggr_mode__string[stat_config.aggr_mode]); + return -1; + } + stat_config.aggr_get_id = aggr_mode__get_id(stat_config.aggr_mode); + } + + if (stat_config.aggr_mode == AGGR_THREAD) { + nr = perf_thread_map__nr(evsel_list->core.threads); + stat_config.aggr_map = cpu_aggr_map__empty_new(nr); + if (stat_config.aggr_map == NULL) + return -ENOMEM; + + for (int s = 0; s < nr; s++) { + struct aggr_cpu_id id = aggr_cpu_id__empty(); + + id.thread_idx = s; + stat_config.aggr_map->map[s] = id; + } + return 0; } /* @@ -1315,191 +1621,315 @@ static int perf_stat_init_aggr_mode(void) * taking the highest cpu number to be the size of * the aggregation translate cpumap. */ - nr = perf_cpu_map__max(evsel_list->core.cpus); - stat_config.cpus_aggr_map = perf_cpu_map__empty_new(nr + 1); + if (!perf_cpu_map__is_any_cpu_or_is_empty(evsel_list->core.user_requested_cpus)) + nr = perf_cpu_map__max(evsel_list->core.user_requested_cpus).cpu; + else + nr = 0; + stat_config.cpus_aggr_map = cpu_aggr_map__empty_new(nr + 1); return stat_config.cpus_aggr_map ? 0 : -ENOMEM; } +static void cpu_aggr_map__delete(struct cpu_aggr_map *map) +{ + free(map); +} + static void perf_stat__exit_aggr_mode(void) { - perf_cpu_map__put(stat_config.aggr_map); - perf_cpu_map__put(stat_config.cpus_aggr_map); + cpu_aggr_map__delete(stat_config.aggr_map); + cpu_aggr_map__delete(stat_config.cpus_aggr_map); stat_config.aggr_map = NULL; stat_config.cpus_aggr_map = NULL; } -static inline int perf_env__get_cpu(struct perf_env *env, struct perf_cpu_map *map, int idx) +static struct aggr_cpu_id perf_env__get_socket_aggr_by_cpu(struct perf_cpu cpu, void *data) { - int cpu; - - if (idx > map->nr) - return -1; - - cpu = map->map[idx]; + struct perf_env *env = data; + struct aggr_cpu_id id = aggr_cpu_id__empty(); - if (cpu >= env->nr_cpus_avail) - return -1; + if (cpu.cpu != -1) + id.socket = env->cpu[cpu.cpu].socket_id; - return cpu; + return id; } -static int perf_env__get_socket(struct perf_cpu_map *map, int idx, void *data) +static struct aggr_cpu_id perf_env__get_die_aggr_by_cpu(struct perf_cpu cpu, void *data) { struct perf_env *env = data; - int cpu = perf_env__get_cpu(env, map, idx); + struct aggr_cpu_id id = aggr_cpu_id__empty(); + + if (cpu.cpu != -1) { + /* + * die_id is relative to socket, so start + * with the socket ID and then add die to + * make a unique ID. + */ + id.socket = env->cpu[cpu.cpu].socket_id; + id.die = env->cpu[cpu.cpu].die_id; + } - return cpu == -1 ? 
-1 : env->cpu[cpu].socket_id; + return id; } -static int perf_env__get_die(struct perf_cpu_map *map, int idx, void *data) +static void perf_env__get_cache_id_for_cpu(struct perf_cpu cpu, struct perf_env *env, + u32 cache_level, struct aggr_cpu_id *id) { - struct perf_env *env = data; - int die_id = -1, cpu = perf_env__get_cpu(env, map, idx); + int i; + int caches_cnt = env->caches_cnt; + struct cpu_cache_level *caches = env->caches; + + id->cache_lvl = (cache_level > MAX_CACHE_LVL) ? 0 : cache_level; + id->cache = -1; + + if (!caches_cnt) + return; + + for (i = caches_cnt - 1; i > -1; --i) { + struct perf_cpu_map *cpu_map; + int map_contains_cpu; - if (cpu != -1) { /* - * Encode socket in bit range 15:8 - * die_id is relative to socket, - * we need a global id. So we combine - * socket + die id + * If user has not specified a level, find the fist level with + * the cpu in the map. Since building the map is expensive, do + * this only if levels match. */ - if (WARN_ONCE(env->cpu[cpu].socket_id >> 8, "The socket id number is too big.\n")) - return -1; + if (cache_level <= MAX_CACHE_LVL && caches[i].level != cache_level) + continue; - if (WARN_ONCE(env->cpu[cpu].die_id >> 8, "The die id number is too big.\n")) - return -1; + cpu_map = perf_cpu_map__new(caches[i].map); + map_contains_cpu = perf_cpu_map__idx(cpu_map, cpu); + perf_cpu_map__put(cpu_map); + + if (map_contains_cpu != -1) { + id->cache_lvl = caches[i].level; + id->cache = cpu__get_cache_id_from_map(cpu, caches[i].map); + return; + } + } +} + +static struct aggr_cpu_id perf_env__get_cache_aggr_by_cpu(struct perf_cpu cpu, + void *data) +{ + struct perf_env *env = data; + struct aggr_cpu_id id = aggr_cpu_id__empty(); + + if (cpu.cpu != -1) { + u32 cache_level = (perf_stat.aggr_level) ?: stat_config.aggr_level; + + id.socket = env->cpu[cpu.cpu].socket_id; + id.die = env->cpu[cpu.cpu].die_id; + perf_env__get_cache_id_for_cpu(cpu, env, cache_level, &id); + } + + return id; +} + +static struct aggr_cpu_id perf_env__get_cluster_aggr_by_cpu(struct perf_cpu cpu, + void *data) +{ + struct perf_env *env = data; + struct aggr_cpu_id id = aggr_cpu_id__empty(); - die_id = (env->cpu[cpu].socket_id << 8) | (env->cpu[cpu].die_id & 0xff); + if (cpu.cpu != -1) { + id.socket = env->cpu[cpu.cpu].socket_id; + id.die = env->cpu[cpu.cpu].die_id; + id.cluster = env->cpu[cpu.cpu].cluster_id; } - return die_id; + return id; } -static int perf_env__get_core(struct perf_cpu_map *map, int idx, void *data) +static struct aggr_cpu_id perf_env__get_core_aggr_by_cpu(struct perf_cpu cpu, void *data) { struct perf_env *env = data; - int core = -1, cpu = perf_env__get_cpu(env, map, idx); + struct aggr_cpu_id id = aggr_cpu_id__empty(); - if (cpu != -1) { + if (cpu.cpu != -1) { /* - * Encode socket in bit range 31:24 - * encode die id in bit range 23:16 - * core_id is relative to socket and die, - * we need a global id. So we combine - * socket + die id + core id + * core_id is relative to socket, die and cluster, we need a + * global id. So we set socket, die id, cluster id and core id. 
*/ - if (WARN_ONCE(env->cpu[cpu].socket_id >> 8, "The socket id number is too big.\n")) - return -1; + id.socket = env->cpu[cpu.cpu].socket_id; + id.die = env->cpu[cpu.cpu].die_id; + id.cluster = env->cpu[cpu.cpu].cluster_id; + id.core = env->cpu[cpu.cpu].core_id; + } - if (WARN_ONCE(env->cpu[cpu].die_id >> 8, "The die id number is too big.\n")) - return -1; + return id; +} - if (WARN_ONCE(env->cpu[cpu].core_id >> 16, "The core id number is too big.\n")) - return -1; +static struct aggr_cpu_id perf_env__get_cpu_aggr_by_cpu(struct perf_cpu cpu, void *data) +{ + struct perf_env *env = data; + struct aggr_cpu_id id = aggr_cpu_id__empty(); - core = (env->cpu[cpu].socket_id << 24) | - (env->cpu[cpu].die_id << 16) | - (env->cpu[cpu].core_id & 0xffff); + if (cpu.cpu != -1) { + /* + * core_id is relative to socket and die, + * we need a global id. So we set + * socket, die id and core id + */ + id.socket = env->cpu[cpu.cpu].socket_id; + id.die = env->cpu[cpu.cpu].die_id; + id.core = env->cpu[cpu.cpu].core_id; + id.cpu = cpu; } - return core; + return id; } -static int perf_env__get_node(struct perf_cpu_map *map, int idx, void *data) +static struct aggr_cpu_id perf_env__get_node_aggr_by_cpu(struct perf_cpu cpu, void *data) { - int cpu = perf_env__get_cpu(data, map, idx); + struct aggr_cpu_id id = aggr_cpu_id__empty(); - return perf_env__numa_node(data, cpu); + id.node = perf_env__numa_node(data, cpu); + return id; } -static int perf_env__build_socket_map(struct perf_env *env, struct perf_cpu_map *cpus, - struct perf_cpu_map **sockp) +static struct aggr_cpu_id perf_env__get_global_aggr_by_cpu(struct perf_cpu cpu __maybe_unused, + void *data __maybe_unused) { - return cpu_map__build_map(cpus, sockp, perf_env__get_socket, env); + struct aggr_cpu_id id = aggr_cpu_id__empty(); + + /* it always aggregates to the cpu 0 */ + id.cpu = (struct perf_cpu){ .cpu = 0 }; + return id; } -static int perf_env__build_die_map(struct perf_env *env, struct perf_cpu_map *cpus, - struct perf_cpu_map **diep) +static struct aggr_cpu_id perf_stat__get_socket_file(struct perf_stat_config *config __maybe_unused, + struct perf_cpu cpu) { - return cpu_map__build_map(cpus, diep, perf_env__get_die, env); + return perf_env__get_socket_aggr_by_cpu(cpu, &perf_stat.session->header.env); } - -static int perf_env__build_core_map(struct perf_env *env, struct perf_cpu_map *cpus, - struct perf_cpu_map **corep) +static struct aggr_cpu_id perf_stat__get_die_file(struct perf_stat_config *config __maybe_unused, + struct perf_cpu cpu) { - return cpu_map__build_map(cpus, corep, perf_env__get_core, env); + return perf_env__get_die_aggr_by_cpu(cpu, &perf_stat.session->header.env); } -static int perf_env__build_node_map(struct perf_env *env, struct perf_cpu_map *cpus, - struct perf_cpu_map **nodep) +static struct aggr_cpu_id perf_stat__get_cluster_file(struct perf_stat_config *config __maybe_unused, + struct perf_cpu cpu) { - return cpu_map__build_map(cpus, nodep, perf_env__get_node, env); + return perf_env__get_cluster_aggr_by_cpu(cpu, &perf_stat.session->header.env); } -static int perf_stat__get_socket_file(struct perf_stat_config *config __maybe_unused, - struct perf_cpu_map *map, int idx) +static struct aggr_cpu_id perf_stat__get_cache_file(struct perf_stat_config *config __maybe_unused, + struct perf_cpu cpu) { - return perf_env__get_socket(map, idx, &perf_stat.session->header.env); + return perf_env__get_cache_aggr_by_cpu(cpu, &perf_stat.session->header.env); } -static int perf_stat__get_die_file(struct perf_stat_config *config 
__maybe_unused, - struct perf_cpu_map *map, int idx) + +static struct aggr_cpu_id perf_stat__get_core_file(struct perf_stat_config *config __maybe_unused, + struct perf_cpu cpu) { - return perf_env__get_die(map, idx, &perf_stat.session->header.env); + return perf_env__get_core_aggr_by_cpu(cpu, &perf_stat.session->header.env); } -static int perf_stat__get_core_file(struct perf_stat_config *config __maybe_unused, - struct perf_cpu_map *map, int idx) +static struct aggr_cpu_id perf_stat__get_cpu_file(struct perf_stat_config *config __maybe_unused, + struct perf_cpu cpu) { - return perf_env__get_core(map, idx, &perf_stat.session->header.env); + return perf_env__get_cpu_aggr_by_cpu(cpu, &perf_stat.session->header.env); } -static int perf_stat__get_node_file(struct perf_stat_config *config __maybe_unused, - struct perf_cpu_map *map, int idx) +static struct aggr_cpu_id perf_stat__get_node_file(struct perf_stat_config *config __maybe_unused, + struct perf_cpu cpu) { - return perf_env__get_node(map, idx, &perf_stat.session->header.env); + return perf_env__get_node_aggr_by_cpu(cpu, &perf_stat.session->header.env); } -static int perf_stat_init_aggr_mode_file(struct perf_stat *st) +static struct aggr_cpu_id perf_stat__get_global_file(struct perf_stat_config *config __maybe_unused, + struct perf_cpu cpu) { - struct perf_env *env = &st->session->header.env; + return perf_env__get_global_aggr_by_cpu(cpu, &perf_stat.session->header.env); +} - switch (stat_config.aggr_mode) { +static aggr_cpu_id_get_t aggr_mode__get_aggr_file(enum aggr_mode aggr_mode) +{ + switch (aggr_mode) { case AGGR_SOCKET: - if (perf_env__build_socket_map(env, evsel_list->core.cpus, &stat_config.aggr_map)) { - perror("cannot build socket map"); - return -1; - } - stat_config.aggr_get_id = perf_stat__get_socket_file; - break; + return perf_env__get_socket_aggr_by_cpu; case AGGR_DIE: - if (perf_env__build_die_map(env, evsel_list->core.cpus, &stat_config.aggr_map)) { - perror("cannot build die map"); - return -1; - } - stat_config.aggr_get_id = perf_stat__get_die_file; - break; + return perf_env__get_die_aggr_by_cpu; + case AGGR_CLUSTER: + return perf_env__get_cluster_aggr_by_cpu; + case AGGR_CACHE: + return perf_env__get_cache_aggr_by_cpu; case AGGR_CORE: - if (perf_env__build_core_map(env, evsel_list->core.cpus, &stat_config.aggr_map)) { - perror("cannot build core map"); - return -1; - } - stat_config.aggr_get_id = perf_stat__get_core_file; - break; + return perf_env__get_core_aggr_by_cpu; case AGGR_NODE: - if (perf_env__build_node_map(env, evsel_list->core.cpus, &stat_config.aggr_map)) { - perror("cannot build core map"); - return -1; - } - stat_config.aggr_get_id = perf_stat__get_node_file; - break; + return perf_env__get_node_aggr_by_cpu; + case AGGR_GLOBAL: + return perf_env__get_global_aggr_by_cpu; case AGGR_NONE: + return perf_env__get_cpu_aggr_by_cpu; + case AGGR_THREAD: + case AGGR_UNSET: + case AGGR_MAX: + default: + return NULL; + } +} + +static aggr_get_id_t aggr_mode__get_id_file(enum aggr_mode aggr_mode) +{ + switch (aggr_mode) { + case AGGR_SOCKET: + return perf_stat__get_socket_file; + case AGGR_DIE: + return perf_stat__get_die_file; + case AGGR_CLUSTER: + return perf_stat__get_cluster_file; + case AGGR_CACHE: + return perf_stat__get_cache_file; + case AGGR_CORE: + return perf_stat__get_core_file; + case AGGR_NODE: + return perf_stat__get_node_file; case AGGR_GLOBAL: + return perf_stat__get_global_file; + case AGGR_NONE: + return perf_stat__get_cpu_file; case AGGR_THREAD: case AGGR_UNSET: + case AGGR_MAX: default: - 
break; + return NULL; } +} + +static int perf_stat_init_aggr_mode_file(struct perf_stat *st) +{ + struct perf_env *env = &st->session->header.env; + aggr_cpu_id_get_t get_id = aggr_mode__get_aggr_file(stat_config.aggr_mode); + bool needs_sort = stat_config.aggr_mode != AGGR_NONE; + + if (stat_config.aggr_mode == AGGR_THREAD) { + int nr = perf_thread_map__nr(evsel_list->core.threads); + + stat_config.aggr_map = cpu_aggr_map__empty_new(nr); + if (stat_config.aggr_map == NULL) + return -ENOMEM; + for (int s = 0; s < nr; s++) { + struct aggr_cpu_id id = aggr_cpu_id__empty(); + + id.thread_idx = s; + stat_config.aggr_map->map[s] = id; + } + return 0; + } + + if (!get_id) + return 0; + + stat_config.aggr_map = cpu_aggr_map__new(evsel_list->core.user_requested_cpus, + get_id, env, needs_sort); + if (!stat_config.aggr_map) { + pr_err("cannot build %s map\n", aggr_mode__string[stat_config.aggr_mode]); + return -1; + } + stat_config.aggr_get_id = aggr_mode__get_id_file(stat_config.aggr_mode); return 0; } @@ -1509,7 +1939,6 @@ static int perf_stat_init_aggr_mode_file(struct perf_stat *st) */ static int add_default_attributes(void) { - int err; struct perf_event_attr default_attrs0[] = { { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK }, @@ -1622,49 +2051,39 @@ static int add_default_attributes(void) (PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) | (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, }; - struct parse_events_error errinfo; + + struct perf_event_attr default_null_attrs[] = {}; + const char *pmu = parse_events_option_args.pmu_filter ?: "all"; /* Set attrs if no event is selected and !null_run: */ if (stat_config.null_run) return 0; - bzero(&errinfo, sizeof(errinfo)); if (transaction_run) { /* Handle -T as -M transaction. Once platform specific metrics - * support has been added to the json files, all archictures + * support has been added to the json files, all architectures * will use this approach. To determine transaction support * on an architecture test for such a metric name. 
*/ - if (metricgroup__has_metric("transaction")) { - struct option opt = { .value = &evsel_list }; - - return metricgroup__parse_groups(&opt, "transaction", - stat_config.metric_no_group, - stat_config.metric_no_merge, - &stat_config.metric_events); - } - - if (pmu_have_event("cpu", "cycles-ct") && - pmu_have_event("cpu", "el-start")) - err = parse_events(evsel_list, transaction_attrs, - &errinfo); - else - err = parse_events(evsel_list, - transaction_limited_attrs, - &errinfo); - if (err) { - fprintf(stderr, "Cannot set up transaction events\n"); - parse_events_print_error(&errinfo, transaction_attrs); + if (!metricgroup__has_metric(pmu, "transaction")) { + pr_err("Missing transaction metrics\n"); return -1; } - return 0; + return metricgroup__parse_groups(evsel_list, pmu, "transaction", + stat_config.metric_no_group, + stat_config.metric_no_merge, + stat_config.metric_no_threshold, + stat_config.user_requested_cpu_list, + stat_config.system_wide, + stat_config.hardware_aware_grouping, + &stat_config.metric_events); } if (smi_cost) { int smi; if (sysfs__read_int(FREEZE_ON_SMI_PATH, &smi) < 0) { - fprintf(stderr, "freeze_on_smi is not supported.\n"); + pr_err("freeze_on_smi is not supported.\n"); return -1; } @@ -1676,99 +2095,112 @@ static int add_default_attributes(void) smi_reset = true; } - if (pmu_have_event("msr", "aperf") && - pmu_have_event("msr", "smi")) { - if (!force_metric_only) - stat_config.metric_only = true; - err = parse_events(evsel_list, smi_cost_attrs, &errinfo); - } else { - fprintf(stderr, "To measure SMI cost, it needs " - "msr/aperf/, msr/smi/ and cpu/cycles/ support\n"); - parse_events_print_error(&errinfo, smi_cost_attrs); + if (!metricgroup__has_metric(pmu, "smi")) { + pr_err("Missing smi metrics\n"); return -1; } - if (err) { - parse_events_print_error(&errinfo, smi_cost_attrs); - fprintf(stderr, "Cannot set up SMI cost events\n"); - return -1; - } - return 0; + + if (!force_metric_only) + stat_config.metric_only = true; + + return metricgroup__parse_groups(evsel_list, pmu, "smi", + stat_config.metric_no_group, + stat_config.metric_no_merge, + stat_config.metric_no_threshold, + stat_config.user_requested_cpu_list, + stat_config.system_wide, + stat_config.hardware_aware_grouping, + &stat_config.metric_events); } if (topdown_run) { - char *str = NULL; - bool warn = false; + unsigned int max_level = metricgroups__topdown_max_level(); + char str[] = "TopdownL1"; if (!force_metric_only) stat_config.metric_only = true; - if (topdown_filter_events(topdown_metric_attrs, &str, 1) < 0) { - pr_err("Out of memory\n"); + if (!max_level) { + pr_err("Topdown requested but the topdown metric groups aren't present.\n" + "(See perf list the metric groups have names like TopdownL1)\n"); return -1; } - if (topdown_metric_attrs[0] && str) { - if (!stat_config.interval && !stat_config.metric_only) { - fprintf(stat_config.output, - "Topdown accuracy may decrease when measuring long periods.\n" - "Please print the result regularly, e.g. -I1000\n"); - } - goto setup_metrics; - } - - zfree(&str); - - if (stat_config.aggr_mode != AGGR_GLOBAL && - stat_config.aggr_mode != AGGR_CORE) { - pr_err("top down event configuration requires --per-core mode\n"); + if (stat_config.topdown_level > max_level) { + pr_err("Invalid top-down metrics level. 
The max level is %u.\n", max_level); return -1; - } - stat_config.aggr_mode = AGGR_CORE; - if (nr_cgroups || !target__has_cpu(&target)) { - pr_err("top down event configuration requires system-wide mode (-a)\n"); - return -1; - } + } else if (!stat_config.topdown_level) + stat_config.topdown_level = 1; - if (topdown_filter_events(topdown_attrs, &str, - arch_topdown_check_group(&warn)) < 0) { - pr_err("Out of memory\n"); - return -1; + if (!stat_config.interval && !stat_config.metric_only) { + fprintf(stat_config.output, + "Topdown accuracy may decrease when measuring long periods.\n" + "Please print the result regularly, e.g. -I1000\n"); } - if (topdown_attrs[0] && str) { - if (warn) - arch_topdown_group_warn(); -setup_metrics: - err = parse_events(evsel_list, str, &errinfo); - if (err) { - fprintf(stderr, - "Cannot set up top down events %s: %d\n", - str, err); - parse_events_print_error(&errinfo, str); - free(str); - return -1; - } - } else { - fprintf(stderr, "System does not support topdown\n"); + str[8] = stat_config.topdown_level + '0'; + if (metricgroup__parse_groups(evsel_list, + pmu, str, + /*metric_no_group=*/false, + /*metric_no_merge=*/false, + /*metric_no_threshold=*/true, + stat_config.user_requested_cpu_list, + stat_config.system_wide, + stat_config.hardware_aware_grouping, + &stat_config.metric_events) < 0) return -1; - } - free(str); } + if (!stat_config.topdown_level) + stat_config.topdown_level = 1; + if (!evsel_list->core.nr_entries) { + /* No events so add defaults. */ if (target__has_cpu(&target)) default_attrs0[0].config = PERF_COUNT_SW_CPU_CLOCK; if (evlist__add_default_attrs(evsel_list, default_attrs0) < 0) return -1; - if (pmu_have_event("cpu", "stalled-cycles-frontend")) { + if (perf_pmus__have_event("cpu", "stalled-cycles-frontend")) { if (evlist__add_default_attrs(evsel_list, frontend_attrs) < 0) return -1; } - if (pmu_have_event("cpu", "stalled-cycles-backend")) { + if (perf_pmus__have_event("cpu", "stalled-cycles-backend")) { if (evlist__add_default_attrs(evsel_list, backend_attrs) < 0) return -1; } if (evlist__add_default_attrs(evsel_list, default_attrs1) < 0) return -1; + /* + * Add TopdownL1 metrics if they exist. To minimize + * multiplexing, don't request threshold computation. 
+ */ + if (metricgroup__has_metric(pmu, "Default")) { + struct evlist *metric_evlist = evlist__new(); + struct evsel *metric_evsel; + + if (!metric_evlist) + return -1; + + if (metricgroup__parse_groups(metric_evlist, pmu, "Default", + /*metric_no_group=*/false, + /*metric_no_merge=*/false, + /*metric_no_threshold=*/true, + stat_config.user_requested_cpu_list, + stat_config.system_wide, + stat_config.hardware_aware_grouping, + &stat_config.metric_events) < 0) + return -1; + + evlist__for_each_entry(metric_evlist, metric_evsel) { + metric_evsel->skippable = true; + metric_evsel->default_metricgroup = true; + } + evlist__splice_list_tail(evsel_list, &metric_evlist->core.entries); + evlist__delete(metric_evlist); + } + + /* Platform specific attrs */ + if (evlist__add_default_attrs(evsel_list, default_null_attrs) < 0) + return -1; } /* Detailed events get appended to the event list: */ @@ -1829,7 +2261,7 @@ static int __cmd_record(int argc, const char **argv) return -1; } - session = perf_session__new(data, false, NULL); + session = perf_session__new(data, NULL); if (IS_ERR(session)) { pr_err("Perf session creation failed\n"); return PTR_ERR(session); @@ -1847,13 +2279,11 @@ static int process_stat_round_event(struct perf_session *session, union perf_event *event) { struct perf_record_stat_round *stat_round = &event->stat_round; - struct evsel *counter; struct timespec tsh, *ts = NULL; const char **argv = session->header.env.cmdline_argv; int argc = session->header.env.nr_cmdline; - evlist__for_each_entry(evsel_list, counter) - perf_stat_process_counter(&stat_config, counter); + process_counters(); if (stat_round->type == PERF_STAT_ROUND_TYPE__FINAL) update_stats(&walltime_nsecs_stats, stat_round->time); @@ -1877,20 +2307,26 @@ int process_stat_config_event(struct perf_session *session, perf_event__read_stat_config(&stat_config, &event->stat_config); - if (perf_cpu_map__empty(st->cpus)) { + if (perf_cpu_map__is_empty(st->cpus)) { if (st->aggr_mode != AGGR_UNSET) pr_warning("warning: processing task data, aggregation mode not set\n"); - return 0; - } - - if (st->aggr_mode != AGGR_UNSET) + } else if (st->aggr_mode != AGGR_UNSET) { stat_config.aggr_mode = st->aggr_mode; + } if (perf_stat.data.is_pipe) perf_stat_init_aggr_mode(); else perf_stat_init_aggr_mode_file(st); + if (stat_config.aggr_map) { + int nr_aggr = stat_config.aggr_map->nr; + + if (evlist__alloc_aggr_stats(session->evlist, nr_aggr) < 0) { + pr_err("cannot allocate aggr counts\n"); + return -1; + } + } return 0; } @@ -1904,7 +2340,7 @@ static int set_maps(struct perf_stat *st) perf_evlist__set_maps(&evsel_list->core, st->cpus, st->threads); - if (perf_evlist__alloc_stats(evsel_list, true)) + if (evlist__alloc_stats(&stat_config, evsel_list, /*alloc_raw=*/true)) return -ENOMEM; st->maps_allocated = true; @@ -1966,7 +2402,8 @@ static struct perf_stat perf_stat = { .stat = perf_event__process_stat_event, .stat_round = process_stat_round_event, }, - .aggr_mode = AGGR_UNSET, + .aggr_mode = AGGR_UNSET, + .aggr_level = 0, }; static int __cmd_report(int argc, const char **argv) @@ -1978,6 +2415,12 @@ static int __cmd_report(int argc, const char **argv) "aggregate counts per processor socket", AGGR_SOCKET), OPT_SET_UINT(0, "per-die", &perf_stat.aggr_mode, "aggregate counts per processor die", AGGR_DIE), + OPT_SET_UINT(0, "per-cluster", &perf_stat.aggr_mode, + "aggregate counts perf processor cluster", AGGR_CLUSTER), + OPT_CALLBACK_OPTARG(0, "per-cache", &perf_stat.aggr_mode, &perf_stat.aggr_level, + "cache level", + "aggregate count at this 
cache level (Default: LLC)", + parse_cache_level), OPT_SET_UINT(0, "per-core", &perf_stat.aggr_mode, "aggregate counts per physical processor core", AGGR_CORE), OPT_SET_UINT(0, "per-node", &perf_stat.aggr_mode, @@ -2001,12 +2444,13 @@ static int __cmd_report(int argc, const char **argv) perf_stat.data.path = input_name; perf_stat.data.mode = PERF_DATA_MODE_READ; - session = perf_session__new(&perf_stat.data, false, &perf_stat.tool); + session = perf_session__new(&perf_stat.data, &perf_stat.tool); if (IS_ERR(session)) return PTR_ERR(session); perf_stat.session = session; stat_config.output = stderr; + evlist__delete(evsel_list); evsel_list = session->evlist; ret = perf_session__process_events(session); @@ -2037,8 +2481,8 @@ static void setup_system_wide(int forks) struct evsel *counter; evlist__for_each_entry(evsel_list, counter) { - if (!counter->core.system_wide && - strcmp(counter->name, "duration_time")) { + if (!counter->core.requires_cpu && + !evsel__name_is(counter, "duration_time")) { return; } } @@ -2054,11 +2498,12 @@ int cmd_stat(int argc, const char **argv) "perf stat [<options>] [<command>]", NULL }; - int status = -EINVAL, run_idx; + int status = -EINVAL, run_idx, err; const char *mode; FILE *output = stderr; unsigned int interval, timeout; const char * const stat_subcommands[] = { "record", "report" }; + char errbuf[BUFSIZ]; setlocale(LC_ALL, ""); @@ -2076,8 +2521,6 @@ int cmd_stat(int argc, const char **argv) argc = parse_options_subcommand(argc, argv, stat_options, stat_subcommands, (const char **) stat_usage, PARSE_OPT_STOP_AT_NON_OPTION); - perf_stat__collect_metric_expr(evsel_list); - perf_stat__init_shadow_stats(); if (stat_config.csv_sep) { stat_config.csv_output = true; @@ -2086,11 +2529,11 @@ int cmd_stat(int argc, const char **argv) } else stat_config.csv_sep = DEFAULT_SEPARATOR; - if (argc && !strncmp(argv[0], "rec", 3)) { + if (argc && strlen(argv[0]) > 2 && strstarts("record", argv[0])) { argc = __cmd_record(argc, argv); if (argc < 0) return -1; - } else if (argc && !strncmp(argv[0], "rep", 3)) + } else if (argc && strlen(argv[0]) > 2 && strstarts("report", argv[0])) return __cmd_report(argc, argv); interval = stat_config.interval; @@ -2132,7 +2575,7 @@ int cmd_stat(int argc, const char **argv) goto out; } - if (!output) { + if (!output && !quiet) { struct timespec tm; mode = append_file ? "a" : "w"; @@ -2141,8 +2584,10 @@ int cmd_stat(int argc, const char **argv) perror("failed to create output file"); return -1; } - clock_gettime(CLOCK_REALTIME, &tm); - fprintf(output, "# started on %s\n", ctime(&tm.tv_sec)); + if (!stat_config.json_output) { + clock_gettime(CLOCK_REALTIME, &tm); + fprintf(output, "# started on %s\n", ctime(&tm.tv_sec)); + } } else if (output_fd > 0) { mode = append_file ? 
"a" : "w"; output = fdopen(output_fd, mode); @@ -2152,6 +2597,14 @@ int cmd_stat(int argc, const char **argv) } } + if (stat_config.interval_clear && !isatty(fileno(output))) { + fprintf(stderr, "--interval-clear does not work with output\n"); + parse_options_usage(stat_usage, stat_options, "o", 1); + parse_options_usage(NULL, stat_options, "log-fd", 0); + parse_options_usage(NULL, stat_options, "interval-clear", 0); + return -1; + } + stat_config.output = output; /* @@ -2169,6 +2622,12 @@ int cmd_stat(int argc, const char **argv) } else if (big_num_opt == 0) /* User passed --no-big-num */ stat_config.big_num = false; + err = target__validate(&target); + if (err) { + target__strerror(&target, err, errbuf, BUFSIZ); + pr_warning("%s\n", errbuf); + } + setup_system_wide(argc); /* @@ -2212,7 +2671,8 @@ int cmd_stat(int argc, const char **argv) * --per-thread is aggregated per thread, we dont mix it with cpu mode */ if (((stat_config.aggr_mode != AGGR_GLOBAL && - stat_config.aggr_mode != AGGR_THREAD) || nr_cgroups) && + stat_config.aggr_mode != AGGR_THREAD) || + (nr_cgroups || stat_config.cgroup_list)) && !target__has_cpu(&target)) { fprintf(stderr, "both cgroup and no-aggregation " "modes only available in system-wide mode\n"); @@ -2220,9 +2680,57 @@ int cmd_stat(int argc, const char **argv) parse_options_usage(stat_usage, stat_options, "G", 1); parse_options_usage(NULL, stat_options, "A", 1); parse_options_usage(NULL, stat_options, "a", 1); + parse_options_usage(NULL, stat_options, "for-each-cgroup", 0); goto out; } + if (stat_config.iostat_run) { + status = iostat_prepare(evsel_list, &stat_config); + if (status) + goto out; + if (iostat_mode == IOSTAT_LIST) { + iostat_list(evsel_list, &stat_config); + goto out; + } else if (verbose > 0) + iostat_list(evsel_list, &stat_config); + if (iostat_mode == IOSTAT_RUN && !target__has_cpu(&target)) + target.system_wide = true; + } + + if ((stat_config.aggr_mode == AGGR_THREAD) && (target.system_wide)) + target.per_thread = true; + + stat_config.system_wide = target.system_wide; + if (target.cpu_list) { + stat_config.user_requested_cpu_list = strdup(target.cpu_list); + if (!stat_config.user_requested_cpu_list) { + status = -ENOMEM; + goto out; + } + } + + /* + * Metric parsing needs to be delayed as metrics may optimize events + * knowing the target is system-wide. 
+ */ + if (metrics) { + const char *pmu = parse_events_option_args.pmu_filter ?: "all"; + int ret = metricgroup__parse_groups(evsel_list, pmu, metrics, + stat_config.metric_no_group, + stat_config.metric_no_merge, + stat_config.metric_no_threshold, + stat_config.user_requested_cpu_list, + stat_config.system_wide, + stat_config.hardware_aware_grouping, + &stat_config.metric_events); + + zfree(&metrics); + if (ret) { + status = ret; + goto out; + } + } + if (add_default_attributes()) goto out; @@ -2235,16 +2743,16 @@ int cmd_stat(int argc, const char **argv) } if (evlist__expand_cgroup(evsel_list, stat_config.cgroup_list, - &stat_config.metric_events, true) < 0) + &stat_config.metric_events, true) < 0) { + parse_options_usage(stat_usage, stat_options, + "for-each-cgroup", 0); goto out; + } } - target__validate(&target); - - if ((stat_config.aggr_mode == AGGR_THREAD) && (target.system_wide)) - target.per_thread = true; + evlist__warn_user_requested_cpus(evsel_list, target.cpu_list); - if (perf_evlist__create_maps(evsel_list, &target) < 0) { + if (evlist__create_maps(evsel_list, &target) < 0) { if (target__has_task(&target)) { pr_err("Problems finding threads of monitor\n"); parse_options_usage(stat_usage, stat_options, "p", 1); @@ -2265,12 +2773,6 @@ int cmd_stat(int argc, const char **argv) */ if (stat_config.aggr_mode == AGGR_THREAD) { thread_map__read_comms(evsel_list->core.threads); - if (target.system_wide) { - if (runtime_stat_new(&stat_config, - perf_thread_map__nr(evsel_list->core.threads))) { - goto out; - } - } } if (stat_config.aggr_mode == AGGR_NODE) @@ -2303,10 +2805,10 @@ int cmd_stat(int argc, const char **argv) goto out; } - if (perf_evlist__alloc_stats(evsel_list, interval)) + if (perf_stat_init_aggr_mode()) goto out; - if (perf_stat_init_aggr_mode()) + if (evlist__alloc_stats(&stat_config, evsel_list, interval)) goto out; /* @@ -2336,6 +2838,8 @@ int cmd_stat(int argc, const char **argv) if (evlist__initialize_ctlfd(evsel_list, stat_config.ctl_fd, stat_config.ctl_fd_ack)) goto out; + /* Enable ignoring missing threads when -p option is defined. */ + evlist__first(evsel_list)->ignore_missing_thread = target.pid; status = 0; for (run_idx = 0; forever || run_idx < stat_config.run_count; run_idx++) { if (stat_config.run_count != 1 && verbose > 0) @@ -2343,7 +2847,7 @@ int cmd_stat(int argc, const char **argv) run_idx + 1); if (run_idx != 0) - perf_evlist__reset_prev_raw_counts(evsel_list); + evlist__reset_prev_raw_counts(evsel_list); status = run_perf_stat(argc, argv, run_idx); if (forever && status != -1 && !interval) { @@ -2352,8 +2856,11 @@ int cmd_stat(int argc, const char **argv) } } - if (!forever && status != -1 && (!interval || stat_config.summary)) + if (!forever && status != -1 && (!interval || stat_config.summary)) { + if (stat_config.run_count > 1) + evlist__copy_res_stats(&stat_config, evsel_list); print_counters(NULL, argc, argv); + } evlist__finalize_ctlfd(evsel_list); @@ -2361,7 +2868,7 @@ int cmd_stat(int argc, const char **argv) /* * We synthesize the kernel mmap record just so that older tools * don't emit warnings about not being able to resolve symbols - * due to /proc/sys/kernel/kptr_restrict settings and instear provide + * due to /proc/sys/kernel/kptr_restrict settings and instead provide * a saner message about no samples being in the perf.data file. 
* * This also serves to suppress a warning about f_header.data.size == 0 @@ -2371,9 +2878,10 @@ int cmd_stat(int argc, const char **argv) * tools remain -acme */ int fd = perf_data__fd(&perf_stat.data); - int err = perf_event__synthesize_kernel_mmap((void *)&perf_stat, - process_synthesized_event, - &perf_stat.session->machines.host); + + err = perf_event__synthesize_kernel_mmap((void *)&perf_stat, + process_synthesized_event, + &perf_stat.session->machines.host); if (err) { pr_warning("Couldn't synthesize the kernel mmap record, harmless, " "older tools may produce warnings about this file\n."); @@ -2394,9 +2902,13 @@ int cmd_stat(int argc, const char **argv) } perf_stat__exit_aggr_mode(); - perf_evlist__free_stats(evsel_list); + evlist__free_stats(evsel_list); out: + if (stat_config.iostat_run) + iostat_release(evsel_list); + zfree(&stat_config.walltime_run); + zfree(&stat_config.user_requested_cpu_list); if (smi_cost && smi_reset) sysfs__write_int(FREEZE_ON_SMI_PATH, 0); @@ -2404,7 +2916,6 @@ out: evlist__delete(evsel_list); metricgroup__rblist_exit(&stat_config.metric_events); - runtime_stat_delete(&stat_config); evlist__close_control(stat_config.ctl_fd, stat_config.ctl_fd_ack, &stat_config.ctl_fd_close); return status; |
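The aggregation rework in the hunks above replaces the old packed-integer socket/die/core ids with per-mode callbacks that each turn a CPU into a struct aggr_cpu_id, and cpu_aggr_map__new() then reduces the requested CPUs to the unique set of those ids (aggr_mode__get_aggr_file() merely selects the callback). The standalone sketch below illustrates that callback-plus-dedup pattern outside of perf; it is a simplified stand-in, not the perf implementation, and every name in it (cpu_id, mode_to_getter, build_aggr_map, the fake topology) is invented for illustration only.

/*
 * Standalone sketch of the "per-mode id getter + dedup" pattern used by
 * the patch. Fake topology: 8 CPUs, 2 sockets, 2 cores/socket, SMT2.
 */
#include <stdbool.h>
#include <stdio.h>

enum aggr_mode { AGGR_SOCKET, AGGR_CORE, AGGR_NONE };

struct cpu_id { int socket, core, cpu; };	/* -1 means "not used" */

typedef struct cpu_id (*aggr_id_get_t)(int cpu);

static struct cpu_id topo(int cpu)
{
	struct cpu_id id = { .socket = cpu / 4, .core = (cpu / 2) % 2, .cpu = cpu };
	return id;
}

static struct cpu_id get_socket_id(int cpu)
{
	struct cpu_id id = { .socket = topo(cpu).socket, .core = -1, .cpu = -1 };
	return id;
}

static struct cpu_id get_core_id(int cpu)
{
	struct cpu_id t = topo(cpu);
	struct cpu_id id = { .socket = t.socket, .core = t.core, .cpu = -1 };
	return id;
}

static struct cpu_id get_cpu_id(int cpu)
{
	return topo(cpu);
}

/* Counterpart of aggr_mode__get_aggr_file(): pick the per-CPU id getter. */
static aggr_id_get_t mode_to_getter(enum aggr_mode mode)
{
	switch (mode) {
	case AGGR_SOCKET:	return get_socket_id;
	case AGGR_CORE:		return get_core_id;
	case AGGR_NONE:		return get_cpu_id;
	default:		return NULL;
	}
}

static bool id_equal(struct cpu_id a, struct cpu_id b)
{
	return a.socket == b.socket && a.core == b.core && a.cpu == b.cpu;
}

/* Counterpart of cpu_aggr_map__new(): collect the unique ids of all CPUs. */
static int build_aggr_map(enum aggr_mode mode, int nr_cpus,
			  struct cpu_id *map, int map_size)
{
	aggr_id_get_t get_id = mode_to_getter(mode);
	int nr = 0;

	if (!get_id)
		return -1;

	for (int cpu = 0; cpu < nr_cpus; cpu++) {
		struct cpu_id id = get_id(cpu);
		bool seen = false;

		for (int i = 0; i < nr; i++)
			if (id_equal(map[i], id))
				seen = true;
		if (!seen && nr < map_size)
			map[nr++] = id;
	}
	return nr;
}

int main(void)
{
	struct cpu_id map[8];
	int nr = build_aggr_map(AGGR_CORE, 8, map, 8);

	for (int i = 0; i < nr; i++)
		printf("aggr id %d: socket %d core %d cpu %d\n",
		       i, map[i].socket, map[i].core, map[i].cpu);
	return nr < 0 ? 1 : 0;
}

Built with any C99 compiler, AGGR_CORE over this fake 8-CPU SMT2 machine collapses the 8 CPUs into 4 aggregation ids, which is the same folding perf stat --per-core performs on SMT siblings; AGGR_SOCKET yields 2 ids and AGGR_NONE keeps all 8.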