Diffstat (limited to 'tools/perf/builtin-stat.c')

 tools/perf/builtin-stat.c | 1471
 1 file changed, 942 insertions, 529 deletions
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index a098c2ebf4ea..265b05157972 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -48,6 +48,7 @@ #include "util/pmu.h" #include "util/event.h" #include "util/evlist.h" +#include "util/evlist-hybrid.h" #include "util/evsel.h" #include "util/debug.h" #include "util/color.h" @@ -56,7 +57,7 @@ #include "util/cpumap.h" #include "util/thread_map.h" #include "util/counts.h" -#include "util/group.h" +#include "util/topdown.h" #include "util/session.h" #include "util/tool.h" #include "util/string2.h" @@ -66,6 +67,10 @@ #include "util/time-utils.h" #include "util/top.h" #include "util/affinity.h" +#include "util/pfm.h" +#include "util/bpf_counter.h" +#include "util/iostat.h" +#include "util/pmu-hybrid.h" #include "asm/bug.h" #include <linux/time64.h> @@ -127,6 +132,30 @@ static const char * topdown_attrs[] = { NULL, }; +static const char *topdown_metric_attrs[] = { + "slots", + "topdown-retiring", + "topdown-bad-spec", + "topdown-fe-bound", + "topdown-be-bound", + NULL, +}; + +static const char *topdown_metric_L2_attrs[] = { + "slots", + "topdown-retiring", + "topdown-bad-spec", + "topdown-fe-bound", + "topdown-be-bound", + "topdown-heavy-ops", + "topdown-br-mispredict", + "topdown-fetch-lat", + "topdown-mem-bound", + NULL, +}; + +#define TOPDOWN_MAX_LEVEL 2 + static const char *smi_cost_attrs = { "{" "msr/aperf/," @@ -136,6 +165,7 @@ static const char *smi_cost_attrs = { }; static struct evlist *evsel_list; +static bool all_counters_use_bpf = true; static struct target target = { .uid = UINT_MAX, @@ -161,6 +191,7 @@ static bool append_file; static bool interval_count; static const char *output_name; static int output_fd; +static char *metrics; struct perf_stat { bool record; @@ -186,9 +217,67 @@ static struct perf_stat_config stat_config = { .run_count = 1, .metric_only_len = METRIC_ONLY_LEN, .walltime_nsecs_stats = &walltime_nsecs_stats, + .ru_stats = &ru_stats, .big_num = true, + .ctl_fd = -1, + .ctl_fd_ack = -1, + .iostat_run = false, }; +static bool cpus_map_matched(struct evsel *a, struct evsel *b) +{ + if (!a->core.cpus && !b->core.cpus) + return true; + + if (!a->core.cpus || !b->core.cpus) + return false; + + if (perf_cpu_map__nr(a->core.cpus) != perf_cpu_map__nr(b->core.cpus)) + return false; + + for (int i = 0; i < perf_cpu_map__nr(a->core.cpus); i++) { + if (perf_cpu_map__cpu(a->core.cpus, i).cpu != + perf_cpu_map__cpu(b->core.cpus, i).cpu) + return false; + } + + return true; +} + +static void evlist__check_cpu_maps(struct evlist *evlist) +{ + struct evsel *evsel, *pos, *leader; + char buf[1024]; + + if (evlist__has_hybrid(evlist)) + evlist__warn_hybrid_group(evlist); + + evlist__for_each_entry(evlist, evsel) { + leader = evsel__leader(evsel); + + /* Check that leader matches cpus with each member. */ + if (leader == evsel) + continue; + if (cpus_map_matched(leader, evsel)) + continue; + + /* If there's mismatch disable the group and warn user. 
*/ + WARN_ONCE(1, "WARNING: grouped events cpus do not match, disabling group:\n"); + evsel__group_desc(leader, buf, sizeof(buf)); + pr_warning(" %s\n", buf); + + if (verbose) { + cpu_map__snprint(leader->core.cpus, buf, sizeof(buf)); + pr_warning(" %s: %s\n", leader->name, buf); + cpu_map__snprint(evsel->core.cpus, buf, sizeof(buf)); + pr_warning(" %s: %s\n", evsel->name, buf); + } + + for_each_group_evsel(pos, leader) + evsel__remove_from_group(pos, leader); + } +} + static inline void diff_timespec(struct timespec *r, struct timespec *a, struct timespec *b) { @@ -203,13 +292,8 @@ static inline void diff_timespec(struct timespec *r, struct timespec *a, static void perf_stat__reset_stats(void) { - int i; - - perf_evlist__reset_stats(evsel_list); + evlist__reset_stats(evsel_list); perf_stat__reset_shadow_stats(); - - for (i = 0; i < stat_config.stats_num; i++) - perf_stat__reset_shadow_per_stat(&stat_config.stats[i]); } static int process_synthesized_event(struct perf_tool *tool __maybe_unused, @@ -238,35 +322,55 @@ static int write_stat_round_event(u64 tm, u64 type) #define SID(e, x, y) xyarray__entry(e->core.sample_id, x, y) -static int -perf_evsel__write_stat_event(struct evsel *counter, u32 cpu, u32 thread, - struct perf_counts_values *count) +static int evsel__write_stat_event(struct evsel *counter, int cpu_map_idx, u32 thread, + struct perf_counts_values *count) { - struct perf_sample_id *sid = SID(counter, cpu, thread); + struct perf_sample_id *sid = SID(counter, cpu_map_idx, thread); + struct perf_cpu cpu = perf_cpu_map__cpu(evsel__cpus(counter), cpu_map_idx); return perf_event__synthesize_stat(NULL, cpu, thread, sid->id, count, process_synthesized_event, NULL); } -static int read_single_counter(struct evsel *counter, int cpu, +static int read_single_counter(struct evsel *counter, int cpu_map_idx, int thread, struct timespec *rs) { - if (counter->tool_event == PERF_TOOL_DURATION_TIME) { - u64 val = rs->tv_nsec + rs->tv_sec*1000000000ULL; - struct perf_counts_values *count = - perf_counts(counter->counts, cpu, thread); - count->ena = count->run = val; - count->val = val; - return 0; + switch(counter->tool_event) { + case PERF_TOOL_DURATION_TIME: { + u64 val = rs->tv_nsec + rs->tv_sec*1000000000ULL; + struct perf_counts_values *count = + perf_counts(counter->counts, cpu_map_idx, thread); + count->ena = count->run = val; + count->val = val; + return 0; + } + case PERF_TOOL_USER_TIME: + case PERF_TOOL_SYSTEM_TIME: { + u64 val; + struct perf_counts_values *count = + perf_counts(counter->counts, cpu_map_idx, thread); + if (counter->tool_event == PERF_TOOL_USER_TIME) + val = ru_stats.ru_utime_usec_stat.mean; + else + val = ru_stats.ru_stime_usec_stat.mean; + count->ena = count->run = val; + count->val = val; + return 0; + } + default: + case PERF_TOOL_NONE: + return evsel__read_counter(counter, cpu_map_idx, thread); + case PERF_TOOL_MAX: + /* This should never be reached */ + return 0; } - return perf_evsel__read_counter(counter, cpu, thread); } /* * Read out the results of a single counter: * do not aggregate counts across CPUs in system-wide mode */ -static int read_counter_cpu(struct evsel *counter, struct timespec *rs, int cpu) +static int read_counter_cpu(struct evsel *counter, struct timespec *rs, int cpu_map_idx) { int nthreads = perf_thread_map__nr(evsel_list->core.threads); int thread; @@ -274,30 +378,27 @@ static int read_counter_cpu(struct evsel *counter, struct timespec *rs, int cpu) if (!counter->supported) return -ENOENT; - if (counter->core.system_wide) - nthreads = 1; - 
for (thread = 0; thread < nthreads; thread++) { struct perf_counts_values *count; - count = perf_counts(counter->counts, cpu, thread); + count = perf_counts(counter->counts, cpu_map_idx, thread); /* * The leader's group read loads data into its group members - * (via perf_evsel__read_counter()) and sets their count->loaded. + * (via evsel__read_counter()) and sets their count->loaded. */ - if (!perf_counts__is_loaded(counter->counts, cpu, thread) && - read_single_counter(counter, cpu, thread, rs)) { + if (!perf_counts__is_loaded(counter->counts, cpu_map_idx, thread) && + read_single_counter(counter, cpu_map_idx, thread, rs)) { counter->counts->scaled = -1; - perf_counts(counter->counts, cpu, thread)->ena = 0; - perf_counts(counter->counts, cpu, thread)->run = 0; + perf_counts(counter->counts, cpu_map_idx, thread)->ena = 0; + perf_counts(counter->counts, cpu_map_idx, thread)->run = 0; return -1; } - perf_counts__set_loaded(counter->counts, cpu, thread, false); + perf_counts__set_loaded(counter->counts, cpu_map_idx, thread, false); if (STAT_RECORD) { - if (perf_evsel__write_stat_event(counter, cpu, thread, count)) { + if (evsel__write_stat_event(counter, cpu_map_idx, thread, count)) { pr_err("failed to write stat event\n"); return -1; } @@ -306,8 +407,9 @@ static int read_counter_cpu(struct evsel *counter, struct timespec *rs, int cpu) if (verbose > 1) { fprintf(stat_config.output, "%s: %d: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n", - perf_evsel__name(counter), - cpu, + evsel__name(counter), + perf_cpu_map__cpu(evsel__cpus(counter), + cpu_map_idx).cpu, count->val, count->ena, count->run); } } @@ -315,33 +417,63 @@ static int read_counter_cpu(struct evsel *counter, struct timespec *rs, int cpu) return 0; } -static void read_counters(struct timespec *rs) +static int read_affinity_counters(struct timespec *rs) { - struct evsel *counter; - struct affinity affinity; - int i, ncpus, cpu; + struct evlist_cpu_iterator evlist_cpu_itr; + struct affinity saved_affinity, *affinity; - if (affinity__setup(&affinity) < 0) - return; + if (all_counters_use_bpf) + return 0; - ncpus = perf_cpu_map__nr(evsel_list->core.all_cpus); if (!target__has_cpu(&target) || target__has_per_thread(&target)) - ncpus = 1; - evlist__for_each_cpu(evsel_list, i, cpu) { - if (i >= ncpus) - break; - affinity__set(&affinity, cpu); + affinity = NULL; + else if (affinity__setup(&saved_affinity) < 0) + return -1; + else + affinity = &saved_affinity; - evlist__for_each_entry(evsel_list, counter) { - if (evsel__cpu_iter_skip(counter, cpu)) - continue; - if (!counter->err) { - counter->err = read_counter_cpu(counter, rs, - counter->cpu_iter - 1); - } + evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) { + struct evsel *counter = evlist_cpu_itr.evsel; + + if (evsel__is_bpf(counter)) + continue; + + if (!counter->err) { + counter->err = read_counter_cpu(counter, rs, + evlist_cpu_itr.cpu_map_idx); } } - affinity__cleanup(&affinity); + if (affinity) + affinity__cleanup(&saved_affinity); + + return 0; +} + +static int read_bpf_map_counters(void) +{ + struct evsel *counter; + int err; + + evlist__for_each_entry(evsel_list, counter) { + if (!evsel__is_bpf(counter)) + continue; + + err = bpf_counter__read(counter); + if (err) + return err; + } + return 0; +} + +static void read_counters(struct timespec *rs) +{ + struct evsel *counter; + + if (!stat_config.stop_read_counter) { + if (read_bpf_map_counters() || + read_affinity_counters(rs)) + return; + } evlist__for_each_entry(evsel_list, counter) { if (counter->err) @@ -359,6 +491,7 @@ 
static void process_interval(void) clock_gettime(CLOCK_MONOTONIC, &ts); diff_timespec(&rs, &ts, &ref_time); + perf_stat__reset_shadow_per_stat(&rt_stat); read_counters(&rs); if (STAT_RECORD) { @@ -367,39 +500,79 @@ static void process_interval(void) } init_stats(&walltime_nsecs_stats); - update_stats(&walltime_nsecs_stats, stat_config.interval * 1000000); + update_stats(&walltime_nsecs_stats, stat_config.interval * 1000000ULL); print_counters(&rs, 0, NULL); } -static void enable_counters(void) +static bool handle_interval(unsigned int interval, int *times) +{ + if (interval) { + process_interval(); + if (interval_count && !(--(*times))) + return true; + } + return false; +} + +static int enable_counters(void) { - if (stat_config.initial_delay) + struct evsel *evsel; + int err; + + evlist__for_each_entry(evsel_list, evsel) { + if (!evsel__is_bpf(evsel)) + continue; + + err = bpf_counter__enable(evsel); + if (err) + return err; + } + + if (stat_config.initial_delay < 0) { + pr_info(EVLIST_DISABLED_MSG); + return 0; + } + + if (stat_config.initial_delay > 0) { + pr_info(EVLIST_DISABLED_MSG); usleep(stat_config.initial_delay * USEC_PER_MSEC); + } /* * We need to enable counters only if: * - we don't have tracee (attaching to task or cpu) * - we have initial delay configured */ - if (!target__none(&target) || stat_config.initial_delay) - evlist__enable(evsel_list); + if (!target__none(&target) || stat_config.initial_delay) { + if (!all_counters_use_bpf) + evlist__enable(evsel_list); + if (stat_config.initial_delay > 0) + pr_info(EVLIST_ENABLED_MSG); + } + return 0; } static void disable_counters(void) { + struct evsel *counter; + /* * If we don't have tracee (attaching to task or cpu), counters may * still be running. To get accurate group ratios, we must stop groups * from counting before reading their constituent counters. */ - if (!target__none(&target)) - evlist__disable(evsel_list); + if (!target__none(&target)) { + evlist__for_each_entry(evsel_list, counter) + bpf_counter__disable(counter); + if (!all_counters_use_bpf) + evlist__disable(evsel_list); + } } static volatile int workload_exec_errno; /* - * perf_evlist__prepare_workload will send a SIGUSR1 + * evlist__prepare_workload will send a SIGUSR1 * if the fork fails, since we asked by setting its * want_signal to true. 
*/ @@ -409,7 +582,7 @@ static void workload_exec_failed_signal(int signo __maybe_unused, siginfo_t *inf workload_exec_errno = info->si_value.sival_int; } -static bool perf_evsel__should_store_id(struct evsel *counter) +static bool evsel__should_store_id(struct evsel *counter) { return STAT_RECORD || counter->core.attr.read_format & PERF_FORMAT_ID; } @@ -436,6 +609,86 @@ static bool is_target_alive(struct target *_target, return false; } +static void process_evlist(struct evlist *evlist, unsigned int interval) +{ + enum evlist_ctl_cmd cmd = EVLIST_CTL_CMD_UNSUPPORTED; + + if (evlist__ctlfd_process(evlist, &cmd) > 0) { + switch (cmd) { + case EVLIST_CTL_CMD_ENABLE: + __fallthrough; + case EVLIST_CTL_CMD_DISABLE: + if (interval) + process_interval(); + break; + case EVLIST_CTL_CMD_SNAPSHOT: + case EVLIST_CTL_CMD_ACK: + case EVLIST_CTL_CMD_UNSUPPORTED: + case EVLIST_CTL_CMD_EVLIST: + case EVLIST_CTL_CMD_STOP: + case EVLIST_CTL_CMD_PING: + default: + break; + } + } +} + +static void compute_tts(struct timespec *time_start, struct timespec *time_stop, + int *time_to_sleep) +{ + int tts = *time_to_sleep; + struct timespec time_diff; + + diff_timespec(&time_diff, time_stop, time_start); + + tts -= time_diff.tv_sec * MSEC_PER_SEC + + time_diff.tv_nsec / NSEC_PER_MSEC; + + if (tts < 0) + tts = 0; + + *time_to_sleep = tts; +} + +static int dispatch_events(bool forks, int timeout, int interval, int *times) +{ + int child_exited = 0, status = 0; + int time_to_sleep, sleep_time; + struct timespec time_start, time_stop; + + if (interval) + sleep_time = interval; + else if (timeout) + sleep_time = timeout; + else + sleep_time = 1000; + + time_to_sleep = sleep_time; + + while (!done) { + if (forks) + child_exited = waitpid(child_pid, &status, WNOHANG); + else + child_exited = !is_target_alive(&target, evsel_list->core.threads) ? 
1 : 0; + + if (child_exited) + break; + + clock_gettime(CLOCK_MONOTONIC, &time_start); + if (!(evlist__poll(evsel_list, time_to_sleep) > 0)) { /* poll timeout or EINTR */ + if (timeout || handle_interval(interval, times)) + break; + time_to_sleep = sleep_time; + } else { /* fd revent */ + process_evlist(evsel_list, interval); + clock_gettime(CLOCK_MONOTONIC, &time_stop); + compute_tts(&time_start, &time_stop, &time_to_sleep); + } + } + + return status; +} + enum counter_recovery { COUNTER_SKIP, COUNTER_RETRY, @@ -454,7 +707,7 @@ static enum counter_recovery stat_handle_error(struct evsel *counter) errno == ENXIO) { if (verbose > 0) ui__warning("%s event is not supported by the kernel.\n", - perf_evsel__name(counter)); + evsel__name(counter)); counter->supported = false; /* * errored is a sticky flag that means one of the counter's @@ -462,10 +715,10 @@ static enum counter_recovery stat_handle_error(struct evsel *counter) */ counter->errored = true; - if ((counter->leader != counter) || - !(counter->leader->core.nr_members > 1)) + if ((evsel__leader(counter) != counter) || + !(counter->core.leader->nr_members > 1)) return COUNTER_SKIP; - } else if (perf_evsel__fallback(counter, errno, msg, sizeof(msg))) { + } else if (evsel__fallback(counter, errno, msg, sizeof(msg))) { if (verbose > 0) ui__warning("%s\n", msg); return COUNTER_RETRY; @@ -483,8 +736,7 @@ static enum counter_recovery stat_handle_error(struct evsel *counter) } } - perf_evsel__open_strerror(counter, &target, - errno, msg, sizeof(msg)); + evsel__open_strerror(counter, &target, errno, msg, sizeof(msg)); ui__error("%s\n", msg); if (child_pid != -1) @@ -500,29 +752,17 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx) char msg[BUFSIZ]; unsigned long long t0, t1; struct evsel *counter; - struct timespec ts; size_t l; int status = 0; const bool forks = (argc > 0); bool is_pipe = STAT_RECORD ? 
perf_stat.data.is_pipe : false; - struct affinity affinity; - int i, cpu; + struct evlist_cpu_iterator evlist_cpu_itr; + struct affinity saved_affinity, *affinity = NULL; + int err; bool second_pass = false; - if (interval) { - ts.tv_sec = interval / USEC_PER_MSEC; - ts.tv_nsec = (interval % USEC_PER_MSEC) * NSEC_PER_MSEC; - } else if (timeout) { - ts.tv_sec = timeout / USEC_PER_MSEC; - ts.tv_nsec = (timeout % USEC_PER_MSEC) * NSEC_PER_MSEC; - } else { - ts.tv_sec = 1; - ts.tv_nsec = 0; - } - if (forks) { - if (perf_evlist__prepare_workload(evsel_list, &target, argv, is_pipe, - workload_exec_failed_signal) < 0) { + if (evlist__prepare_workload(evsel_list, &target, argv, is_pipe, workload_exec_failed_signal) < 0) { perror("failed to prepare workload"); return -1; } @@ -530,53 +770,69 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx) } if (group) - perf_evlist__set_leader(evsel_list); + evlist__set_leader(evsel_list); - if (affinity__setup(&affinity) < 0) - return -1; + if (!cpu_map__is_dummy(evsel_list->core.user_requested_cpus)) { + if (affinity__setup(&saved_affinity) < 0) + return -1; + affinity = &saved_affinity; + } - evlist__for_each_cpu (evsel_list, i, cpu) { - affinity__set(&affinity, cpu); + evlist__for_each_entry(evsel_list, counter) { + counter->reset_group = false; + if (bpf_counter__load(counter, &target)) + return -1; + if (!evsel__is_bpf(counter)) + all_counters_use_bpf = false; + } - evlist__for_each_entry(evsel_list, counter) { - if (evsel__cpu_iter_skip(counter, cpu)) - continue; - if (counter->reset_group || counter->errored) - continue; -try_again: - if (create_perf_stat_counter(counter, &stat_config, &target, - counter->cpu_iter - 1) < 0) { - - /* - * Weak group failed. We cannot just undo this here - * because earlier CPUs might be in group mode, and the kernel - * doesn't support mixing group and non group reads. Defer - * it to later. - * Don't close here because we're in the wrong affinity. - */ - if ((errno == EINVAL || errno == EBADF) && - counter->leader != counter && - counter->weak_group) { - perf_evlist__reset_weak_group(evsel_list, counter, false); - assert(counter->reset_group); - second_pass = true; - continue; - } + evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) { + counter = evlist_cpu_itr.evsel; - switch (stat_handle_error(counter)) { - case COUNTER_FATAL: - return -1; - case COUNTER_RETRY: - goto try_again; - case COUNTER_SKIP: - continue; - default: - break; - } + /* + * bperf calls evsel__open_per_cpu() in bperf__load(), so + * no need to call it again here. + */ + if (target.use_bpf) + break; + if (counter->reset_group || counter->errored) + continue; + if (evsel__is_bpf(counter)) + continue; +try_again: + if (create_perf_stat_counter(counter, &stat_config, &target, + evlist_cpu_itr.cpu_map_idx) < 0) { + + /* + * Weak group failed. We cannot just undo this here + * because earlier CPUs might be in group mode, and the kernel + * doesn't support mixing group and non group reads. Defer + * it to later. + * Don't close here because we're in the wrong affinity. 
+ */ + if ((errno == EINVAL || errno == EBADF) && + evsel__leader(counter) != counter && + counter->weak_group) { + evlist__reset_weak_group(evsel_list, counter, false); + assert(counter->reset_group); + second_pass = true; + continue; } - counter->supported = true; + + switch (stat_handle_error(counter)) { + case COUNTER_FATAL: + return -1; + case COUNTER_RETRY: + goto try_again; + case COUNTER_SKIP: + continue; + default: + break; + } + } + counter->supported = true; } if (second_pass) { @@ -585,45 +841,41 @@ try_again: * and also close errored counters. */ - evlist__for_each_cpu(evsel_list, i, cpu) { - affinity__set(&affinity, cpu); - /* First close errored or weak retry */ - evlist__for_each_entry(evsel_list, counter) { - if (!counter->reset_group && !counter->errored) - continue; - if (evsel__cpu_iter_skip_no_inc(counter, cpu)) - continue; - perf_evsel__close_cpu(&counter->core, counter->cpu_iter); - } - /* Now reopen weak */ - evlist__for_each_entry(evsel_list, counter) { - if (!counter->reset_group && !counter->errored) - continue; - if (evsel__cpu_iter_skip(counter, cpu)) - continue; - if (!counter->reset_group) - continue; + /* First close errored or weak retry */ + evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) { + counter = evlist_cpu_itr.evsel; + + if (!counter->reset_group && !counter->errored) + continue; + + perf_evsel__close_cpu(&counter->core, evlist_cpu_itr.cpu_map_idx); + } + /* Now reopen weak */ + evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) { + counter = evlist_cpu_itr.evsel; + + if (!counter->reset_group) + continue; try_again_reset: - pr_debug2("reopening weak %s\n", perf_evsel__name(counter)); - if (create_perf_stat_counter(counter, &stat_config, &target, - counter->cpu_iter - 1) < 0) { - - switch (stat_handle_error(counter)) { - case COUNTER_FATAL: - return -1; - case COUNTER_RETRY: - goto try_again_reset; - case COUNTER_SKIP: - continue; - default: - break; - } + pr_debug2("reopening weak %s\n", evsel__name(counter)); + if (create_perf_stat_counter(counter, &stat_config, &target, + evlist_cpu_itr.cpu_map_idx) < 0) { + + switch (stat_handle_error(counter)) { + case COUNTER_FATAL: + return -1; + case COUNTER_RETRY: + goto try_again_reset; + case COUNTER_SKIP: + continue; + default: + break; } - counter->supported = true; } + counter->supported = true; } } - affinity__cleanup(&affinity); + affinity__cleanup(affinity); evlist__for_each_entry(evsel_list, counter) { if (!counter->supported) { @@ -635,20 +887,20 @@ try_again_reset: if (l > stat_config.unit_width) stat_config.unit_width = l; - if (perf_evsel__should_store_id(counter) && - perf_evsel__store_ids(counter, evsel_list)) + if (evsel__should_store_id(counter) && + evsel__store_ids(counter, evsel_list)) return -1; } - if (perf_evlist__apply_filters(evsel_list, &counter)) { + if (evlist__apply_filters(evsel_list, &counter)) { pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n", - counter->filter, perf_evsel__name(counter), errno, + counter->filter, evsel__name(counter), errno, str_error_r(errno, msg, sizeof(msg))); return -1; } if (STAT_RECORD) { - int err, fd = perf_data__fd(&perf_stat.data); + int fd = perf_data__fd(&perf_stat.data); if (is_pipe) { err = perf_header__write_pipe(perf_data__fd(&perf_stat.data)); @@ -666,28 +918,25 @@ try_again_reset: return err; } - /* - * Enable counters and exec the command: - */ + err = enable_counters(); + if (err) + return -1; + + /* Exec the command, if any */ + if (forks) + evlist__start_workload(evsel_list); + t0 = rdclock(); 
clock_gettime(CLOCK_MONOTONIC, &ref_time); if (forks) { - perf_evlist__start_workload(evsel_list); - enable_counters(); - - if (interval || timeout) { - while (!waitpid(child_pid, &status, WNOHANG)) { - nanosleep(&ts, NULL); - if (timeout) - break; - process_interval(); - if (interval_count && !(--times)) - break; - } - } - if (child_pid != -1) + if (interval || timeout || evlist__ctlfd_initialized(evsel_list)) + status = dispatch_events(forks, timeout, interval, ×); + if (child_pid != -1) { + if (timeout) + kill(child_pid, SIGTERM); wait4(child_pid, &status, 0, &stat_config.ru_data); + } if (workload_exec_errno) { const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg)); @@ -698,19 +947,7 @@ try_again_reset: if (WIFSIGNALED(status)) psignal(WTERMSIG(status), argv[0]); } else { - enable_counters(); - while (!done) { - nanosleep(&ts, NULL); - if (!is_target_alive(&target, evsel_list->core.threads)) - break; - if (timeout) - break; - if (interval) { - process_interval(); - if (interval_count && !(--times)) - break; - } - } + status = dispatch_events(forks, timeout, interval, ×); } disable_counters(); @@ -720,7 +957,22 @@ try_again_reset: if (stat_config.walltime_run_table) stat_config.walltime_run[run_idx] = t1 - t0; - update_stats(&walltime_nsecs_stats, t1 - t0); + if (interval && stat_config.summary) { + stat_config.interval = 0; + stat_config.stop_read_counter = true; + init_stats(&walltime_nsecs_stats); + update_stats(&walltime_nsecs_stats, t1 - t0); + + if (stat_config.aggr_mode == AGGR_GLOBAL) + evlist__save_aggr_prev_raw_counts(evsel_list); + + evlist__copy_prev_raw_counts(evsel_list); + evlist__reset_prev_raw_counts(evsel_list); + perf_stat__reset_shadow_per_stat(&rt_stat); + } else { + update_stats(&walltime_nsecs_stats, t1 - t0); + update_rusage_stats(&ru_stats, &stat_config.ru_data); + } /* * Closing a group leader splits the group, and as we only disable @@ -771,9 +1023,10 @@ static void print_counters(struct timespec *ts, int argc, const char **argv) /* Do not print anything if we record to the pipe. */ if (STAT_RECORD && perf_stat.data.is_pipe) return; + if (stat_config.quiet) + return; - perf_evlist__print_counters(evsel_list, &stat_config, &target, - ts, argc, argv); + evlist__print_counters(evsel_list, &stat_config, &target, ts, argc, argv); } static volatile int signr = -1; @@ -819,10 +1072,21 @@ static void sig_atexit(void) kill(getpid(), signr); } +void perf_stat__set_big_num(int set) +{ + stat_config.big_num = (set != 0); +} + +void perf_stat__set_no_csv_summary(int set) +{ + stat_config.no_csv_summary = (set != 0); +} + static int stat__set_big_num(const struct option *opt __maybe_unused, const char *s __maybe_unused, int unset) { big_num_opt = unset ? 
0 : 1; + perf_stat__set_big_num(!unset); return 0; } @@ -834,11 +1098,63 @@ static int enable_metric_only(const struct option *opt __maybe_unused, return 0; } -static int parse_metric_groups(const struct option *opt, +static int append_metric_groups(const struct option *opt __maybe_unused, const char *str, int unset __maybe_unused) { - return metricgroup__parse_groups(opt, str, &stat_config.metric_events); + if (metrics) { + char *tmp; + + if (asprintf(&tmp, "%s,%s", metrics, str) < 0) + return -ENOMEM; + free(metrics); + metrics = tmp; + } else { + metrics = strdup(str); + if (!metrics) + return -ENOMEM; + } + return 0; +} + +static int parse_control_option(const struct option *opt, + const char *str, + int unset __maybe_unused) +{ + struct perf_stat_config *config = opt->value; + + return evlist__parse_control(str, &config->ctl_fd, &config->ctl_fd_ack, &config->ctl_fd_close); +} + +static int parse_stat_cgroups(const struct option *opt, + const char *str, int unset) +{ + if (stat_config.cgroup_list) { + pr_err("--cgroup and --for-each-cgroup cannot be used together\n"); + return -1; + } + + return parse_cgroups(opt, str, unset); +} + +static int parse_hybrid_type(const struct option *opt, + const char *str, + int unset __maybe_unused) +{ + struct evlist *evlist = *(struct evlist **)opt->value; + + if (!list_empty(&evlist->core.entries)) { + fprintf(stderr, "Must define cputype before events/metrics\n"); + return -1; + } + + evlist->hybrid_pmu_name = perf_pmu__hybrid_type_to_pmu(str); + if (!evlist->hybrid_pmu_name) { + fprintf(stderr, "--cputype %s is not supported!\n", str); + return -1; + } + + return 0; } static struct option stat_options[] = { @@ -855,6 +1171,14 @@ static struct option stat_options[] = { "stat events on existing process id"), OPT_STRING('t', "tid", &target.tid, "tid", "stat events on existing thread id"), +#ifdef HAVE_BPF_SKEL + OPT_STRING('b', "bpf-prog", &target.bpf_str, "bpf-prog-id", + "stat events on existing bpf program id"), + OPT_BOOLEAN(0, "bpf-counters", &target.use_bpf, + "use bpf program to count events"), + OPT_STRING(0, "bpf-attr-map", &target.attr_map, "attr-map-path", + "path to perf_event_attr map"), +#endif OPT_BOOLEAN('a', "all-cpus", &target.system_wide, "system-wide collection from all CPUs"), OPT_BOOLEAN('g', "group", &group, @@ -881,10 +1205,16 @@ static struct option stat_options[] = { OPT_SET_UINT('A', "no-aggr", &stat_config.aggr_mode, "disable CPU count aggregation", AGGR_NONE), OPT_BOOLEAN(0, "no-merge", &stat_config.no_merge, "Do not merge identical named events"), + OPT_BOOLEAN(0, "hybrid-merge", &stat_config.hybrid_merge, + "Merge identical named hybrid events"), OPT_STRING('x', "field-separator", &stat_config.csv_sep, "separator", "print counts with custom separator"), + OPT_BOOLEAN('j', "json-output", &stat_config.json_output, + "print counts in JSON format"), OPT_CALLBACK('G', "cgroup", &evsel_list, "name", - "monitor event in cgroup name only", parse_cgroups), + "monitor event in cgroup name only", parse_stat_cgroups), + OPT_STRING(0, "for-each-cgroup", &stat_config.cgroup_list, "name", + "expand events for each cgroup"), OPT_STRING('o', "output", &output_name, "file", "output file name"), OPT_BOOLEAN(0, "append", &append_file, "append to the output file"), OPT_INTEGER(0, "log-fd", &output_fd, @@ -912,88 +1242,128 @@ static struct option stat_options[] = { "aggregate counts per thread", AGGR_THREAD), OPT_SET_UINT(0, "per-node", &stat_config.aggr_mode, "aggregate counts per numa node", AGGR_NODE), - OPT_UINTEGER('D', "delay", 
&stat_config.initial_delay, - "ms to wait before starting measurement after program start"), + OPT_INTEGER('D', "delay", &stat_config.initial_delay, + "ms to wait before starting measurement after program start (-1: start with events disabled)"), OPT_CALLBACK_NOOPT(0, "metric-only", &stat_config.metric_only, NULL, "Only print computed metrics. No raw values", enable_metric_only), + OPT_BOOLEAN(0, "metric-no-group", &stat_config.metric_no_group, + "don't group metric events, impacts multiplexing"), + OPT_BOOLEAN(0, "metric-no-merge", &stat_config.metric_no_merge, + "don't try to share events between metrics in a group"), OPT_BOOLEAN(0, "topdown", &topdown_run, - "measure topdown level 1 statistics"), + "measure top-down statistics"), + OPT_UINTEGER(0, "td-level", &stat_config.topdown_level, + "Set the metrics level for the top-down statistics (0: max level)"), OPT_BOOLEAN(0, "smi-cost", &smi_cost, "measure SMI cost"), OPT_CALLBACK('M', "metrics", &evsel_list, "metric/metric group list", "monitor specified metrics or metric groups (separated by ,)", - parse_metric_groups), + append_metric_groups), OPT_BOOLEAN_FLAG(0, "all-kernel", &stat_config.all_kernel, "Configure all used events to run in kernel space.", PARSE_OPT_EXCLUSIVE), OPT_BOOLEAN_FLAG(0, "all-user", &stat_config.all_user, "Configure all used events to run in user space.", PARSE_OPT_EXCLUSIVE), + OPT_BOOLEAN(0, "percore-show-thread", &stat_config.percore_show_thread, + "Use with 'percore' event qualifier to show the event " + "counts of one hardware thread by sum up total hardware " + "threads of same physical core"), + OPT_BOOLEAN(0, "summary", &stat_config.summary, + "print summary for interval mode"), + OPT_BOOLEAN(0, "no-csv-summary", &stat_config.no_csv_summary, + "don't print 'summary' for CSV summary output"), + OPT_BOOLEAN(0, "quiet", &stat_config.quiet, + "don't print output (useful with record)"), + OPT_CALLBACK(0, "cputype", &evsel_list, "hybrid cpu type", + "Only enable events on applying cpu with this type " + "for hybrid platform (e.g. core or atom)", + parse_hybrid_type), +#ifdef HAVE_LIBPFM + OPT_CALLBACK(0, "pfm-events", &evsel_list, "event", + "libpfm4 event selector. 
use 'perf list' to list available events", + parse_libpfm_events_option), +#endif + OPT_CALLBACK(0, "control", &stat_config, "fd:ctl-fd[,ack-fd] or fifo:ctl-fifo[,ack-fifo]", + "Listen on ctl-fd descriptor for command to control measurement ('enable': enable events, 'disable': disable events).\n" + "\t\t\t Optionally send control command completion ('ack\\n') to ack-fd descriptor.\n" + "\t\t\t Alternatively, ctl-fifo / ack-fifo will be opened and used as ctl-fd / ack-fd.", + parse_control_option), + OPT_CALLBACK_OPTARG(0, "iostat", &evsel_list, &stat_config, "default", + "measure I/O performance metrics provided by arch/platform", + iostat_parse), OPT_END() }; -static int perf_stat__get_socket(struct perf_stat_config *config __maybe_unused, - struct perf_cpu_map *map, int cpu) +static const char *const aggr_mode__string[] = { + [AGGR_CORE] = "core", + [AGGR_DIE] = "die", + [AGGR_GLOBAL] = "global", + [AGGR_NODE] = "node", + [AGGR_NONE] = "none", + [AGGR_SOCKET] = "socket", + [AGGR_THREAD] = "thread", + [AGGR_UNSET] = "unset", +}; + +static struct aggr_cpu_id perf_stat__get_socket(struct perf_stat_config *config __maybe_unused, + struct perf_cpu cpu) { - return cpu_map__get_socket(map, cpu, NULL); + return aggr_cpu_id__socket(cpu, /*data=*/NULL); } -static int perf_stat__get_die(struct perf_stat_config *config __maybe_unused, - struct perf_cpu_map *map, int cpu) +static struct aggr_cpu_id perf_stat__get_die(struct perf_stat_config *config __maybe_unused, + struct perf_cpu cpu) { - return cpu_map__get_die(map, cpu, NULL); + return aggr_cpu_id__die(cpu, /*data=*/NULL); } -static int perf_stat__get_core(struct perf_stat_config *config __maybe_unused, - struct perf_cpu_map *map, int cpu) +static struct aggr_cpu_id perf_stat__get_core(struct perf_stat_config *config __maybe_unused, + struct perf_cpu cpu) { - return cpu_map__get_core(map, cpu, NULL); + return aggr_cpu_id__core(cpu, /*data=*/NULL); } -static int perf_stat__get_node(struct perf_stat_config *config __maybe_unused, - struct perf_cpu_map *map, int cpu) +static struct aggr_cpu_id perf_stat__get_node(struct perf_stat_config *config __maybe_unused, + struct perf_cpu cpu) { - return cpu_map__get_node(map, cpu, NULL); + return aggr_cpu_id__node(cpu, /*data=*/NULL); } -static int perf_stat__get_aggr(struct perf_stat_config *config, - aggr_get_id_t get_id, struct perf_cpu_map *map, int idx) +static struct aggr_cpu_id perf_stat__get_aggr(struct perf_stat_config *config, + aggr_get_id_t get_id, struct perf_cpu cpu) { - int cpu; - - if (idx >= map->nr) - return -1; - - cpu = map->map[idx]; + struct aggr_cpu_id id = aggr_cpu_id__empty(); - if (config->cpus_aggr_map->map[cpu] == -1) - config->cpus_aggr_map->map[cpu] = get_id(config, map, idx); + if (aggr_cpu_id__is_empty(&config->cpus_aggr_map->map[cpu.cpu])) + config->cpus_aggr_map->map[cpu.cpu] = get_id(config, cpu); - return config->cpus_aggr_map->map[cpu]; + id = config->cpus_aggr_map->map[cpu.cpu]; + return id; } -static int perf_stat__get_socket_cached(struct perf_stat_config *config, - struct perf_cpu_map *map, int idx) +static struct aggr_cpu_id perf_stat__get_socket_cached(struct perf_stat_config *config, + struct perf_cpu cpu) { - return perf_stat__get_aggr(config, perf_stat__get_socket, map, idx); + return perf_stat__get_aggr(config, perf_stat__get_socket, cpu); } -static int perf_stat__get_die_cached(struct perf_stat_config *config, - struct perf_cpu_map *map, int idx) +static struct aggr_cpu_id perf_stat__get_die_cached(struct perf_stat_config *config, + struct perf_cpu cpu) { - 
return perf_stat__get_aggr(config, perf_stat__get_die, map, idx); + return perf_stat__get_aggr(config, perf_stat__get_die, cpu); } -static int perf_stat__get_core_cached(struct perf_stat_config *config, - struct perf_cpu_map *map, int idx) +static struct aggr_cpu_id perf_stat__get_core_cached(struct perf_stat_config *config, + struct perf_cpu cpu) { - return perf_stat__get_aggr(config, perf_stat__get_core, map, idx); + return perf_stat__get_aggr(config, perf_stat__get_core, cpu); } -static int perf_stat__get_node_cached(struct perf_stat_config *config, - struct perf_cpu_map *map, int idx) +static struct aggr_cpu_id perf_stat__get_node_cached(struct perf_stat_config *config, + struct perf_cpu cpu) { - return perf_stat__get_aggr(config, perf_stat__get_node, map, idx); + return perf_stat__get_aggr(config, perf_stat__get_node, cpu); } static bool term_percore_set(void) @@ -1008,54 +1378,69 @@ static bool term_percore_set(void) return false; } -static int perf_stat_init_aggr_mode(void) +static aggr_cpu_id_get_t aggr_mode__get_aggr(enum aggr_mode aggr_mode) { - int nr; + switch (aggr_mode) { + case AGGR_SOCKET: + return aggr_cpu_id__socket; + case AGGR_DIE: + return aggr_cpu_id__die; + case AGGR_CORE: + return aggr_cpu_id__core; + case AGGR_NODE: + return aggr_cpu_id__node; + case AGGR_NONE: + if (term_percore_set()) + return aggr_cpu_id__core; + + return NULL; + case AGGR_GLOBAL: + case AGGR_THREAD: + case AGGR_UNSET: + case AGGR_MAX: + default: + return NULL; + } +} - switch (stat_config.aggr_mode) { +static aggr_get_id_t aggr_mode__get_id(enum aggr_mode aggr_mode) +{ + switch (aggr_mode) { case AGGR_SOCKET: - if (cpu_map__build_socket_map(evsel_list->core.cpus, &stat_config.aggr_map)) { - perror("cannot build socket map"); - return -1; - } - stat_config.aggr_get_id = perf_stat__get_socket_cached; - break; + return perf_stat__get_socket_cached; case AGGR_DIE: - if (cpu_map__build_die_map(evsel_list->core.cpus, &stat_config.aggr_map)) { - perror("cannot build die map"); - return -1; - } - stat_config.aggr_get_id = perf_stat__get_die_cached; - break; + return perf_stat__get_die_cached; case AGGR_CORE: - if (cpu_map__build_core_map(evsel_list->core.cpus, &stat_config.aggr_map)) { - perror("cannot build core map"); - return -1; - } - stat_config.aggr_get_id = perf_stat__get_core_cached; - break; + return perf_stat__get_core_cached; case AGGR_NODE: - if (cpu_map__build_node_map(evsel_list->core.cpus, &stat_config.aggr_map)) { - perror("cannot build core map"); - return -1; - } - stat_config.aggr_get_id = perf_stat__get_node_cached; - break; + return perf_stat__get_node_cached; case AGGR_NONE: if (term_percore_set()) { - if (cpu_map__build_core_map(evsel_list->core.cpus, - &stat_config.aggr_map)) { - perror("cannot build core map"); - return -1; - } - stat_config.aggr_get_id = perf_stat__get_core_cached; + return perf_stat__get_core_cached; } - break; + return NULL; case AGGR_GLOBAL: case AGGR_THREAD: case AGGR_UNSET: + case AGGR_MAX: default: - break; + return NULL; + } +} + +static int perf_stat_init_aggr_mode(void) +{ + int nr; + aggr_cpu_id_get_t get_id = aggr_mode__get_aggr(stat_config.aggr_mode); + + if (get_id) { + stat_config.aggr_map = cpu_aggr_map__new(evsel_list->core.user_requested_cpus, + get_id, /*data=*/NULL); + if (!stat_config.aggr_map) { + pr_err("cannot build %s map", aggr_mode__string[stat_config.aggr_mode]); + return -1; + } + stat_config.aggr_get_id = aggr_mode__get_id(stat_config.aggr_mode); } /* @@ -1063,243 +1448,175 @@ static int perf_stat_init_aggr_mode(void) * taking the 
highest cpu number to be the size of * the aggregation translate cpumap. */ - nr = perf_cpu_map__max(evsel_list->core.cpus); - stat_config.cpus_aggr_map = perf_cpu_map__empty_new(nr + 1); + if (evsel_list->core.user_requested_cpus) + nr = perf_cpu_map__max(evsel_list->core.user_requested_cpus).cpu; + else + nr = 0; + stat_config.cpus_aggr_map = cpu_aggr_map__empty_new(nr + 1); return stat_config.cpus_aggr_map ? 0 : -ENOMEM; } -static void perf_stat__exit_aggr_mode(void) +static void cpu_aggr_map__delete(struct cpu_aggr_map *map) { - perf_cpu_map__put(stat_config.aggr_map); - perf_cpu_map__put(stat_config.cpus_aggr_map); - stat_config.aggr_map = NULL; - stat_config.cpus_aggr_map = NULL; + if (map) { + WARN_ONCE(refcount_read(&map->refcnt) != 0, + "cpu_aggr_map refcnt unbalanced\n"); + free(map); + } } -static inline int perf_env__get_cpu(struct perf_env *env, struct perf_cpu_map *map, int idx) +static void cpu_aggr_map__put(struct cpu_aggr_map *map) { - int cpu; - - if (idx > map->nr) - return -1; - - cpu = map->map[idx]; - - if (cpu >= env->nr_cpus_avail) - return -1; + if (map && refcount_dec_and_test(&map->refcnt)) + cpu_aggr_map__delete(map); +} - return cpu; +static void perf_stat__exit_aggr_mode(void) +{ + cpu_aggr_map__put(stat_config.aggr_map); + cpu_aggr_map__put(stat_config.cpus_aggr_map); + stat_config.aggr_map = NULL; + stat_config.cpus_aggr_map = NULL; } -static int perf_env__get_socket(struct perf_cpu_map *map, int idx, void *data) +static struct aggr_cpu_id perf_env__get_socket_aggr_by_cpu(struct perf_cpu cpu, void *data) { struct perf_env *env = data; - int cpu = perf_env__get_cpu(env, map, idx); + struct aggr_cpu_id id = aggr_cpu_id__empty(); - return cpu == -1 ? -1 : env->cpu[cpu].socket_id; + if (cpu.cpu != -1) + id.socket = env->cpu[cpu.cpu].socket_id; + + return id; } -static int perf_env__get_die(struct perf_cpu_map *map, int idx, void *data) +static struct aggr_cpu_id perf_env__get_die_aggr_by_cpu(struct perf_cpu cpu, void *data) { struct perf_env *env = data; - int die_id = -1, cpu = perf_env__get_cpu(env, map, idx); + struct aggr_cpu_id id = aggr_cpu_id__empty(); - if (cpu != -1) { + if (cpu.cpu != -1) { /* - * Encode socket in bit range 15:8 - * die_id is relative to socket, - * we need a global id. So we combine - * socket + die id + * die_id is relative to socket, so start + * with the socket ID and then add die to + * make a unique ID. */ - if (WARN_ONCE(env->cpu[cpu].socket_id >> 8, "The socket id number is too big.\n")) - return -1; - - if (WARN_ONCE(env->cpu[cpu].die_id >> 8, "The die id number is too big.\n")) - return -1; - - die_id = (env->cpu[cpu].socket_id << 8) | (env->cpu[cpu].die_id & 0xff); + id.socket = env->cpu[cpu.cpu].socket_id; + id.die = env->cpu[cpu.cpu].die_id; } - return die_id; + return id; } -static int perf_env__get_core(struct perf_cpu_map *map, int idx, void *data) +static struct aggr_cpu_id perf_env__get_core_aggr_by_cpu(struct perf_cpu cpu, void *data) { struct perf_env *env = data; - int core = -1, cpu = perf_env__get_cpu(env, map, idx); + struct aggr_cpu_id id = aggr_cpu_id__empty(); - if (cpu != -1) { + if (cpu.cpu != -1) { /* - * Encode socket in bit range 31:24 - * encode die id in bit range 23:16 * core_id is relative to socket and die, - * we need a global id. So we combine - * socket + die id + core id + * we need a global id. 
So we set + * socket, die id and core id */ - if (WARN_ONCE(env->cpu[cpu].socket_id >> 8, "The socket id number is too big.\n")) - return -1; - - if (WARN_ONCE(env->cpu[cpu].die_id >> 8, "The die id number is too big.\n")) - return -1; - - if (WARN_ONCE(env->cpu[cpu].core_id >> 16, "The core id number is too big.\n")) - return -1; - - core = (env->cpu[cpu].socket_id << 24) | - (env->cpu[cpu].die_id << 16) | - (env->cpu[cpu].core_id & 0xffff); + id.socket = env->cpu[cpu.cpu].socket_id; + id.die = env->cpu[cpu.cpu].die_id; + id.core = env->cpu[cpu.cpu].core_id; } - return core; + return id; } -static int perf_env__get_node(struct perf_cpu_map *map, int idx, void *data) +static struct aggr_cpu_id perf_env__get_node_aggr_by_cpu(struct perf_cpu cpu, void *data) { - int cpu = perf_env__get_cpu(data, map, idx); - - return perf_env__numa_node(data, cpu); -} + struct aggr_cpu_id id = aggr_cpu_id__empty(); -static int perf_env__build_socket_map(struct perf_env *env, struct perf_cpu_map *cpus, - struct perf_cpu_map **sockp) -{ - return cpu_map__build_map(cpus, sockp, perf_env__get_socket, env); -} - -static int perf_env__build_die_map(struct perf_env *env, struct perf_cpu_map *cpus, - struct perf_cpu_map **diep) -{ - return cpu_map__build_map(cpus, diep, perf_env__get_die, env); + id.node = perf_env__numa_node(data, cpu); + return id; } -static int perf_env__build_core_map(struct perf_env *env, struct perf_cpu_map *cpus, - struct perf_cpu_map **corep) +static struct aggr_cpu_id perf_stat__get_socket_file(struct perf_stat_config *config __maybe_unused, + struct perf_cpu cpu) { - return cpu_map__build_map(cpus, corep, perf_env__get_core, env); + return perf_env__get_socket_aggr_by_cpu(cpu, &perf_stat.session->header.env); } - -static int perf_env__build_node_map(struct perf_env *env, struct perf_cpu_map *cpus, - struct perf_cpu_map **nodep) +static struct aggr_cpu_id perf_stat__get_die_file(struct perf_stat_config *config __maybe_unused, + struct perf_cpu cpu) { - return cpu_map__build_map(cpus, nodep, perf_env__get_node, env); + return perf_env__get_die_aggr_by_cpu(cpu, &perf_stat.session->header.env); } -static int perf_stat__get_socket_file(struct perf_stat_config *config __maybe_unused, - struct perf_cpu_map *map, int idx) -{ - return perf_env__get_socket(map, idx, &perf_stat.session->header.env); -} -static int perf_stat__get_die_file(struct perf_stat_config *config __maybe_unused, - struct perf_cpu_map *map, int idx) +static struct aggr_cpu_id perf_stat__get_core_file(struct perf_stat_config *config __maybe_unused, + struct perf_cpu cpu) { - return perf_env__get_die(map, idx, &perf_stat.session->header.env); + return perf_env__get_core_aggr_by_cpu(cpu, &perf_stat.session->header.env); } -static int perf_stat__get_core_file(struct perf_stat_config *config __maybe_unused, - struct perf_cpu_map *map, int idx) +static struct aggr_cpu_id perf_stat__get_node_file(struct perf_stat_config *config __maybe_unused, + struct perf_cpu cpu) { - return perf_env__get_core(map, idx, &perf_stat.session->header.env); + return perf_env__get_node_aggr_by_cpu(cpu, &perf_stat.session->header.env); } -static int perf_stat__get_node_file(struct perf_stat_config *config __maybe_unused, - struct perf_cpu_map *map, int idx) +static aggr_cpu_id_get_t aggr_mode__get_aggr_file(enum aggr_mode aggr_mode) { - return perf_env__get_node(map, idx, &perf_stat.session->header.env); + switch (aggr_mode) { + case AGGR_SOCKET: + return perf_env__get_socket_aggr_by_cpu; + case AGGR_DIE: + return perf_env__get_die_aggr_by_cpu; + case 
AGGR_CORE: + return perf_env__get_core_aggr_by_cpu; + case AGGR_NODE: + return perf_env__get_node_aggr_by_cpu; + case AGGR_NONE: + case AGGR_GLOBAL: + case AGGR_THREAD: + case AGGR_UNSET: + case AGGR_MAX: + default: + return NULL; + } } -static int perf_stat_init_aggr_mode_file(struct perf_stat *st) +static aggr_get_id_t aggr_mode__get_id_file(enum aggr_mode aggr_mode) { - struct perf_env *env = &st->session->header.env; - - switch (stat_config.aggr_mode) { + switch (aggr_mode) { case AGGR_SOCKET: - if (perf_env__build_socket_map(env, evsel_list->core.cpus, &stat_config.aggr_map)) { - perror("cannot build socket map"); - return -1; - } - stat_config.aggr_get_id = perf_stat__get_socket_file; - break; + return perf_stat__get_socket_file; case AGGR_DIE: - if (perf_env__build_die_map(env, evsel_list->core.cpus, &stat_config.aggr_map)) { - perror("cannot build die map"); - return -1; - } - stat_config.aggr_get_id = perf_stat__get_die_file; - break; + return perf_stat__get_die_file; case AGGR_CORE: - if (perf_env__build_core_map(env, evsel_list->core.cpus, &stat_config.aggr_map)) { - perror("cannot build core map"); - return -1; - } - stat_config.aggr_get_id = perf_stat__get_core_file; - break; + return perf_stat__get_core_file; case AGGR_NODE: - if (perf_env__build_node_map(env, evsel_list->core.cpus, &stat_config.aggr_map)) { - perror("cannot build core map"); - return -1; - } - stat_config.aggr_get_id = perf_stat__get_node_file; - break; + return perf_stat__get_node_file; case AGGR_NONE: case AGGR_GLOBAL: case AGGR_THREAD: case AGGR_UNSET: + case AGGR_MAX: default: - break; + return NULL; } - - return 0; } -static int topdown_filter_events(const char **attr, char **str, bool use_group) +static int perf_stat_init_aggr_mode_file(struct perf_stat *st) { - int off = 0; - int i; - int len = 0; - char *s; + struct perf_env *env = &st->session->header.env; + aggr_cpu_id_get_t get_id = aggr_mode__get_aggr_file(stat_config.aggr_mode); - for (i = 0; attr[i]; i++) { - if (pmu_have_event("cpu", attr[i])) { - len += strlen(attr[i]) + 1; - attr[i - off] = attr[i]; - } else - off++; - } - attr[i - off] = NULL; + if (!get_id) + return 0; - *str = malloc(len + 1 + 2); - if (!*str) + stat_config.aggr_map = cpu_aggr_map__new(evsel_list->core.user_requested_cpus, get_id, env); + if (!stat_config.aggr_map) { + pr_err("cannot build %s map", aggr_mode__string[stat_config.aggr_mode]); return -1; - s = *str; - if (i - off == 0) { - *s = 0; - return 0; - } - if (use_group) - *s++ = '{'; - for (i = 0; attr[i]; i++) { - strcpy(s, attr[i]); - s += strlen(s); - *s++ = ','; } - if (use_group) { - s[-1] = '}'; - *s = 0; - } else - s[-1] = 0; + stat_config.aggr_get_id = aggr_mode__get_id_file(stat_config.aggr_mode); return 0; } -__weak bool arch_topdown_check_group(bool *warn) -{ - *warn = false; - return false; -} - -__weak void arch_topdown_group_warn(void) -{ -} - /* * Add default attributes, if there were no attributes specified or * if -d/--detailed, -d -d or -d -d -d is used: @@ -1419,26 +1736,30 @@ static int add_default_attributes(void) (PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) | (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) }, }; - struct parse_events_error errinfo; + + struct perf_event_attr default_null_attrs[] = {}; /* Set attrs if no event is selected and !null_run: */ if (stat_config.null_run) return 0; - bzero(&errinfo, sizeof(errinfo)); if (transaction_run) { + struct parse_events_error errinfo; /* Handle -T as -M transaction. 
Once platform specific metrics - * support has been added to the json files, all archictures + * support has been added to the json files, all architectures * will use this approach. To determine transaction support * on an architecture test for such a metric name. */ if (metricgroup__has_metric("transaction")) { - struct option opt = { .value = &evsel_list }; - - return metricgroup__parse_groups(&opt, "transaction", + return metricgroup__parse_groups(evsel_list, "transaction", + stat_config.metric_no_group, + stat_config.metric_no_merge, + stat_config.user_requested_cpu_list, + stat_config.system_wide, &stat_config.metric_events); } + parse_events_error__init(&errinfo); if (pmu_have_event("cpu", "cycles-ct") && pmu_have_event("cpu", "el-start")) err = parse_events(evsel_list, transaction_attrs, @@ -1449,13 +1770,14 @@ static int add_default_attributes(void) &errinfo); if (err) { fprintf(stderr, "Cannot set up transaction events\n"); - parse_events_print_error(&errinfo, transaction_attrs); - return -1; + parse_events_error__print(&errinfo, transaction_attrs); } - return 0; + parse_events_error__exit(&errinfo); + return err ? -1 : 0; } if (smi_cost) { + struct parse_events_error errinfo; int smi; if (sysfs__read_int(FREEZE_ON_SMI_PATH, &smi) < 0) { @@ -1471,28 +1793,61 @@ static int add_default_attributes(void) smi_reset = true; } - if (pmu_have_event("msr", "aperf") && - pmu_have_event("msr", "smi")) { - if (!force_metric_only) - stat_config.metric_only = true; - err = parse_events(evsel_list, smi_cost_attrs, &errinfo); - } else { + if (!pmu_have_event("msr", "aperf") || + !pmu_have_event("msr", "smi")) { fprintf(stderr, "To measure SMI cost, it needs " "msr/aperf/, msr/smi/ and cpu/cycles/ support\n"); - parse_events_print_error(&errinfo, smi_cost_attrs); return -1; } + if (!force_metric_only) + stat_config.metric_only = true; + + parse_events_error__init(&errinfo); + err = parse_events(evsel_list, smi_cost_attrs, &errinfo); if (err) { - parse_events_print_error(&errinfo, smi_cost_attrs); + parse_events_error__print(&errinfo, smi_cost_attrs); fprintf(stderr, "Cannot set up SMI cost events\n"); - return -1; } - return 0; + parse_events_error__exit(&errinfo); + return err ? -1 : 0; } if (topdown_run) { + const char **metric_attrs = topdown_metric_attrs; + unsigned int max_level = 1; char *str = NULL; bool warn = false; + const char *pmu_name = arch_get_topdown_pmu_name(evsel_list, true); + + if (!force_metric_only) + stat_config.metric_only = true; + + if (pmu_have_event(pmu_name, topdown_metric_L2_attrs[5])) { + metric_attrs = topdown_metric_L2_attrs; + max_level = 2; + } + + if (stat_config.topdown_level > max_level) { + pr_err("Invalid top-down metrics level. The max level is %u.\n", max_level); + return -1; + } else if (!stat_config.topdown_level) + stat_config.topdown_level = max_level; + + if (topdown_filter_events(metric_attrs, &str, 1, pmu_name) < 0) { + pr_err("Out of memory\n"); + return -1; + } + + if (metric_attrs[0] && str) { + if (!stat_config.interval && !stat_config.metric_only) { + fprintf(stat_config.output, + "Topdown accuracy may decrease when measuring long periods.\n" + "Please print the result regularly, e.g. 
-I1000\n"); + } + goto setup_metrics; + } + + zfree(&str); if (stat_config.aggr_mode != AGGR_GLOBAL && stat_config.aggr_mode != AGGR_CORE) { @@ -1505,25 +1860,30 @@ static int add_default_attributes(void) return -1; } - if (!force_metric_only) - stat_config.metric_only = true; if (topdown_filter_events(topdown_attrs, &str, - arch_topdown_check_group(&warn)) < 0) { + arch_topdown_check_group(&warn), + pmu_name) < 0) { pr_err("Out of memory\n"); return -1; } + if (topdown_attrs[0] && str) { + struct parse_events_error errinfo; if (warn) arch_topdown_group_warn(); +setup_metrics: + parse_events_error__init(&errinfo); err = parse_events(evsel_list, str, &errinfo); if (err) { fprintf(stderr, "Cannot set up top down events %s: %d\n", str, err); - parse_events_print_error(&errinfo, str); + parse_events_error__print(&errinfo, str); + parse_events_error__exit(&errinfo); free(str); return -1; } + parse_events_error__exit(&errinfo); } else { fprintf(stderr, "System does not support topdown\n"); return -1; @@ -1531,23 +1891,27 @@ static int add_default_attributes(void) free(str); } + if (!stat_config.topdown_level) + stat_config.topdown_level = TOPDOWN_MAX_LEVEL; + if (!evsel_list->core.nr_entries) { if (target__has_cpu(&target)) default_attrs0[0].config = PERF_COUNT_SW_CPU_CLOCK; - if (perf_evlist__add_default_attrs(evsel_list, default_attrs0) < 0) + if (evlist__add_default_attrs(evsel_list, default_attrs0) < 0) return -1; if (pmu_have_event("cpu", "stalled-cycles-frontend")) { - if (perf_evlist__add_default_attrs(evsel_list, - frontend_attrs) < 0) + if (evlist__add_default_attrs(evsel_list, frontend_attrs) < 0) return -1; } if (pmu_have_event("cpu", "stalled-cycles-backend")) { - if (perf_evlist__add_default_attrs(evsel_list, - backend_attrs) < 0) + if (evlist__add_default_attrs(evsel_list, backend_attrs) < 0) return -1; } - if (perf_evlist__add_default_attrs(evsel_list, default_attrs1) < 0) + if (evlist__add_default_attrs(evsel_list, default_attrs1) < 0) + return -1; + /* Platform specific attrs */ + if (evlist__add_default_attrs(evsel_list, default_null_attrs) < 0) return -1; } @@ -1557,21 +1921,21 @@ static int add_default_attributes(void) return 0; /* Append detailed run extra attributes: */ - if (perf_evlist__add_default_attrs(evsel_list, detailed_attrs) < 0) + if (evlist__add_default_attrs(evsel_list, detailed_attrs) < 0) return -1; if (detailed_run < 2) return 0; /* Append very detailed run extra attributes: */ - if (perf_evlist__add_default_attrs(evsel_list, very_detailed_attrs) < 0) + if (evlist__add_default_attrs(evsel_list, very_detailed_attrs) < 0) return -1; if (detailed_run < 3) return 0; /* Append very, very detailed run extra attributes: */ - return perf_evlist__add_default_attrs(evsel_list, very_very_detailed_attrs); + return evlist__add_default_attrs(evsel_list, very_very_detailed_attrs); } static const char * const stat_record_usage[] = { @@ -1609,7 +1973,7 @@ static int __cmd_record(int argc, const char **argv) return -1; } - session = perf_session__new(data, false, NULL); + session = perf_session__new(data, NULL); if (IS_ERR(session)) { pr_err("Perf session creation failed\n"); return PTR_ERR(session); @@ -1684,7 +2048,7 @@ static int set_maps(struct perf_stat *st) perf_evlist__set_maps(&evsel_list->core, st->cpus, st->threads); - if (perf_evlist__alloc_stats(evsel_list, true)) + if (evlist__alloc_stats(evsel_list, true)) return -ENOMEM; st->maps_allocated = true; @@ -1731,35 +2095,6 @@ int process_cpu_map_event(struct perf_session *session, return set_maps(st); } -static int 
runtime_stat_new(struct perf_stat_config *config, int nthreads) -{ - int i; - - config->stats = calloc(nthreads, sizeof(struct runtime_stat)); - if (!config->stats) - return -1; - - config->stats_num = nthreads; - - for (i = 0; i < nthreads; i++) - runtime_stat__init(&config->stats[i]); - - return 0; -} - -static void runtime_stat_delete(struct perf_stat_config *config) -{ - int i; - - if (!config->stats) - return; - - for (i = 0; i < config->stats_num; i++) - runtime_stat__exit(&config->stats[i]); - - zfree(&config->stats); -} - static const char * const stat_report_usage[] = { "perf stat report [<options>]", NULL, @@ -1807,10 +2142,12 @@ static int __cmd_report(int argc, const char **argv) input_name = "perf.data"; } + perf_stat__init_shadow_stats(); + perf_stat.data.path = input_name; perf_stat.data.mode = PERF_DATA_MODE_READ; - session = perf_session__new(&perf_stat.data, false, &perf_stat.tool); + session = perf_session__new(&perf_stat.data, &perf_stat.tool); if (IS_ERR(session)) return PTR_ERR(session); @@ -1846,8 +2183,10 @@ static void setup_system_wide(int forks) struct evsel *counter; evlist__for_each_entry(evsel_list, counter) { - if (!counter->core.system_wide) + if (!counter->core.requires_cpu && + strcmp(counter->name, "duration_time")) { return; + } } if (evsel_list->core.nr_entries) @@ -1861,11 +2200,12 @@ int cmd_stat(int argc, const char **argv) "perf stat [<options>] [<command>]", NULL }; - int status = -EINVAL, run_idx; + int status = -EINVAL, run_idx, err; const char *mode; FILE *output = stderr; unsigned int interval, timeout; const char * const stat_subcommands[] = { "record", "report" }; + char errbuf[BUFSIZ]; setlocale(LC_ALL, ""); @@ -1883,8 +2223,6 @@ int cmd_stat(int argc, const char **argv) argc = parse_options_subcommand(argc, argv, stat_options, stat_subcommands, (const char **) stat_usage, PARSE_OPT_STOP_AT_NON_OPTION); - perf_stat__collect_metric_expr(evsel_list); - perf_stat__init_shadow_stats(); if (stat_config.csv_sep) { stat_config.csv_output = true; @@ -1893,11 +2231,11 @@ int cmd_stat(int argc, const char **argv) } else stat_config.csv_sep = DEFAULT_SEPARATOR; - if (argc && !strncmp(argv[0], "rec", 3)) { + if (argc && strlen(argv[0]) > 2 && strstarts("record", argv[0])) { argc = __cmd_record(argc, argv); if (argc < 0) return -1; - } else if (argc && !strncmp(argv[0], "rep", 3)) + } else if (argc && strlen(argv[0]) > 2 && strstarts("report", argv[0])) return __cmd_report(argc, argv); interval = stat_config.interval; @@ -1939,7 +2277,7 @@ int cmd_stat(int argc, const char **argv) goto out; } - if (!output) { + if (!output && !stat_config.quiet) { struct timespec tm; mode = append_file ? 
"a" : "w"; @@ -1976,6 +2314,12 @@ int cmd_stat(int argc, const char **argv) } else if (big_num_opt == 0) /* User passed --no-big-num */ stat_config.big_num = false; + err = target__validate(&target); + if (err) { + target__strerror(&target, err, errbuf, BUFSIZ); + pr_warning("%s\n", errbuf); + } + setup_system_wide(argc); /* @@ -2019,7 +2363,8 @@ int cmd_stat(int argc, const char **argv) * --per-thread is aggregated per thread, we dont mix it with cpu mode */ if (((stat_config.aggr_mode != AGGR_GLOBAL && - stat_config.aggr_mode != AGGR_THREAD) || nr_cgroups) && + stat_config.aggr_mode != AGGR_THREAD) || + (nr_cgroups || stat_config.cgroup_list)) && !target__has_cpu(&target)) { fprintf(stderr, "both cgroup and no-aggregation " "modes only available in system-wide mode\n"); @@ -2027,18 +2372,77 @@ int cmd_stat(int argc, const char **argv) parse_options_usage(stat_usage, stat_options, "G", 1); parse_options_usage(NULL, stat_options, "A", 1); parse_options_usage(NULL, stat_options, "a", 1); + parse_options_usage(NULL, stat_options, "for-each-cgroup", 0); goto out; } + if (stat_config.iostat_run) { + status = iostat_prepare(evsel_list, &stat_config); + if (status) + goto out; + if (iostat_mode == IOSTAT_LIST) { + iostat_list(evsel_list, &stat_config); + goto out; + } else if (verbose) + iostat_list(evsel_list, &stat_config); + if (iostat_mode == IOSTAT_RUN && !target__has_cpu(&target)) + target.system_wide = true; + } + + if ((stat_config.aggr_mode == AGGR_THREAD) && (target.system_wide)) + target.per_thread = true; + + stat_config.system_wide = target.system_wide; + if (target.cpu_list) { + stat_config.user_requested_cpu_list = strdup(target.cpu_list); + if (!stat_config.user_requested_cpu_list) { + status = -ENOMEM; + goto out; + } + } + + /* + * Metric parsing needs to be delayed as metrics may optimize events + * knowing the target is system-wide. + */ + if (metrics) { + metricgroup__parse_groups(evsel_list, metrics, + stat_config.metric_no_group, + stat_config.metric_no_merge, + stat_config.user_requested_cpu_list, + stat_config.system_wide, + &stat_config.metric_events); + zfree(&metrics); + } + perf_stat__collect_metric_expr(evsel_list); + perf_stat__init_shadow_stats(); + if (add_default_attributes()) goto out; - target__validate(&target); + if (stat_config.cgroup_list) { + if (nr_cgroups > 0) { + pr_err("--cgroup and --for-each-cgroup cannot be used together\n"); + parse_options_usage(stat_usage, stat_options, "G", 1); + parse_options_usage(NULL, stat_options, "for-each-cgroup", 0); + goto out; + } - if ((stat_config.aggr_mode == AGGR_THREAD) && (target.system_wide)) - target.per_thread = true; + if (evlist__expand_cgroup(evsel_list, stat_config.cgroup_list, + &stat_config.metric_events, true) < 0) { + parse_options_usage(stat_usage, stat_options, + "for-each-cgroup", 0); + goto out; + } + } - if (perf_evlist__create_maps(evsel_list, &target) < 0) { + if (evlist__fix_hybrid_cpus(evsel_list, target.cpu_list)) { + pr_err("failed to use cpu list %s\n", target.cpu_list); + goto out; + } + + target.hybrid = perf_pmu__has_hybrid(); + if (evlist__create_maps(evsel_list, &target) < 0) { if (target__has_task(&target)) { pr_err("Problems finding threads of monitor\n"); parse_options_usage(stat_usage, stat_options, "p", 1); @@ -2051,18 +2455,14 @@ int cmd_stat(int argc, const char **argv) goto out; } + evlist__check_cpu_maps(evsel_list); + /* * Initialize thread_map with comm names, * so we could print it out on output. 
*/ if (stat_config.aggr_mode == AGGR_THREAD) { thread_map__read_comms(evsel_list->core.threads); - if (target.system_wide) { - if (runtime_stat_new(&stat_config, - perf_thread_map__nr(evsel_list->core.threads))) { - goto out; - } - } } if (stat_config.aggr_mode == AGGR_NODE) @@ -2095,7 +2495,7 @@ int cmd_stat(int argc, const char **argv) goto out; } - if (perf_evlist__alloc_stats(evsel_list, interval)) + if (evlist__alloc_stats(evsel_list, interval)) goto out; if (perf_stat_init_aggr_mode()) @@ -2125,6 +2525,11 @@ int cmd_stat(int argc, const char **argv) signal(SIGALRM, skip_signal); signal(SIGABRT, skip_signal); + if (evlist__initialize_ctlfd(evsel_list, stat_config.ctl_fd, stat_config.ctl_fd_ack)) + goto out; + + /* Enable ignoring missing threads when -p option is defined. */ + evlist__first(evsel_list)->ignore_missing_thread = target.pid; status = 0; for (run_idx = 0; forever || run_idx < stat_config.run_count; run_idx++) { if (stat_config.run_count != 1 && verbose > 0) @@ -2132,7 +2537,7 @@ int cmd_stat(int argc, const char **argv) run_idx + 1); if (run_idx != 0) - perf_evlist__reset_prev_raw_counts(evsel_list); + evlist__reset_prev_raw_counts(evsel_list); status = run_perf_stat(argc, argv, run_idx); if (forever && status != -1 && !interval) { @@ -2141,14 +2546,16 @@ int cmd_stat(int argc, const char **argv) } } - if (!forever && status != -1 && !interval) + if (!forever && status != -1 && (!interval || stat_config.summary)) print_counters(NULL, argc, argv); + evlist__finalize_ctlfd(evsel_list); + if (STAT_RECORD) { /* * We synthesize the kernel mmap record just so that older tools * don't emit warnings about not being able to resolve symbols - * due to /proc/sys/kernel/kptr_restrict settings and instear provide + * due to /proc/sys/kernel/kptr_restrict settings and instead provide * a saner message about no samples being in the perf.data file. * * This also serves to suppress a warning about f_header.data.size == 0 @@ -2158,9 +2565,10 @@ int cmd_stat(int argc, const char **argv) * tools remain -acme */ int fd = perf_data__fd(&perf_stat.data); - int err = perf_event__synthesize_kernel_mmap((void *)&perf_stat, - process_synthesized_event, - &perf_stat.session->machines.host); + + err = perf_event__synthesize_kernel_mmap((void *)&perf_stat, + process_synthesized_event, + &perf_stat.session->machines.host); if (err) { pr_warning("Couldn't synthesize the kernel mmap record, harmless, " "older tools may produce warnings about this file\n."); @@ -2181,16 +2589,21 @@ int cmd_stat(int argc, const char **argv) } perf_stat__exit_aggr_mode(); - perf_evlist__free_stats(evsel_list); + evlist__free_stats(evsel_list); out: + if (stat_config.iostat_run) + iostat_release(evsel_list); + zfree(&stat_config.walltime_run); + zfree(&stat_config.user_requested_cpu_list); if (smi_cost && smi_reset) sysfs__write_int(FREEZE_ON_SMI_PATH, 0); evlist__delete(evsel_list); - runtime_stat_delete(&stat_config); + metricgroup__rblist_exit(&stat_config.metric_events); + evlist__close_control(stat_config.ctl_fd, stat_config.ctl_fd_ack, &stat_config.ctl_fd_close); return status; } |
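
Note: the ctl_fd/ctl_fd_ack pair that evlist__initialize_ctlfd() starts listening on above, and that evlist__finalize_ctlfd()/evlist__close_control() tear down on the exit path, backs the user-visible option "perf stat --control fd:ctl-fd[,ack-fd]": perf reads 'enable'/'disable' commands from ctl-fd and, when an ack descriptor is given, confirms each command with "ack\n". What follows is a minimal sketch of a parent process driving that interface; the pipe plumbing, the "-D -1" start-disabled delay, the 'cycles' event and the sleep-based pacing are illustrative assumptions, not part of this diff.

/*
 * Sketch: toggle counting in a child "perf stat" through --control.
 * Assumes a perf binary new enough to support "--control fd:...".
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/wait.h>

int main(void)
{
	int ctl[2], ack[2];	/* ctl: parent -> perf, ack: perf -> parent */
	char opt[32], buf[8];
	pid_t pid;

	if (pipe(ctl) || pipe(ack))
		return 1;

	pid = fork();
	if (pid == 0) {
		/* child: perf reads commands on ctl[0], acks on ack[1] */
		snprintf(opt, sizeof(opt), "fd:%d,%d", ctl[0], ack[1]);
		execlp("perf", "perf", "stat", "-D", "-1", "-e", "cycles",
		       "--control", opt, "--", "sleep", "5", (char *)NULL);
		_exit(127);	/* exec failed */
	}

	close(ctl[0]);		/* parent keeps write end of ctl ... */
	close(ack[1]);		/* ... and read end of ack */

	sleep(1);				/* skip the warm-up phase */
	write(ctl[1], "enable\n", 7);		/* start counting */
	read(ack[0], buf, sizeof(buf));		/* wait for "ack\n" */
	sleep(2);				/* the measured window */
	write(ctl[1], "disable\n", 8);		/* stop counting */
	read(ack[0], buf, sizeof(buf));

	close(ctl[1]);
	close(ack[0]);
	waitpid(pid, NULL, 0);
	return 0;
}

Because "-D -1" starts with the events disabled, only the window between the two commands is counted; when perf exits, the descriptors are released through the evlist__close_control() call visible at the end of cmd_stat() above.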