Diffstat
 -rw-r--r--  tools/perf/builtin-stat.c | 822
 1 file changed, 407 insertions, 415 deletions
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 7974933dbc77..265b05157972 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -191,6 +191,7 @@ static bool append_file;
 static bool interval_count;
 static const char *output_name;
 static int output_fd;
+static char *metrics;
 
 struct perf_stat {
     bool record;
@@ -216,6 +217,7 @@ static struct perf_stat_config stat_config = {
     .run_count = 1,
     .metric_only_len = METRIC_ONLY_LEN,
     .walltime_nsecs_stats = &walltime_nsecs_stats,
+    .ru_stats = &ru_stats,
     .big_num = true,
     .ctl_fd = -1,
     .ctl_fd_ack = -1,
@@ -230,11 +232,12 @@ static bool cpus_map_matched(struct evsel *a, struct evsel *b)
     if (!a->core.cpus || !b->core.cpus)
         return false;
 
-    if (a->core.cpus->nr != b->core.cpus->nr)
+    if (perf_cpu_map__nr(a->core.cpus) != perf_cpu_map__nr(b->core.cpus))
         return false;
 
-    for (int i = 0; i < a->core.cpus->nr; i++) {
-        if (a->core.cpus->map[i] != b->core.cpus->map[i])
+    for (int i = 0; i < perf_cpu_map__nr(a->core.cpus); i++) {
+        if (perf_cpu_map__cpu(a->core.cpus, i).cpu !=
+            perf_cpu_map__cpu(b->core.cpus, i).cpu)
             return false;
     }
 
@@ -270,11 +273,8 @@ static void evlist__check_cpu_maps(struct evlist *evlist)
             pr_warning(" %s: %s\n", evsel->name, buf);
         }
 
-        for_each_group_evsel(pos, leader) {
-            evsel__set_leader(pos, pos);
-            pos->core.nr_members = 0;
-        }
-        evsel->core.leader->nr_members = 0;
+        for_each_group_evsel(pos, leader)
+            evsel__remove_from_group(pos, leader);
     }
 }
 
@@ -292,13 +292,8 @@ static inline void diff_timespec(struct timespec *r, struct timespec *a,
 
 static void perf_stat__reset_stats(void)
 {
-    int i;
-
     evlist__reset_stats(evsel_list);
     perf_stat__reset_shadow_stats();
-
-    for (i = 0; i < stat_config.stats_num; i++)
-        perf_stat__reset_shadow_per_stat(&stat_config.stats[i]);
 }
 
 static int process_synthesized_event(struct perf_tool *tool __maybe_unused,
@@ -327,34 +322,55 @@ static int write_stat_round_event(u64 tm, u64 type)
 
 #define SID(e, x, y) xyarray__entry(e->core.sample_id, x, y)
 
-static int evsel__write_stat_event(struct evsel *counter, u32 cpu, u32 thread,
+static int evsel__write_stat_event(struct evsel *counter, int cpu_map_idx, u32 thread,
                                    struct perf_counts_values *count)
 {
-    struct perf_sample_id *sid = SID(counter, cpu, thread);
+    struct perf_sample_id *sid = SID(counter, cpu_map_idx, thread);
+    struct perf_cpu cpu = perf_cpu_map__cpu(evsel__cpus(counter), cpu_map_idx);
 
     return perf_event__synthesize_stat(NULL, cpu, thread, sid->id, count,
                                        process_synthesized_event, NULL);
 }
 
-static int read_single_counter(struct evsel *counter, int cpu,
+static int read_single_counter(struct evsel *counter, int cpu_map_idx,
                                int thread, struct timespec *rs)
 {
-    if (counter->tool_event == PERF_TOOL_DURATION_TIME) {
-        u64 val = rs->tv_nsec + rs->tv_sec*1000000000ULL;
-        struct perf_counts_values *count =
-            perf_counts(counter->counts, cpu, thread);
-        count->ena = count->run = val;
-        count->val = val;
-        return 0;
+    switch(counter->tool_event) {
+        case PERF_TOOL_DURATION_TIME: {
+            u64 val = rs->tv_nsec + rs->tv_sec*1000000000ULL;
+            struct perf_counts_values *count =
+                perf_counts(counter->counts, cpu_map_idx, thread);
+            count->ena = count->run = val;
+            count->val = val;
+            return 0;
+        }
+        case PERF_TOOL_USER_TIME:
+        case PERF_TOOL_SYSTEM_TIME: {
+            u64 val;
+            struct perf_counts_values *count =
+                perf_counts(counter->counts, cpu_map_idx, thread);
+            if (counter->tool_event == PERF_TOOL_USER_TIME)
+                val = ru_stats.ru_utime_usec_stat.mean;
+            else
+                val = ru_stats.ru_stime_usec_stat.mean;
+            count->ena = count->run = val;
+            count->val = val;
+            return 0;
+        }
+        default:
+        case PERF_TOOL_NONE:
+            return evsel__read_counter(counter, cpu_map_idx, thread);
+        case PERF_TOOL_MAX:
+            /* This should never be reached */
+            return 0;
     }
-    return evsel__read_counter(counter, cpu, thread);
 }
 
 /*
  * Read out the results of a single counter:
  * do not aggregate counts across CPUs in system-wide mode
  */
-static int read_counter_cpu(struct evsel *counter, struct timespec *rs, int cpu)
+static int read_counter_cpu(struct evsel *counter, struct timespec *rs, int cpu_map_idx)
 {
     int nthreads = perf_thread_map__nr(evsel_list->core.threads);
     int thread;
@@ -362,30 +378,27 @@ static int read_counter_cpu(struct evsel *counter, struct timespec *rs, int cpu)
     if (!counter->supported)
         return -ENOENT;
 
-    if (counter->core.system_wide)
-        nthreads = 1;
-
     for (thread = 0; thread < nthreads; thread++) {
         struct perf_counts_values *count;
 
-        count = perf_counts(counter->counts, cpu, thread);
+        count = perf_counts(counter->counts, cpu_map_idx, thread);
 
         /*
          * The leader's group read loads data into its group members
         * (via evsel__read_counter()) and sets their count->loaded.
         */
-        if (!perf_counts__is_loaded(counter->counts, cpu, thread) &&
-            read_single_counter(counter, cpu, thread, rs)) {
+        if (!perf_counts__is_loaded(counter->counts, cpu_map_idx, thread) &&
+            read_single_counter(counter, cpu_map_idx, thread, rs)) {
             counter->counts->scaled = -1;
-            perf_counts(counter->counts, cpu, thread)->ena = 0;
-            perf_counts(counter->counts, cpu, thread)->run = 0;
+            perf_counts(counter->counts, cpu_map_idx, thread)->ena = 0;
+            perf_counts(counter->counts, cpu_map_idx, thread)->run = 0;
             return -1;
         }
 
-        perf_counts__set_loaded(counter->counts, cpu, thread, false);
+        perf_counts__set_loaded(counter->counts, cpu_map_idx, thread, false);
 
         if (STAT_RECORD) {
-            if (evsel__write_stat_event(counter, cpu, thread, count)) {
+            if (evsel__write_stat_event(counter, cpu_map_idx, thread, count)) {
                 pr_err("failed to write stat event\n");
                 return -1;
             }
@@ -395,7 +408,8 @@ static int read_counter_cpu(struct evsel *counter, struct timespec *rs, int cpu)
             fprintf(stat_config.output,
                 "%s: %d: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
                     evsel__name(counter),
-                    cpu,
+                    perf_cpu_map__cpu(evsel__cpus(counter),
+                                      cpu_map_idx).cpu,
                     count->val, count->ena, count->run);
         }
     }
@@ -405,36 +419,33 @@ static int read_counter_cpu(struct evsel *counter, struct timespec *rs, int cpu)
 
 static int read_affinity_counters(struct timespec *rs)
 {
-    struct evsel *counter;
-    struct affinity affinity;
-    int i, ncpus, cpu;
+    struct evlist_cpu_iterator evlist_cpu_itr;
+    struct affinity saved_affinity, *affinity;
 
     if (all_counters_use_bpf)
         return 0;
 
-    if (affinity__setup(&affinity) < 0)
+    if (!target__has_cpu(&target) || target__has_per_thread(&target))
+        affinity = NULL;
+    else if (affinity__setup(&saved_affinity) < 0)
         return -1;
+    else
+        affinity = &saved_affinity;
 
-    ncpus = perf_cpu_map__nr(evsel_list->core.all_cpus);
-    if (!target__has_cpu(&target) || target__has_per_thread(&target))
-        ncpus = 1;
-    evlist__for_each_cpu(evsel_list, i, cpu) {
-        if (i >= ncpus)
-            break;
-        affinity__set(&affinity, cpu);
+    evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) {
+        struct evsel *counter = evlist_cpu_itr.evsel;
 
-        evlist__for_each_entry(evsel_list, counter) {
-            if (evsel__cpu_iter_skip(counter, cpu))
-                continue;
-            if (evsel__is_bpf(counter))
-                continue;
-            if (!counter->err) {
-                counter->err = read_counter_cpu(counter, rs,
-                                                counter->cpu_iter - 1);
-            }
+        if (evsel__is_bpf(counter))
+            continue;
+
+        if (!counter->err) {
+            counter->err = read_counter_cpu(counter, rs,
+                                            evlist_cpu_itr.cpu_map_idx);
         }
     }
-    affinity__cleanup(&affinity);
+    if (affinity)
+        affinity__cleanup(&saved_affinity);
+
     return 0;
 }
 
@@ -473,46 +484,6 @@ static void read_counters(struct timespec *rs)
     }
 }
 
-static int runtime_stat_new(struct perf_stat_config *config, int nthreads)
-{
-    int i;
-
-    config->stats = calloc(nthreads, sizeof(struct runtime_stat));
-    if (!config->stats)
-        return -1;
-
-    config->stats_num = nthreads;
-
-    for (i = 0; i < nthreads; i++)
-        runtime_stat__init(&config->stats[i]);
-
-    return 0;
-}
-
-static void runtime_stat_delete(struct perf_stat_config *config)
-{
-    int i;
-
-    if (!config->stats)
-        return;
-
-    for (i = 0; i < config->stats_num; i++)
-        runtime_stat__exit(&config->stats[i]);
-
-    zfree(&config->stats);
-}
-
-static void runtime_stat_reset(struct perf_stat_config *config)
-{
-    int i;
-
-    if (!config->stats)
-        return;
-
-    for (i = 0; i < config->stats_num; i++)
-        perf_stat__reset_shadow_per_stat(&config->stats[i]);
-}
-
 static void process_interval(void)
 {
     struct timespec ts, rs;
@@ -521,7 +492,6 @@ static void process_interval(void)
     diff_timespec(&rs, &ts, &ref_time);
 
     perf_stat__reset_shadow_per_stat(&rt_stat);
-    runtime_stat_reset(&stat_config);
     read_counters(&rs);
 
     if (STAT_RECORD) {
@@ -646,9 +616,7 @@ static void process_evlist(struct evlist *evlist, unsigned int interval)
     if (evlist__ctlfd_process(evlist, &cmd) > 0) {
         switch (cmd) {
         case EVLIST_CTL_CMD_ENABLE:
-            if (interval)
-                process_interval();
-            break;
+            __fallthrough;
         case EVLIST_CTL_CMD_DISABLE:
             if (interval)
                 process_interval();
@@ -788,8 +756,9 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
     int status = 0;
     const bool forks = (argc > 0);
     bool is_pipe = STAT_RECORD ? perf_stat.data.is_pipe : false;
-    struct affinity affinity;
-    int i, cpu, err;
+    struct evlist_cpu_iterator evlist_cpu_itr;
+    struct affinity saved_affinity, *affinity = NULL;
+    int err;
     bool second_pass = false;
 
     if (forks) {
@@ -803,113 +772,110 @@ static int __run_perf_stat(int argc, const char **argv, int run_idx)
     if (group)
         evlist__set_leader(evsel_list);
 
-    if (affinity__setup(&affinity) < 0)
-        return -1;
+    if (!cpu_map__is_dummy(evsel_list->core.user_requested_cpus)) {
+        if (affinity__setup(&saved_affinity) < 0)
+            return -1;
+        affinity = &saved_affinity;
+    }
 
     evlist__for_each_entry(evsel_list, counter) {
+        counter->reset_group = false;
         if (bpf_counter__load(counter, &target))
             return -1;
         if (!evsel__is_bpf(counter))
             all_counters_use_bpf = false;
     }
 
-    evlist__for_each_cpu (evsel_list, i, cpu) {
+    evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) {
+        counter = evlist_cpu_itr.evsel;
+
         /*
          * bperf calls evsel__open_per_cpu() in bperf__load(), so
         * no need to call it again here.
         */
         if (target.use_bpf)
             break;
-        affinity__set(&affinity, cpu);
 
-        evlist__for_each_entry(evsel_list, counter) {
-            if (evsel__cpu_iter_skip(counter, cpu))
+        if (counter->reset_group || counter->errored)
+            continue;
+        if (evsel__is_bpf(counter))
+            continue;
+try_again:
+        if (create_perf_stat_counter(counter, &stat_config, &target,
+                                     evlist_cpu_itr.cpu_map_idx) < 0) {
+
+            /*
+             * Weak group failed. We cannot just undo this here
+             * because earlier CPUs might be in group mode, and the kernel
+             * doesn't support mixing group and non group reads. Defer
+             * it to later.
+             * Don't close here because we're in the wrong affinity.
+             */
+            if ((errno == EINVAL || errno == EBADF) &&
+                evsel__leader(counter) != counter &&
+                counter->weak_group) {
+                evlist__reset_weak_group(evsel_list, counter, false);
+                assert(counter->reset_group);
+                second_pass = true;
                 continue;
-            if (counter->reset_group || counter->errored)
+            }
+
+            switch (stat_handle_error(counter)) {
+            case COUNTER_FATAL:
+                return -1;
+            case COUNTER_RETRY:
+                goto try_again;
+            case COUNTER_SKIP:
+                continue;
+            default:
+                break;
+            }
+
+        }
+        counter->supported = true;
+    }
+
+    if (second_pass) {
+        /*
+         * Now redo all the weak group after closing them,
+         * and also close errored counters.
+         */
+
+        /* First close errored or weak retry */
+        evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) {
+            counter = evlist_cpu_itr.evsel;
+
+            if (!counter->reset_group && !counter->errored)
                 continue;
-            if (evsel__is_bpf(counter))
+
+            perf_evsel__close_cpu(&counter->core, evlist_cpu_itr.cpu_map_idx);
+        }
+        /* Now reopen weak */
+        evlist__for_each_cpu(evlist_cpu_itr, evsel_list, affinity) {
+            counter = evlist_cpu_itr.evsel;
+
+            if (!counter->reset_group)
                 continue;
-try_again:
+try_again_reset:
+            pr_debug2("reopening weak %s\n", evsel__name(counter));
             if (create_perf_stat_counter(counter, &stat_config, &target,
-                                         counter->cpu_iter - 1) < 0) {
-
-                /*
-                 * Weak group failed. We cannot just undo this here
-                 * because earlier CPUs might be in group mode, and the kernel
-                 * doesn't support mixing group and non group reads. Defer
-                 * it to later.
-                 * Don't close here because we're in the wrong affinity.
-                 */
-                if ((errno == EINVAL || errno == EBADF) &&
-                    evsel__leader(counter) != counter &&
-                    counter->weak_group) {
-                    evlist__reset_weak_group(evsel_list, counter, false);
-                    assert(counter->reset_group);
-                    second_pass = true;
-                    continue;
-                }
+                                         evlist_cpu_itr.cpu_map_idx) < 0) {
 
                 switch (stat_handle_error(counter)) {
                 case COUNTER_FATAL:
                     return -1;
                 case COUNTER_RETRY:
-                    goto try_again;
+                    goto try_again_reset;
                 case COUNTER_SKIP:
                     continue;
                 default:
                     break;
                 }
-
             }
             counter->supported = true;
         }
     }
-
-    if (second_pass) {
-        /*
-         * Now redo all the weak group after closing them,
-         * and also close errored counters.
-         */
-
-        evlist__for_each_cpu(evsel_list, i, cpu) {
-            affinity__set(&affinity, cpu);
-            /* First close errored or weak retry */
-            evlist__for_each_entry(evsel_list, counter) {
-                if (!counter->reset_group && !counter->errored)
-                    continue;
-                if (evsel__cpu_iter_skip_no_inc(counter, cpu))
-                    continue;
-                perf_evsel__close_cpu(&counter->core, counter->cpu_iter);
-            }
-            /* Now reopen weak */
-            evlist__for_each_entry(evsel_list, counter) {
-                if (!counter->reset_group && !counter->errored)
-                    continue;
-                if (evsel__cpu_iter_skip(counter, cpu))
-                    continue;
-                if (!counter->reset_group)
-                    continue;
-try_again_reset:
-                pr_debug2("reopening weak %s\n", evsel__name(counter));
-                if (create_perf_stat_counter(counter, &stat_config, &target,
-                                             counter->cpu_iter - 1) < 0) {
-
-                    switch (stat_handle_error(counter)) {
-                    case COUNTER_FATAL:
-                        return -1;
-                    case COUNTER_RETRY:
-                        goto try_again_reset;
-                    case COUNTER_SKIP:
-                        continue;
-                    default:
-                        break;
-                    }
-                }
-                counter->supported = true;
-            }
-        }
-    }
-    affinity__cleanup(&affinity);
+    affinity__cleanup(affinity);
 
     evlist__for_each_entry(evsel_list, counter) {
         if (!counter->supported) {
@@ -952,18 +918,18 @@ try_again_reset:
         return err;
     }
 
-    /*
-     * Enable counters and exec the command:
-     */
-    if (forks) {
+    err = enable_counters();
+    if (err)
+        return -1;
+
+    /* Exec the command, if any */
+    if (forks)
         evlist__start_workload(evsel_list);
-        err = enable_counters();
-        if (err)
-            return -1;
 
-        t0 = rdclock();
-        clock_gettime(CLOCK_MONOTONIC, &ref_time);
+    t0 = rdclock();
+    clock_gettime(CLOCK_MONOTONIC, &ref_time);
 
+    if (forks) {
         if (interval || timeout || evlist__ctlfd_initialized(evsel_list))
             status = dispatch_events(forks, timeout, interval, &times);
         if (child_pid != -1) {
@@ -981,13 +947,6 @@ try_again_reset:
         if (WIFSIGNALED(status))
             psignal(WTERMSIG(status), argv[0]);
     } else {
-        err = enable_counters();
-        if (err)
-            return -1;
-
-        t0 = rdclock();
-        clock_gettime(CLOCK_MONOTONIC, &ref_time);
-
         status = dispatch_events(forks, timeout, interval, &times);
     }
@@ -1009,10 +968,11 @@ try_again_reset:
         evlist__copy_prev_raw_counts(evsel_list);
         evlist__reset_prev_raw_counts(evsel_list);
-        runtime_stat_reset(&stat_config);
         perf_stat__reset_shadow_per_stat(&rt_stat);
-    } else
+    } else {
         update_stats(&walltime_nsecs_stats, t1 - t0);
+        update_rusage_stats(&ru_stats, &stat_config.ru_data);
+    }
 
     /*
      * Closing a group leader splits the group, and as we only disable
@@ -1138,14 +1098,23 @@ static int enable_metric_only(const struct option *opt __maybe_unused,
     return 0;
 }
 
-static int parse_metric_groups(const struct option *opt,
+static int append_metric_groups(const struct option *opt __maybe_unused,
                    const char *str,
                    int unset __maybe_unused)
 {
-    return metricgroup__parse_groups(opt, str,
-                     stat_config.metric_no_group,
-                     stat_config.metric_no_merge,
-                     &stat_config.metric_events);
+    if (metrics) {
+        char *tmp;
+
+        if (asprintf(&tmp, "%s,%s", metrics, str) < 0)
+            return -ENOMEM;
+        free(metrics);
+        metrics = tmp;
+    } else {
+        metrics = strdup(str);
+        if (!metrics)
+            return -ENOMEM;
+    }
+    return 0;
 }
 
 static int parse_control_option(const struct option *opt,
@@ -1168,6 +1137,26 @@ static int parse_stat_cgroups(const struct option *opt,
     return parse_cgroups(opt, str, unset);
 }
 
+static int parse_hybrid_type(const struct option *opt,
+                 const char *str,
+                 int unset __maybe_unused)
+{
+    struct evlist *evlist = *(struct evlist **)opt->value;
+
+    if (!list_empty(&evlist->core.entries)) {
+        fprintf(stderr, "Must define cputype before events/metrics\n");
+        return -1;
+    }
+
+    evlist->hybrid_pmu_name = perf_pmu__hybrid_type_to_pmu(str);
+    if (!evlist->hybrid_pmu_name) {
+        fprintf(stderr, "--cputype %s is not supported!\n", str);
+        return -1;
+    }
+
+    return 0;
+}
+
 static struct option stat_options[] = {
     OPT_BOOLEAN('T', "transaction", &transaction_run,
             "hardware transaction statistics"),
@@ -1216,8 +1205,12 @@ static struct option stat_options[] = {
     OPT_SET_UINT('A', "no-aggr", &stat_config.aggr_mode,
             "disable CPU count aggregation", AGGR_NONE),
     OPT_BOOLEAN(0, "no-merge", &stat_config.no_merge, "Do not merge identical named events"),
+    OPT_BOOLEAN(0, "hybrid-merge", &stat_config.hybrid_merge,
+            "Merge identical named hybrid events"),
     OPT_STRING('x', "field-separator", &stat_config.csv_sep, "separator",
            "print counts with custom separator"),
+    OPT_BOOLEAN('j', "json-output", &stat_config.json_output,
+           "print counts in JSON format"),
     OPT_CALLBACK('G', "cgroup", &evsel_list, "name",
              "monitor event in cgroup name only", parse_stat_cgroups),
     OPT_STRING(0, "for-each-cgroup", &stat_config.cgroup_list, "name",
@@ -1265,7 +1258,7 @@ static struct option stat_options[] = {
            "measure SMI cost"),
     OPT_CALLBACK('M', "metrics", &evsel_list, "metric/metric group list",
              "monitor specified metrics or metric groups (separated by ,)",
-             parse_metric_groups),
+             append_metric_groups),
     OPT_BOOLEAN_FLAG(0, "all-kernel", &stat_config.all_kernel,
              "Configure all used events to run in kernel space.",
             PARSE_OPT_EXCLUSIVE),
@@ -1282,6 +1275,10 @@ static struct option stat_options[] = {
            "don't print 'summary' for CSV summary output"),
     OPT_BOOLEAN(0, "quiet", &stat_config.quiet,
            "don't print output (useful with record)"),
+    OPT_CALLBACK(0, "cputype", &evsel_list, "hybrid cpu type",
+             "Only enable events on applying cpu with this type "
+             "for hybrid platform (e.g. core or atom)",
+             parse_hybrid_type),
 #ifdef HAVE_LIBPFM
     OPT_CALLBACK(0, "pfm-events", &evsel_list, "event",
         "libpfm4 event selector. use 'perf list' to list available events",
@@ -1298,70 +1295,75 @@ static struct option stat_options[] = {
     OPT_END()
 };
 
+static const char *const aggr_mode__string[] = {
+    [AGGR_CORE] = "core",
+    [AGGR_DIE] = "die",
+    [AGGR_GLOBAL] = "global",
+    [AGGR_NODE] = "node",
+    [AGGR_NONE] = "none",
+    [AGGR_SOCKET] = "socket",
+    [AGGR_THREAD] = "thread",
+    [AGGR_UNSET] = "unset",
+};
+
 static struct aggr_cpu_id perf_stat__get_socket(struct perf_stat_config *config __maybe_unused,
-                        struct perf_cpu_map *map, int cpu)
+                        struct perf_cpu cpu)
 {
-    return cpu_map__get_socket(map, cpu, NULL);
+    return aggr_cpu_id__socket(cpu, /*data=*/NULL);
 }
 
 static struct aggr_cpu_id perf_stat__get_die(struct perf_stat_config *config __maybe_unused,
-                         struct perf_cpu_map *map, int cpu)
+                         struct perf_cpu cpu)
 {
-    return cpu_map__get_die(map, cpu, NULL);
+    return aggr_cpu_id__die(cpu, /*data=*/NULL);
 }
 
 static struct aggr_cpu_id perf_stat__get_core(struct perf_stat_config *config __maybe_unused,
-                          struct perf_cpu_map *map, int cpu)
+                          struct perf_cpu cpu)
 {
-    return cpu_map__get_core(map, cpu, NULL);
+    return aggr_cpu_id__core(cpu, /*data=*/NULL);
 }
 
 static struct aggr_cpu_id perf_stat__get_node(struct perf_stat_config *config __maybe_unused,
-                          struct perf_cpu_map *map, int cpu)
+                          struct perf_cpu cpu)
 {
-    return cpu_map__get_node(map, cpu, NULL);
+    return aggr_cpu_id__node(cpu, /*data=*/NULL);
 }
 
 static struct aggr_cpu_id perf_stat__get_aggr(struct perf_stat_config *config,
-                          aggr_get_id_t get_id, struct perf_cpu_map *map, int idx)
+                          aggr_get_id_t get_id, struct perf_cpu cpu)
 {
-    int cpu;
-    struct aggr_cpu_id id = cpu_map__empty_aggr_cpu_id();
-
-    if (idx >= map->nr)
-        return id;
+    struct aggr_cpu_id id = aggr_cpu_id__empty();
 
-    cpu = map->map[idx];
+    if (aggr_cpu_id__is_empty(&config->cpus_aggr_map->map[cpu.cpu]))
+        config->cpus_aggr_map->map[cpu.cpu] = get_id(config, cpu);
 
-    if (cpu_map__aggr_cpu_id_is_empty(config->cpus_aggr_map->map[cpu]))
-        config->cpus_aggr_map->map[cpu] = get_id(config, map, idx);
-
-    id = config->cpus_aggr_map->map[cpu];
+    id = config->cpus_aggr_map->map[cpu.cpu];
 
     return id;
 }
 
 static struct aggr_cpu_id perf_stat__get_socket_cached(struct perf_stat_config *config,
-                               struct perf_cpu_map *map, int idx)
+                               struct perf_cpu cpu)
 {
-    return perf_stat__get_aggr(config, perf_stat__get_socket, map, idx);
+    return perf_stat__get_aggr(config, perf_stat__get_socket, cpu);
 }
 
 static struct aggr_cpu_id perf_stat__get_die_cached(struct perf_stat_config *config,
-                            struct perf_cpu_map *map, int idx)
+                            struct perf_cpu cpu)
 {
-    return perf_stat__get_aggr(config, perf_stat__get_die, map, idx);
+    return perf_stat__get_aggr(config, perf_stat__get_die, cpu);
 }
 
 static struct aggr_cpu_id perf_stat__get_core_cached(struct perf_stat_config *config,
-                             struct perf_cpu_map *map, int idx)
+                             struct perf_cpu cpu)
 {
-    return perf_stat__get_aggr(config, perf_stat__get_core, map, idx);
+    return perf_stat__get_aggr(config, perf_stat__get_core, cpu);
 }
 
 static struct aggr_cpu_id perf_stat__get_node_cached(struct perf_stat_config *config,
-                             struct perf_cpu_map *map, int idx)
+                             struct perf_cpu cpu)
 {
-    return perf_stat__get_aggr(config, perf_stat__get_node, map, idx);
+    return perf_stat__get_aggr(config, perf_stat__get_node, cpu);
 }
 
 static bool term_percore_set(void)
@@ -1376,54 +1378,69 @@ static bool term_percore_set(void)
     return false;
 }
 
-static int perf_stat_init_aggr_mode(void)
+static aggr_cpu_id_get_t aggr_mode__get_aggr(enum aggr_mode aggr_mode)
 {
-    int nr;
+    switch (aggr_mode) {
+    case AGGR_SOCKET:
+        return aggr_cpu_id__socket;
+    case AGGR_DIE:
+        return aggr_cpu_id__die;
+    case AGGR_CORE:
+        return aggr_cpu_id__core;
+    case AGGR_NODE:
+        return aggr_cpu_id__node;
+    case AGGR_NONE:
+        if (term_percore_set())
+            return aggr_cpu_id__core;
 
-    switch (stat_config.aggr_mode) {
+        return NULL;
+    case AGGR_GLOBAL:
+    case AGGR_THREAD:
+    case AGGR_UNSET:
+    case AGGR_MAX:
+    default:
+        return NULL;
+    }
+}
+
+static aggr_get_id_t aggr_mode__get_id(enum aggr_mode aggr_mode)
+{
+    switch (aggr_mode) {
     case AGGR_SOCKET:
-        if (cpu_map__build_socket_map(evsel_list->core.cpus, &stat_config.aggr_map)) {
-            perror("cannot build socket map");
-            return -1;
-        }
-        stat_config.aggr_get_id = perf_stat__get_socket_cached;
-        break;
+        return perf_stat__get_socket_cached;
     case AGGR_DIE:
-        if (cpu_map__build_die_map(evsel_list->core.cpus, &stat_config.aggr_map)) {
-            perror("cannot build die map");
-            return -1;
-        }
-        stat_config.aggr_get_id = perf_stat__get_die_cached;
-        break;
+        return perf_stat__get_die_cached;
     case AGGR_CORE:
-        if (cpu_map__build_core_map(evsel_list->core.cpus, &stat_config.aggr_map)) {
-            perror("cannot build core map");
-            return -1;
-        }
-        stat_config.aggr_get_id = perf_stat__get_core_cached;
-        break;
+        return perf_stat__get_core_cached;
     case AGGR_NODE:
-        if (cpu_map__build_node_map(evsel_list->core.cpus, &stat_config.aggr_map)) {
-            perror("cannot build core map");
-            return -1;
-        }
-        stat_config.aggr_get_id = perf_stat__get_node_cached;
-        break;
+        return perf_stat__get_node_cached;
     case AGGR_NONE:
         if (term_percore_set()) {
-            if (cpu_map__build_core_map(evsel_list->core.cpus,
-                            &stat_config.aggr_map)) {
-                perror("cannot build core map");
-                return -1;
-            }
-            stat_config.aggr_get_id = perf_stat__get_core_cached;
+            return perf_stat__get_core_cached;
         }
-        break;
+        return NULL;
     case AGGR_GLOBAL:
     case AGGR_THREAD:
     case AGGR_UNSET:
+    case AGGR_MAX:
     default:
-        break;
+        return NULL;
+    }
+}
+
+static int perf_stat_init_aggr_mode(void)
+{
+    int nr;
+    aggr_cpu_id_get_t get_id = aggr_mode__get_aggr(stat_config.aggr_mode);
+
+    if (get_id) {
+        stat_config.aggr_map = cpu_aggr_map__new(evsel_list->core.user_requested_cpus,
+                                                 get_id, /*data=*/NULL);
+        if (!stat_config.aggr_map) {
+            pr_err("cannot build %s map", aggr_mode__string[stat_config.aggr_mode]);
+            return -1;
+        }
+        stat_config.aggr_get_id = aggr_mode__get_id(stat_config.aggr_mode);
     }
 
     /*
@@ -1431,7 +1448,10 @@ static int perf_stat_init_aggr_mode(void)
      * taking the highest cpu number to be the size of
     * the aggregation translate cpumap.
     */
-    nr = perf_cpu_map__max(evsel_list->core.cpus);
+    if (evsel_list->core.user_requested_cpus)
+        nr = perf_cpu_map__max(evsel_list->core.user_requested_cpus).cpu;
+    else
+        nr = 0;
     stat_config.cpus_aggr_map = cpu_aggr_map__empty_new(nr + 1);
     return stat_config.cpus_aggr_map ? 0 : -ENOMEM;
 }
@@ -1459,169 +1479,141 @@ static void perf_stat__exit_aggr_mode(void)
     stat_config.cpus_aggr_map = NULL;
 }
 
-static inline int perf_env__get_cpu(struct perf_env *env, struct perf_cpu_map *map, int idx)
-{
-    int cpu;
-
-    if (idx > map->nr)
-        return -1;
-
-    cpu = map->map[idx];
-
-    if (cpu >= env->nr_cpus_avail)
-        return -1;
-
-    return cpu;
-}
-
-static struct aggr_cpu_id perf_env__get_socket(struct perf_cpu_map *map, int idx, void *data)
+static struct aggr_cpu_id perf_env__get_socket_aggr_by_cpu(struct perf_cpu cpu, void *data)
 {
     struct perf_env *env = data;
-    int cpu = perf_env__get_cpu(env, map, idx);
-    struct aggr_cpu_id id = cpu_map__empty_aggr_cpu_id();
+    struct aggr_cpu_id id = aggr_cpu_id__empty();
 
-    if (cpu != -1)
-        id.socket = env->cpu[cpu].socket_id;
+    if (cpu.cpu != -1)
+        id.socket = env->cpu[cpu.cpu].socket_id;
 
     return id;
 }
 
-static struct aggr_cpu_id perf_env__get_die(struct perf_cpu_map *map, int idx, void *data)
+static struct aggr_cpu_id perf_env__get_die_aggr_by_cpu(struct perf_cpu cpu, void *data)
 {
     struct perf_env *env = data;
-    struct aggr_cpu_id id = cpu_map__empty_aggr_cpu_id();
-    int cpu = perf_env__get_cpu(env, map, idx);
+    struct aggr_cpu_id id = aggr_cpu_id__empty();
 
-    if (cpu != -1) {
+    if (cpu.cpu != -1) {
         /*
          * die_id is relative to socket, so start
         * with the socket ID and then add die to
         * make a unique ID.
         */
-        id.socket = env->cpu[cpu].socket_id;
-        id.die = env->cpu[cpu].die_id;
+        id.socket = env->cpu[cpu.cpu].socket_id;
+        id.die = env->cpu[cpu.cpu].die_id;
     }
 
     return id;
 }
 
-static struct aggr_cpu_id perf_env__get_core(struct perf_cpu_map *map, int idx, void *data)
+static struct aggr_cpu_id perf_env__get_core_aggr_by_cpu(struct perf_cpu cpu, void *data)
 {
     struct perf_env *env = data;
-    struct aggr_cpu_id id = cpu_map__empty_aggr_cpu_id();
-    int cpu = perf_env__get_cpu(env, map, idx);
+    struct aggr_cpu_id id = aggr_cpu_id__empty();
 
-    if (cpu != -1) {
+    if (cpu.cpu != -1) {
         /*
          * core_id is relative to socket and die,
         * we need a global id. So we set
        * socket, die id and core id
        */
-        id.socket = env->cpu[cpu].socket_id;
-        id.die = env->cpu[cpu].die_id;
-        id.core = env->cpu[cpu].core_id;
+        id.socket = env->cpu[cpu.cpu].socket_id;
+        id.die = env->cpu[cpu.cpu].die_id;
+        id.core = env->cpu[cpu.cpu].core_id;
     }
 
     return id;
 }
 
-static struct aggr_cpu_id perf_env__get_node(struct perf_cpu_map *map, int idx, void *data)
+static struct aggr_cpu_id perf_env__get_node_aggr_by_cpu(struct perf_cpu cpu, void *data)
 {
-    int cpu = perf_env__get_cpu(data, map, idx);
-    struct aggr_cpu_id id = cpu_map__empty_aggr_cpu_id();
+    struct aggr_cpu_id id = aggr_cpu_id__empty();
 
     id.node = perf_env__numa_node(data, cpu);
     return id;
 }
 
-static int perf_env__build_socket_map(struct perf_env *env, struct perf_cpu_map *cpus,
-                      struct cpu_aggr_map **sockp)
-{
-    return cpu_map__build_map(cpus, sockp, perf_env__get_socket, env);
-}
-
-static int perf_env__build_die_map(struct perf_env *env, struct perf_cpu_map *cpus,
-                   struct cpu_aggr_map **diep)
-{
-    return cpu_map__build_map(cpus, diep, perf_env__get_die, env);
-}
-
-static int perf_env__build_core_map(struct perf_env *env, struct perf_cpu_map *cpus,
-                    struct cpu_aggr_map **corep)
-{
-    return cpu_map__build_map(cpus, corep, perf_env__get_core, env);
-}
-
-static int perf_env__build_node_map(struct perf_env *env, struct perf_cpu_map *cpus,
-                    struct cpu_aggr_map **nodep)
-{
-    return cpu_map__build_map(cpus, nodep, perf_env__get_node, env);
-}
-
 static struct aggr_cpu_id perf_stat__get_socket_file(struct perf_stat_config *config __maybe_unused,
-                             struct perf_cpu_map *map, int idx)
+                             struct perf_cpu cpu)
 {
-    return perf_env__get_socket(map, idx, &perf_stat.session->header.env);
+    return perf_env__get_socket_aggr_by_cpu(cpu, &perf_stat.session->header.env);
 }
 
 static struct aggr_cpu_id perf_stat__get_die_file(struct perf_stat_config *config __maybe_unused,
-                          struct perf_cpu_map *map, int idx)
+                          struct perf_cpu cpu)
 {
-    return perf_env__get_die(map, idx, &perf_stat.session->header.env);
+    return perf_env__get_die_aggr_by_cpu(cpu, &perf_stat.session->header.env);
 }
 
 static struct aggr_cpu_id perf_stat__get_core_file(struct perf_stat_config *config __maybe_unused,
-                           struct perf_cpu_map *map, int idx)
+                           struct perf_cpu cpu)
 {
-    return perf_env__get_core(map, idx, &perf_stat.session->header.env);
+    return perf_env__get_core_aggr_by_cpu(cpu, &perf_stat.session->header.env);
 }
 
 static struct aggr_cpu_id perf_stat__get_node_file(struct perf_stat_config *config __maybe_unused,
-                           struct perf_cpu_map *map, int idx)
+                           struct perf_cpu cpu)
 {
-    return perf_env__get_node(map, idx, &perf_stat.session->header.env);
+    return perf_env__get_node_aggr_by_cpu(cpu, &perf_stat.session->header.env);
 }
 
-static int perf_stat_init_aggr_mode_file(struct perf_stat *st)
+static aggr_cpu_id_get_t aggr_mode__get_aggr_file(enum aggr_mode aggr_mode)
 {
-    struct perf_env *env = &st->session->header.env;
+    switch (aggr_mode) {
+    case AGGR_SOCKET:
+        return perf_env__get_socket_aggr_by_cpu;
+    case AGGR_DIE:
+        return perf_env__get_die_aggr_by_cpu;
+    case AGGR_CORE:
+        return perf_env__get_core_aggr_by_cpu;
+    case AGGR_NODE:
+        return perf_env__get_node_aggr_by_cpu;
+    case AGGR_NONE:
+    case AGGR_GLOBAL:
+    case AGGR_THREAD:
+    case AGGR_UNSET:
+    case AGGR_MAX:
+    default:
+        return NULL;
+    }
+}
 
-    switch (stat_config.aggr_mode) {
+static aggr_get_id_t aggr_mode__get_id_file(enum aggr_mode aggr_mode)
+{
+    switch (aggr_mode) {
     case AGGR_SOCKET:
-        if (perf_env__build_socket_map(env, evsel_list->core.cpus, &stat_config.aggr_map)) {
-            perror("cannot build socket map");
-            return -1;
-        }
-        stat_config.aggr_get_id = perf_stat__get_socket_file;
-        break;
+        return perf_stat__get_socket_file;
    case AGGR_DIE:
-        if (perf_env__build_die_map(env, evsel_list->core.cpus, &stat_config.aggr_map)) {
-            perror("cannot build die map");
-            return -1;
-        }
-        stat_config.aggr_get_id = perf_stat__get_die_file;
-        break;
+        return perf_stat__get_die_file;
    case AGGR_CORE:
-        if (perf_env__build_core_map(env, evsel_list->core.cpus, &stat_config.aggr_map)) {
-            perror("cannot build core map");
-            return -1;
-        }
-        stat_config.aggr_get_id = perf_stat__get_core_file;
-        break;
+        return perf_stat__get_core_file;
    case AGGR_NODE:
-        if (perf_env__build_node_map(env, evsel_list->core.cpus, &stat_config.aggr_map)) {
-            perror("cannot build core map");
-            return -1;
-        }
-        stat_config.aggr_get_id = perf_stat__get_node_file;
-        break;
+        return perf_stat__get_node_file;
    case AGGR_NONE:
    case AGGR_GLOBAL:
    case AGGR_THREAD:
    case AGGR_UNSET:
+    case AGGR_MAX:
    default:
-        break;
+        return NULL;
    }
+}
+
+static int perf_stat_init_aggr_mode_file(struct perf_stat *st)
+{
+    struct perf_env *env = &st->session->header.env;
+    aggr_cpu_id_get_t get_id = aggr_mode__get_aggr_file(stat_config.aggr_mode);
+
+    if (!get_id)
+        return 0;
+
+    stat_config.aggr_map = cpu_aggr_map__new(evsel_list->core.user_requested_cpus, get_id, env);
+    if (!stat_config.aggr_map) {
+        pr_err("cannot build %s map", aggr_mode__string[stat_config.aggr_mode]);
+        return -1;
+    }
+    stat_config.aggr_get_id = aggr_mode__get_id_file(stat_config.aggr_mode);
     return 0;
 }
 
@@ -1653,12 +1645,6 @@ static int add_default_attributes(void)
   { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_MISSES },
 
 };
-    struct perf_event_attr default_sw_attrs[] = {
-    { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK },
-    { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES },
-    { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CPU_MIGRATIONS },
-    { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS },
-};
 
 /*
  * Detailed stats (-d), covering the L1 and last level data caches:
@@ -1750,6 +1736,9 @@ static int add_default_attributes(void)
   (PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) |
   (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },
 };
+
+    struct perf_event_attr default_null_attrs[] = {};
+
     /* Set attrs if no event is selected and !null_run: */
     if (stat_config.null_run)
         return 0;
@@ -1762,11 +1751,11 @@ static int add_default_attributes(void)
      * on an architecture test for such a metric name.
     */
     if (metricgroup__has_metric("transaction")) {
-        struct option opt = { .value = &evsel_list };
-
-        return metricgroup__parse_groups(&opt, "transaction",
+        return metricgroup__parse_groups(evsel_list, "transaction",
                          stat_config.metric_no_group,
-                         stat_config.metric_no_merge,
+                         stat_config.metric_no_merge,
+                         stat_config.user_requested_cpu_list,
+                         stat_config.system_wide,
                          &stat_config.metric_events);
     }
 
@@ -1828,11 +1817,12 @@ static int add_default_attributes(void)
         unsigned int max_level = 1;
         char *str = NULL;
         bool warn = false;
+        const char *pmu_name = arch_get_topdown_pmu_name(evsel_list, true);
 
         if (!force_metric_only)
             stat_config.metric_only = true;
 
-        if (pmu_have_event("cpu", topdown_metric_L2_attrs[5])) {
+        if (pmu_have_event(pmu_name, topdown_metric_L2_attrs[5])) {
             metric_attrs = topdown_metric_L2_attrs;
             max_level = 2;
         }
@@ -1843,10 +1833,11 @@ static int add_default_attributes(void)
         } else if (!stat_config.topdown_level)
             stat_config.topdown_level = max_level;
 
-        if (topdown_filter_events(metric_attrs, &str, 1) < 0) {
+        if (topdown_filter_events(metric_attrs, &str, 1, pmu_name) < 0) {
             pr_err("Out of memory\n");
             return -1;
         }
+
         if (metric_attrs[0] && str) {
             if (!stat_config.interval && !stat_config.metric_only) {
                 fprintf(stat_config.output,
@@ -1870,10 +1861,12 @@ static int add_default_attributes(void)
         }
 
         if (topdown_filter_events(topdown_attrs, &str,
-                arch_topdown_check_group(&warn)) < 0) {
+                arch_topdown_check_group(&warn),
+                pmu_name) < 0) {
             pr_err("Out of memory\n");
             return -1;
         }
+
         if (topdown_attrs[0] && str) {
             struct parse_events_error errinfo;
             if (warn)
@@ -1898,31 +1891,10 @@ setup_metrics:
         free(str);
     }
 
-    if (!evsel_list->core.nr_entries) {
-        if (perf_pmu__has_hybrid()) {
-            struct parse_events_error errinfo;
-            const char *hybrid_str = "cycles,instructions,branches,branch-misses";
-
-            if (target__has_cpu(&target))
-                default_sw_attrs[0].config = PERF_COUNT_SW_CPU_CLOCK;
-
-            if (evlist__add_default_attrs(evsel_list,
-                              default_sw_attrs) < 0) {
-                return -1;
-            }
-
-            parse_events_error__init(&errinfo);
-            err = parse_events(evsel_list, hybrid_str, &errinfo);
-            if (err) {
-                fprintf(stderr,
-                    "Cannot set up hybrid events %s: %d\n",
-                    hybrid_str, err);
-                parse_events_error__print(&errinfo, hybrid_str);
-            }
-            parse_events_error__exit(&errinfo);
-            return err ? -1 : 0;
-        }
+    if (!stat_config.topdown_level)
+        stat_config.topdown_level = TOPDOWN_MAX_LEVEL;
 
+    if (!evsel_list->core.nr_entries) {
         if (target__has_cpu(&target))
             default_attrs0[0].config = PERF_COUNT_SW_CPU_CLOCK;
@@ -1938,9 +1910,8 @@ setup_metrics:
         }
         if (evlist__add_default_attrs(evsel_list, default_attrs1) < 0)
             return -1;
-
-        stat_config.topdown_level = TOPDOWN_MAX_LEVEL;
-        if (arch_evlist__add_default_attrs(evsel_list) < 0)
+        /* Platform specific attrs */
+        if (evlist__add_default_attrs(evsel_list, default_null_attrs) < 0)
             return -1;
     }
 
@@ -2171,6 +2142,8 @@ static int __cmd_report(int argc, const char **argv)
             input_name = "perf.data";
     }
 
+    perf_stat__init_shadow_stats();
+
     perf_stat.data.path = input_name;
     perf_stat.data.mode = PERF_DATA_MODE_READ;
 
@@ -2210,7 +2183,7 @@ static void setup_system_wide(int forks)
         struct evsel *counter;
 
         evlist__for_each_entry(evsel_list, counter) {
-            if (!counter->core.system_wide &&
+            if (!counter->core.requires_cpu &&
                 strcmp(counter->name, "duration_time")) {
                 return;
             }
@@ -2250,8 +2223,6 @@ int cmd_stat(int argc, const char **argv)
     argc = parse_options_subcommand(argc, argv, stat_options, stat_subcommands,
                     (const char **) stat_usage,
                     PARSE_OPT_STOP_AT_NON_OPTION);
-    perf_stat__collect_metric_expr(evsel_list);
-    perf_stat__init_shadow_stats();
 
     if (stat_config.csv_sep) {
         stat_config.csv_output = true;
@@ -2260,11 +2231,11 @@ int cmd_stat(int argc, const char **argv)
     } else
         stat_config.csv_sep = DEFAULT_SEPARATOR;
 
-    if (argc && !strncmp(argv[0], "rec", 3)) {
+    if (argc && strlen(argv[0]) > 2 && strstarts("record", argv[0])) {
         argc = __cmd_record(argc, argv);
         if (argc < 0)
             return -1;
-    } else if (argc && !strncmp(argv[0], "rep", 3))
+    } else if (argc && strlen(argv[0]) > 2 && strstarts("report", argv[0]))
         return __cmd_report(argc, argv);
 
     interval = stat_config.interval;
@@ -2418,6 +2389,34 @@ int cmd_stat(int argc, const char **argv)
             target.system_wide = true;
     }
 
+    if ((stat_config.aggr_mode == AGGR_THREAD) && (target.system_wide))
+        target.per_thread = true;
+
+    stat_config.system_wide = target.system_wide;
+    if (target.cpu_list) {
+        stat_config.user_requested_cpu_list = strdup(target.cpu_list);
+        if (!stat_config.user_requested_cpu_list) {
+            status = -ENOMEM;
+            goto out;
+        }
+    }
+
+    /*
+     * Metric parsing needs to be delayed as metrics may optimize events
+     * knowing the target is system-wide.
+     */
+    if (metrics) {
+        metricgroup__parse_groups(evsel_list, metrics,
+                      stat_config.metric_no_group,
+                      stat_config.metric_no_merge,
+                      stat_config.user_requested_cpu_list,
+                      stat_config.system_wide,
+                      &stat_config.metric_events);
+        zfree(&metrics);
+    }
+    perf_stat__collect_metric_expr(evsel_list);
+    perf_stat__init_shadow_stats();
+
     if (add_default_attributes())
         goto out;
 
@@ -2437,9 +2436,6 @@ int cmd_stat(int argc, const char **argv)
         }
     }
 
-    if ((stat_config.aggr_mode == AGGR_THREAD) && (target.system_wide))
-        target.per_thread = true;
-
     if (evlist__fix_hybrid_cpus(evsel_list, target.cpu_list)) {
         pr_err("failed to use cpu list %s\n", target.cpu_list);
         goto out;
@@ -2467,12 +2463,6 @@ int cmd_stat(int argc, const char **argv)
     */
     if (stat_config.aggr_mode == AGGR_THREAD) {
         thread_map__read_comms(evsel_list->core.threads);
-        if (target.system_wide) {
-            if (runtime_stat_new(&stat_config,
-                         perf_thread_map__nr(evsel_list->core.threads))) {
-                goto out;
-            }
-        }
     }
 
     if (stat_config.aggr_mode == AGGR_NODE)
@@ -2538,6 +2528,8 @@ int cmd_stat(int argc, const char **argv)
     if (evlist__initialize_ctlfd(evsel_list, stat_config.ctl_fd, stat_config.ctl_fd_ack))
         goto out;
 
+    /* Enable ignoring missing threads when -p option is defined. */
+    evlist__first(evsel_list)->ignore_missing_thread = target.pid;
     status = 0;
     for (run_idx = 0; forever || run_idx < stat_config.run_count; run_idx++) {
         if (stat_config.run_count != 1 && verbose > 0)
@@ -2603,6 +2595,7 @@ out:
     iostat_release(evsel_list);
 
     zfree(&stat_config.walltime_run);
+    zfree(&stat_config.user_requested_cpu_list);
 
     if (smi_cost && smi_reset)
         sysfs__write_int(FREEZE_ON_SMI_PATH, 0);
@@ -2610,7 +2603,6 @@ out:
     evlist__delete(evsel_list);
 
     metricgroup__rblist_exit(&stat_config.metric_events);
-    runtime_stat_delete(&stat_config);
     evlist__close_control(stat_config.ctl_fd, stat_config.ctl_fd_ack, &stat_config.ctl_fd_close);
 
     return status;
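Notes:

The open/read loops in __run_perf_stat() and read_affinity_counters() above move from an open-coded (index, cpu) iteration with per-event skip helpers (evsel__cpu_iter_skip()) to the evlist_cpu_iterator, which hands out (evsel, cpu_map_idx) pairs already grouped by CPU, so the thread's affinity is migrated once per CPU instead of once per event/CPU pair, and a NULL affinity pointer skips the migration entirely. A standalone sketch of that iteration order (stub types only, not perf source; in the real iterator cpu_map_idx indexes the evsel's own CPU map rather than a shared array):

    /* build: cc -o iter_order iter_order.c && ./iter_order */
    #include <stdio.h>

    struct evsel_stub { const char *name; };

    int main(void)
    {
        struct evsel_stub evsels[] = { { "cycles" }, { "instructions" } };
        int cpus[] = { 0, 1, 2, 3 };

        for (int c = 0; c < 4; c++) {
            /* affinity__set() would run here, once per CPU */
            for (int e = 0; e < 2; e++) {
                /* the loop body sees what evlist_cpu_itr.evsel and
                 * evlist_cpu_itr.cpu_map_idx provide in perf */
                printf("open %s on cpu %d (cpu_map_idx %d)\n",
                       evsels[e].name, cpus[c], c);
            }
        }
        return 0;
    }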
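The new PERF_TOOL_USER_TIME and PERF_TOOL_SYSTEM_TIME tool events report ru_stats.ru_utime_usec_stat.mean and ru_stats.ru_stime_usec_stat.mean, fed by update_rusage_stats(&ru_stats, &stat_config.ru_data) after each run. update_rusage_stats() is defined outside this file; the standalone sketch below only illustrates the assumed source of those numbers, the child rusage returned by wait4():

    /* build: cc -o ru_sketch ru_sketch.c && ./ru_sketch */
    #include <stdio.h>
    #include <sys/resource.h>
    #include <sys/wait.h>
    #include <unistd.h>

    static unsigned long long tv_usec(const struct timeval *tv)
    {
        return tv->tv_sec * 1000000ULL + tv->tv_usec;
    }

    int main(void)
    {
        struct rusage ru;
        pid_t pid = fork();

        if (pid == 0) {                   /* stand-in for the measured workload */
            execlp("sleep", "sleep", "1", (char *)NULL);
            _exit(127);
        }
        wait4(pid, NULL, 0, &ru);         /* rusage of the exited child */

        printf("user_time:   %llu usec\n", tv_usec(&ru.ru_utime));
        printf("system_time: %llu usec\n", tv_usec(&ru.ru_stime));
        return 0;
    }

With the events wired up this way, an invocation along the lines of 'perf stat -e duration_time,user_time,system_time -- make -j8' should print wall-clock, user and system time of the workload side by side.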