Diffstat (limited to 'tools/perf/builtin-stat.c')
-rw-r--r--  tools/perf/builtin-stat.c  124
1 file changed, 72 insertions(+), 52 deletions(-)
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index c941bb640f49..62ae30d34fa6 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -173,24 +173,23 @@ static struct perf_event_attr very_very_detailed_attrs[] = {
-struct perf_evlist *evsel_list;
+static struct perf_evlist *evsel_list;
-static bool system_wide = false;
-static int run_idx = 0;
+static struct perf_target target = {
+ .uid = UINT_MAX,
+};
+static int run_idx = 0;
static int run_count = 1;
static bool no_inherit = false;
static bool scale = true;
static bool no_aggr = false;
-static const char *target_pid;
-static const char *target_tid;
static pid_t child_pid = -1;
static bool null_run = false;
static int detailed_run = 0;
static bool sync_run = false;
static bool big_num = true;
static int big_num_opt = -1;
-static const char *cpu_list;
static const char *csv_sep = NULL;
static bool csv_output = false;
static bool group = false;
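[Note: the new 'target' object replaces the loose target_pid/target_tid/cpu_list/system_wide globals removed above. A minimal sketch of the field set this patch relies on, inferred only from the assignments and option bindings in this diff; the real struct lives in a shared perf header and may carry additional members:]

#include <stdbool.h>
#include <sys/types.h>		/* uid_t */

/* Sketch only: fields inferred from this patch's use of 'target'. */
struct perf_target {
	const char	*pid;		/* -p: existing process id (string)        */
	const char	*tid;		/* -t: existing thread id (string)         */
	const char	*cpu_list;	/* -C: cpus to monitor in system-wide mode */
	uid_t		 uid;		/* UINT_MAX presumably means no uid filter */
	bool		 system_wide;	/* -a: collect on all cpus                 */
};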
@@ -265,24 +264,26 @@ static double stddev_stats(struct stats *stats)
return sqrt(variance_mean);
}
-struct stats runtime_nsecs_stats[MAX_NR_CPUS];
-struct stats runtime_cycles_stats[MAX_NR_CPUS];
-struct stats runtime_stalled_cycles_front_stats[MAX_NR_CPUS];
-struct stats runtime_stalled_cycles_back_stats[MAX_NR_CPUS];
-struct stats runtime_branches_stats[MAX_NR_CPUS];
-struct stats runtime_cacherefs_stats[MAX_NR_CPUS];
-struct stats runtime_l1_dcache_stats[MAX_NR_CPUS];
-struct stats runtime_l1_icache_stats[MAX_NR_CPUS];
-struct stats runtime_ll_cache_stats[MAX_NR_CPUS];
-struct stats runtime_itlb_cache_stats[MAX_NR_CPUS];
-struct stats runtime_dtlb_cache_stats[MAX_NR_CPUS];
-struct stats walltime_nsecs_stats;
+static struct stats runtime_nsecs_stats[MAX_NR_CPUS];
+static struct stats runtime_cycles_stats[MAX_NR_CPUS];
+static struct stats runtime_stalled_cycles_front_stats[MAX_NR_CPUS];
+static struct stats runtime_stalled_cycles_back_stats[MAX_NR_CPUS];
+static struct stats runtime_branches_stats[MAX_NR_CPUS];
+static struct stats runtime_cacherefs_stats[MAX_NR_CPUS];
+static struct stats runtime_l1_dcache_stats[MAX_NR_CPUS];
+static struct stats runtime_l1_icache_stats[MAX_NR_CPUS];
+static struct stats runtime_ll_cache_stats[MAX_NR_CPUS];
+static struct stats runtime_itlb_cache_stats[MAX_NR_CPUS];
+static struct stats runtime_dtlb_cache_stats[MAX_NR_CPUS];
+static struct stats walltime_nsecs_stats;
static int create_perf_stat_counter(struct perf_evsel *evsel,
struct perf_evsel *first)
{
struct perf_event_attr *attr = &evsel->attr;
struct xyarray *group_fd = NULL;
+ bool exclude_guest_missing = false;
+ int ret;
if (group && evsel != first)
group_fd = first->fd;
@@ -293,16 +294,39 @@ static int create_perf_stat_counter(struct perf_evsel *evsel,
attr->inherit = !no_inherit;
- if (system_wide)
- return perf_evsel__open_per_cpu(evsel, evsel_list->cpus,
- group, group_fd);
- if (!target_pid && !target_tid && (!group || evsel == first)) {
+retry:
+ if (exclude_guest_missing)
+ evsel->attr.exclude_guest = evsel->attr.exclude_host = 0;
+
+ if (perf_target__has_cpu(&target)) {
+ ret = perf_evsel__open_per_cpu(evsel, evsel_list->cpus,
+ group, group_fd);
+ if (ret)
+ goto check_ret;
+ return 0;
+ }
+
+ if (!perf_target__has_task(&target) && (!group || evsel == first)) {
attr->disabled = 1;
attr->enable_on_exec = 1;
}
- return perf_evsel__open_per_thread(evsel, evsel_list->threads,
- group, group_fd);
+ ret = perf_evsel__open_per_thread(evsel, evsel_list->threads,
+ group, group_fd);
+ if (!ret)
+ return 0;
+ /* fall through */
+check_ret:
+ if (ret && errno == EINVAL) {
+ if (!exclude_guest_missing &&
+ (evsel->attr.exclude_guest || evsel->attr.exclude_host)) {
+ pr_debug("Old kernel, cannot exclude "
+ "guest or host samples.\n");
+ exclude_guest_missing = true;
+ goto retry;
+ }
+ }
+ return ret;
}
/*
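[Note: perf_target__has_cpu() and perf_target__has_task() above stand in for the open-coded checks they replace (system_wide, and target_pid/target_tid respectively). A hedged sketch of how such predicates could look, derived only from the conditions this patch removes and the field names sketched earlier; the actual helpers ship elsewhere in the perf tree and may also consult a uid filter:]

/* Sketch only: semantics inferred from the conditions replaced above. */
static inline bool perf_target__has_task(struct perf_target *t)
{
	return t->pid || t->tid;		/* old: target_pid || target_tid */
}

static inline bool perf_target__has_cpu(struct perf_target *t)
{
	return t->system_wide || t->cpu_list;	/* old: system_wide (presumably also honours -C) */
}

static inline bool perf_target__none(struct perf_target *t)
{
	/* old: !target_tid && !target_pid && !system_wide */
	return !perf_target__has_task(t) && !perf_target__has_cpu(t);
}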
@@ -446,7 +470,7 @@ static int run_perf_stat(int argc __used, const char **argv)
exit(-1);
}
- if (!target_tid && !target_pid && !system_wide)
+ if (perf_target__none(&target))
evsel_list->threads->map[0] = child_pid;
/*
@@ -463,8 +487,13 @@ static int run_perf_stat(int argc __used, const char **argv)
list_for_each_entry(counter, &evsel_list->entries, node) {
if (create_perf_stat_counter(counter, first) < 0) {
+ /*
+ * PPC returns ENXIO for HW counters until 2.6.37
+ * (behavior changed with commit b0a873e).
+ */
if (errno == EINVAL || errno == ENOSYS ||
- errno == ENOENT || errno == EOPNOTSUPP) {
+ errno == ENOENT || errno == EOPNOTSUPP ||
+ errno == ENXIO) {
if (verbose)
ui__warning("%s event is not supported by the kernel.\n",
event_name(counter));
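[Note: the errno check above now treats ENXIO like the other "event not supported" errors, since pre-2.6.37 PPC kernels report ENXIO for hardware counters. Restated as a standalone helper purely for clarity; the patch keeps the condition inline:]

#include <errno.h>
#include <stdbool.h>

/* Sketch: errnos this patch treats as "counter unsupported, skip it". */
static bool counter_errno_is_unsupported(int err)
{
	return err == EINVAL || err == ENOSYS || err == ENOENT ||
	       err == EOPNOTSUPP ||
	       err == ENXIO;	/* PPC HW counters before 2.6.37 */
}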
@@ -476,7 +505,7 @@ static int run_perf_stat(int argc __used, const char **argv)
error("You may not have permission to collect %sstats.\n"
"\t Consider tweaking"
" /proc/sys/kernel/perf_event_paranoid or running as root.",
- system_wide ? "system-wide " : "");
+ target.system_wide ? "system-wide " : "");
} else {
error("open_counter returned with %d (%s). "
"/bin/dmesg may provide additional information.\n",
@@ -968,14 +997,14 @@ static void print_stat(int argc, const char **argv)
if (!csv_output) {
fprintf(output, "\n");
fprintf(output, " Performance counter stats for ");
- if (!target_pid && !target_tid) {
+ if (!perf_target__has_task(&target)) {
fprintf(output, "\'%s", argv[0]);
for (i = 1; i < argc; i++)
fprintf(output, " %s", argv[i]);
- } else if (target_pid)
- fprintf(output, "process id \'%s", target_pid);
+ } else if (target.pid)
+ fprintf(output, "process id \'%s", target.pid);
else
- fprintf(output, "thread id \'%s", target_tid);
+ fprintf(output, "thread id \'%s", target.tid);
fprintf(output, "\'");
if (run_count > 1)
@@ -1049,11 +1078,11 @@ static const struct option options[] = {
"event filter", parse_filter),
OPT_BOOLEAN('i', "no-inherit", &no_inherit,
"child tasks do not inherit counters"),
- OPT_STRING('p', "pid", &target_pid, "pid",
+ OPT_STRING('p', "pid", &target.pid, "pid",
"stat events on existing process id"),
- OPT_STRING('t', "tid", &target_tid, "tid",
+ OPT_STRING('t', "tid", &target.tid, "tid",
"stat events on existing thread id"),
- OPT_BOOLEAN('a', "all-cpus", &system_wide,
+ OPT_BOOLEAN('a', "all-cpus", &target.system_wide,
"system-wide collection from all CPUs"),
OPT_BOOLEAN('g', "group", &group,
"put the counters into a counter group"),
@@ -1072,7 +1101,7 @@ static const struct option options[] = {
OPT_CALLBACK_NOOPT('B', "big-num", NULL, NULL,
"print large numbers with thousands\' separators",
stat__set_big_num),
- OPT_STRING('C', "cpu", &cpu_list, "cpu",
+ OPT_STRING('C', "cpu", &target.cpu_list, "cpu",
"list of cpus to monitor in system-wide"),
OPT_BOOLEAN('A', "no-aggr", &no_aggr,
"disable CPU count aggregation"),
@@ -1190,13 +1219,13 @@ int cmd_stat(int argc, const char **argv, const char *prefix __used)
} else if (big_num_opt == 0) /* User passed --no-big-num */
big_num = false;
- if (!argc && !target_pid && !target_tid)
+ if (!argc && !perf_target__has_task(&target))
usage_with_options(stat_usage, options);
if (run_count <= 0)
usage_with_options(stat_usage, options);
/* no_aggr, cgroup are for system-wide only */
- if ((no_aggr || nr_cgroups) && !system_wide) {
+ if ((no_aggr || nr_cgroups) && !perf_target__has_cpu(&target)) {
fprintf(stderr, "both cgroup and no-aggregation "
"modes only available in system-wide mode\n");
@@ -1206,23 +1235,14 @@ int cmd_stat(int argc, const char **argv, const char *prefix __used)
if (add_default_attributes())
goto out;
- if (target_pid)
- target_tid = target_pid;
+ perf_target__validate(&target);
- evsel_list->threads = thread_map__new_str(target_pid,
- target_tid, UINT_MAX);
- if (evsel_list->threads == NULL) {
- pr_err("Problems finding threads of monitor\n");
- usage_with_options(stat_usage, options);
- }
-
- if (system_wide)
- evsel_list->cpus = cpu_map__new(cpu_list);
- else
- evsel_list->cpus = cpu_map__dummy_new();
+ if (perf_evlist__create_maps(evsel_list, &target) < 0) {
+ if (perf_target__has_task(&target))
+ pr_err("Problems finding threads of monitor\n");
+ if (perf_target__has_cpu(&target))
+ perror("failed to parse CPUs map");
- if (evsel_list->cpus == NULL) {
- perror("failed to parse CPUs map");
usage_with_options(stat_usage, options);
return -1;
}
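[Note: the deleted block shows what the two new calls have to cover: perf_target__validate() takes over the old "pid implies tid" fixup, and perf_evlist__create_maps() builds the thread map from pid/tid and the cpu map from cpu_list (or a dummy map when not system-wide). A hedged sketch of that division of labour, reconstructed from the removed lines and using only helpers visible in this diff; the real implementations live in the shared evlist/target code and presumably also honour the uid field:]

/* Sketch only: behaviour reconstructed from the lines deleted above. */
static void sketch_validate(struct perf_target *t)
{
	if (t->pid)			/* old: if (target_pid)        */
		t->tid = t->pid;	/*        target_tid = target_pid; */
}

static int sketch_create_maps(struct perf_evlist *evlist,
			      struct perf_target *t)
{
	evlist->threads = thread_map__new_str(t->pid, t->tid, UINT_MAX);
	if (evlist->threads == NULL)
		return -1;

	if (perf_target__has_cpu(t))	/* old: if (system_wide) */
		evlist->cpus = cpu_map__new(t->cpu_list);
	else
		evlist->cpus = cpu_map__dummy_new();

	return evlist->cpus == NULL ? -1 : 0;
}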