Diffstat:
 tools/perf/util/evsel.c | 2263 ++++++++++++++++++++++++++++++++--------
 1 file changed, 1803 insertions(+), 460 deletions(-)
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 1cad6051d8b0..d55482f094bf 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -5,6 +5,11 @@
* Parts came from builtin-{top,stat,record}.c, see those files for further
* copyright notes.
*/
+/*
+ * Powerpc needs __SANE_USERSPACE_TYPES__ before <linux/types.h> to select
+ * 'int-ll64.h' and avoid compile warnings when printing __u64 with %llu.
+ */
+#define __SANE_USERSPACE_TYPES__
#include <byteswap.h>
#include <errno.h>
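For context, a minimal sketch (not part of the patch) of what the define buys: with __SANE_USERSPACE_TYPES__, <linux/types.h> selects int-ll64.h, so __u64 is unsigned long long on every architecture and %llu matches without a cast:

    #define __SANE_USERSPACE_TYPES__
    #include <linux/types.h>
    #include <stdio.h>

    int main(void)
    {
        __u64 period = 4000;

        /* Without the define, powerpc gets int-l64.h, __u64 becomes
         * unsigned long, and this %llu triggers a format warning. */
        printf("sample_period=%llu\n", period);
        return 0;
    }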
@@ -12,7 +17,6 @@
#include <linux/bitops.h>
#include <api/fs/fs.h>
#include <api/fs/tracing_path.h>
-#include <traceevent/event-parse.h>
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
#include <linux/compiler.h>
@@ -20,16 +24,19 @@
#include <linux/zalloc.h>
#include <sys/ioctl.h>
#include <sys/resource.h>
+#include <sys/syscall.h>
#include <sys/types.h>
#include <dirent.h>
#include <stdlib.h>
#include <perf/evsel.h>
#include "asm/bug.h"
+#include "bpf_counter.h"
#include "callchain.h"
#include "cgroup.h"
#include "counts.h"
#include "event.h"
#include "evsel.h"
+#include "time-utils.h"
#include "util/env.h"
#include "util/evsel_config.h"
#include "util/evsel_fprintf.h"
@@ -45,13 +52,28 @@
#include "string2.h"
#include "memswap.h"
#include "util.h"
+#include "util/hashmap.h"
+#include "off_cpu.h"
+#include "pmu.h"
+#include "pmus.h"
+#include "hwmon_pmu.h"
+#include "tool_pmu.h"
+#include "rlimit.h"
#include "../perf-sys.h"
#include "util/parse-branch-options.h"
+#include "util/bpf-filter.h"
+#include "util/hist.h"
#include <internal/xyarray.h>
#include <internal/lib.h>
+#include <internal/threadmap.h>
+#include "util/intel-tpebs.h"
#include <linux/ctype.h>
+#ifdef HAVE_LIBTRACEEVENT
+#include <event-parse.h>
+#endif
+
struct perf_missing_features perf_missing_features;
static clockid_t clockid;
@@ -61,7 +83,123 @@ static int evsel__no_extra_init(struct evsel *evsel __maybe_unused)
return 0;
}
-void __weak test_attr__ready(void) { }
+static bool test_attr__enabled(void)
+{
+ static bool test_attr__enabled;
+ static bool test_attr__enabled_tested;
+
+ if (!test_attr__enabled_tested) {
+ char *dir = getenv("PERF_TEST_ATTR");
+
+ test_attr__enabled = (dir != NULL);
+ test_attr__enabled_tested = true;
+ }
+ return test_attr__enabled;
+}
+
+#define __WRITE_ASS(str, fmt, data) \
+do { \
+ if (fprintf(file, #str "=%"fmt "\n", data) < 0) { \
+ perror("test attr - failed to write event file"); \
+ fclose(file); \
+ return -1; \
+ } \
+} while (0)
+
+#define WRITE_ASS(field, fmt) __WRITE_ASS(field, fmt, attr->field)
+
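For instance, WRITE_ASS(type, PRIu32) below expands (after string-literal concatenation) roughly to:

    if (fprintf(file, "type=%" PRIu32 "\n", attr->type) < 0) {
        perror("test attr - failed to write event file");
        fclose(file);
        return -1;
    }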
+static int store_event(struct perf_event_attr *attr, pid_t pid, struct perf_cpu cpu,
+ int fd, int group_fd, unsigned long flags)
+{
+ FILE *file;
+ char path[PATH_MAX];
+ char *dir = getenv("PERF_TEST_ATTR");
+
+ snprintf(path, PATH_MAX, "%s/event-%d-%llu-%d", dir,
+ attr->type, attr->config, fd);
+
+ file = fopen(path, "w+");
+ if (!file) {
+ perror("test attr - failed to open event file");
+ return -1;
+ }
+
+ if (fprintf(file, "[event-%d-%llu-%d]\n",
+ attr->type, attr->config, fd) < 0) {
+ perror("test attr - failed to write event file");
+ fclose(file);
+ return -1;
+ }
+
+ /* syscall arguments */
+ __WRITE_ASS(fd, "d", fd);
+ __WRITE_ASS(group_fd, "d", group_fd);
+ __WRITE_ASS(cpu, "d", cpu.cpu);
+ __WRITE_ASS(pid, "d", pid);
+ __WRITE_ASS(flags, "lu", flags);
+
+ /* struct perf_event_attr */
+ WRITE_ASS(type, PRIu32);
+ WRITE_ASS(size, PRIu32);
+ WRITE_ASS(config, "llu");
+ WRITE_ASS(sample_period, "llu");
+ WRITE_ASS(sample_type, "llu");
+ WRITE_ASS(read_format, "llu");
+ WRITE_ASS(disabled, "d");
+ WRITE_ASS(inherit, "d");
+ WRITE_ASS(pinned, "d");
+ WRITE_ASS(exclusive, "d");
+ WRITE_ASS(exclude_user, "d");
+ WRITE_ASS(exclude_kernel, "d");
+ WRITE_ASS(exclude_hv, "d");
+ WRITE_ASS(exclude_idle, "d");
+ WRITE_ASS(mmap, "d");
+ WRITE_ASS(comm, "d");
+ WRITE_ASS(freq, "d");
+ WRITE_ASS(inherit_stat, "d");
+ WRITE_ASS(enable_on_exec, "d");
+ WRITE_ASS(task, "d");
+ WRITE_ASS(watermark, "d");
+ WRITE_ASS(precise_ip, "d");
+ WRITE_ASS(mmap_data, "d");
+ WRITE_ASS(sample_id_all, "d");
+ WRITE_ASS(exclude_host, "d");
+ WRITE_ASS(exclude_guest, "d");
+ WRITE_ASS(exclude_callchain_kernel, "d");
+ WRITE_ASS(exclude_callchain_user, "d");
+ WRITE_ASS(mmap2, "d");
+ WRITE_ASS(comm_exec, "d");
+ WRITE_ASS(context_switch, "d");
+ WRITE_ASS(write_backward, "d");
+ WRITE_ASS(namespaces, "d");
+ WRITE_ASS(use_clockid, "d");
+ WRITE_ASS(wakeup_events, PRIu32);
+ WRITE_ASS(bp_type, PRIu32);
+ WRITE_ASS(config1, "llu");
+ WRITE_ASS(config2, "llu");
+ WRITE_ASS(branch_sample_type, "llu");
+ WRITE_ASS(sample_regs_user, "llu");
+ WRITE_ASS(sample_stack_user, PRIu32);
+
+ fclose(file);
+ return 0;
+}
+
+#undef __WRITE_ASS
+#undef WRITE_ASS
+
+static void test_attr__open(struct perf_event_attr *attr, pid_t pid, struct perf_cpu cpu,
+ int fd, int group_fd, unsigned long flags)
+{
+ int errno_saved = errno;
+
+ if ((fd != -1) && store_event(attr, pid, cpu, fd, group_fd, flags)) {
+ pr_err("test attr FAILED");
+ exit(128);
+ }
+
+ errno = errno_saved;
+}
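When the PERF_TEST_ATTR environment variable points at a directory, each successful sys_perf_event_open() therefore leaves behind a file like the following (values illustrative; the fields continue through sample_stack_user exactly as written by store_event() above):

    [event-0-0-5]
    fd=5
    group_fd=-1
    cpu=-1
    pid=12345
    flags=8
    type=0
    size=136
    config=0
    sample_period=4000
    ...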
static void evsel__no_extra_fini(struct evsel *evsel __maybe_unused)
{
@@ -99,6 +237,16 @@ set_methods:
return 0;
}
+const char *evsel__pmu_name(const struct evsel *evsel)
+{
+ struct perf_pmu *pmu = evsel__find_pmu(evsel);
+
+ if (pmu)
+ return pmu->name;
+
+ return event_type(evsel->core.attr.type);
+}
+
#define FD(e, x, y) (*(int *)xyarray__entry(e->core.fd, x, y))
int __evsel__sample_size(u64 sample_type)
@@ -236,27 +384,28 @@ bool evsel__is_function_event(struct evsel *evsel)
void evsel__init(struct evsel *evsel,
struct perf_event_attr *attr, int idx)
{
- perf_evsel__init(&evsel->core, attr);
- evsel->idx = idx;
+ perf_evsel__init(&evsel->core, attr, idx);
evsel->tracking = !idx;
- evsel->leader = evsel;
- evsel->unit = "";
+ evsel->unit = strdup("");
evsel->scale = 1.0;
evsel->max_events = ULONG_MAX;
evsel->evlist = NULL;
evsel->bpf_obj = NULL;
evsel->bpf_fd = -1;
INIT_LIST_HEAD(&evsel->config_terms);
+ INIT_LIST_HEAD(&evsel->bpf_counter_list);
+ INIT_LIST_HEAD(&evsel->bpf_filters);
perf_evsel__object.init(evsel);
evsel->sample_size = __evsel__sample_size(attr->sample_type);
evsel__calc_id_pos(evsel);
evsel->cmdline_group_boundary = false;
- evsel->metric_expr = NULL;
- evsel->metric_name = NULL;
evsel->metric_events = NULL;
evsel->per_pkg_mask = NULL;
evsel->collect_stat = false;
- evsel->pmu_name = NULL;
+ evsel->group_pmu_name = NULL;
+ evsel->skippable = false;
+ evsel->alternate_hw_config = PERF_COUNT_HW_MAX;
+ evsel->script_output_type = -1; // FIXME: OUTPUT_TYPE_UNSET, see builtin-script.c
}
struct evsel *evsel__new_idx(struct perf_event_attr *attr, int idx)
@@ -267,75 +416,26 @@ struct evsel *evsel__new_idx(struct perf_event_attr *attr, int idx)
return NULL;
evsel__init(evsel, attr, idx);
- if (evsel__is_bpf_output(evsel)) {
- evsel->core.attr.sample_type |= (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
+ if (evsel__is_bpf_output(evsel) && !attr->sample_type) {
+ evsel->core.attr.sample_type = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD),
evsel->core.attr.sample_period = 1;
}
if (evsel__is_clock(evsel)) {
- /*
- * The evsel->unit points to static alias->unit
- * so it's ok to use static string in here.
- */
- static const char *unit = "msec";
-
- evsel->unit = unit;
+ free((char *)evsel->unit);
+ evsel->unit = strdup("msec");
evsel->scale = 1e-6;
}
return evsel;
}
-static bool perf_event_can_profile_kernel(void)
-{
- return perf_event_paranoid_check(1);
-}
-
-struct evsel *evsel__new_cycles(bool precise)
-{
- struct perf_event_attr attr = {
- .type = PERF_TYPE_HARDWARE,
- .config = PERF_COUNT_HW_CPU_CYCLES,
- .exclude_kernel = !perf_event_can_profile_kernel(),
- };
- struct evsel *evsel;
-
- event_attr_init(&attr);
-
- if (!precise)
- goto new_event;
-
- /*
- * Now let the usual logic to set up the perf_event_attr defaults
- * to kick in when we return and before perf_evsel__open() is called.
- */
-new_event:
- evsel = evsel__new(&attr);
- if (evsel == NULL)
- goto out;
-
- evsel->precise_max = true;
-
- /* use asprintf() because free(evsel) assumes name is allocated */
- if (asprintf(&evsel->name, "cycles%s%s%.*s",
- (attr.precise_ip || attr.exclude_kernel) ? ":" : "",
- attr.exclude_kernel ? "u" : "",
- attr.precise_ip ? attr.precise_ip + 1 : 0, "ppp") < 0)
- goto error_free;
-out:
- return evsel;
-error_free:
- evsel__delete(evsel);
- evsel = NULL;
- goto out;
-}
-
-static int evsel__copy_config_terms(struct evsel *dst, struct evsel *src)
+int copy_config_terms(struct list_head *dst, struct list_head *src)
{
struct evsel_config_term *pos, *tmp;
- list_for_each_entry(pos, &src->config_terms, list) {
+ list_for_each_entry(pos, src, list) {
tmp = malloc(sizeof(*tmp));
if (tmp == NULL)
return -ENOMEM;
@@ -348,11 +448,16 @@ static int evsel__copy_config_terms(struct evsel *dst, struct evsel *src)
return -ENOMEM;
}
}
- list_add_tail(&tmp->list, &dst->config_terms);
+ list_add_tail(&tmp->list, dst);
}
return 0;
}
+static int evsel__copy_config_terms(struct evsel *dst, struct evsel *src)
+{
+ return copy_config_terms(&dst->config_terms, &src->config_terms);
+}
+
/**
* evsel__clone - create a new evsel copied from @orig
* @orig: original evsel
@@ -360,7 +465,7 @@ static int evsel__copy_config_terms(struct evsel *dst, struct evsel *src)
* The assumption is that @orig is not configured nor opened yet.
* So we only care about the attributes that can be set while it's parsed.
*/
-struct evsel *evsel__clone(struct evsel *orig)
+struct evsel *evsel__clone(struct evsel *dest, struct evsel *orig)
{
struct evsel *evsel;
@@ -373,7 +478,11 @@ struct evsel *evsel__clone(struct evsel *orig)
if (orig->bpf_obj)
return NULL;
- evsel = evsel__new(&orig->core.attr);
+ if (dest)
+ evsel = dest;
+ else
+ evsel = evsel__new(&orig->core.attr);
+
if (evsel == NULL)
return NULL;
@@ -382,6 +491,8 @@ struct evsel *evsel__clone(struct evsel *orig)
evsel->core.threads = perf_thread_map__get(orig->core.threads);
evsel->core.nr_members = orig->core.nr_members;
evsel->core.system_wide = orig->core.system_wide;
+ evsel->core.requires_cpu = orig->core.requires_cpu;
+ evsel->core.is_pmu_core = orig->core.is_pmu_core;
if (orig->name) {
evsel->name = strdup(orig->name);
@@ -393,9 +504,9 @@ struct evsel *evsel__clone(struct evsel *orig)
if (evsel->group_name == NULL)
goto out_err;
}
- if (orig->pmu_name) {
- evsel->pmu_name = strdup(orig->pmu_name);
- if (evsel->pmu_name == NULL)
+ if (orig->group_pmu_name) {
+ evsel->group_pmu_name = strdup(orig->group_pmu_name);
+ if (evsel->group_pmu_name == NULL)
goto out_err;
}
if (orig->filter) {
@@ -403,31 +514,55 @@ struct evsel *evsel__clone(struct evsel *orig)
if (evsel->filter == NULL)
goto out_err;
}
+ if (orig->metric_id) {
+ evsel->metric_id = strdup(orig->metric_id);
+ if (evsel->metric_id == NULL)
+ goto out_err;
+ }
evsel->cgrp = cgroup__get(orig->cgrp);
+#ifdef HAVE_LIBTRACEEVENT
+ if (orig->tp_sys) {
+ evsel->tp_sys = strdup(orig->tp_sys);
+ if (evsel->tp_sys == NULL)
+ goto out_err;
+ }
+ if (orig->tp_name) {
+ evsel->tp_name = strdup(orig->tp_name);
+ if (evsel->tp_name == NULL)
+ goto out_err;
+ }
evsel->tp_format = orig->tp_format;
+#endif
evsel->handler = orig->handler;
- evsel->leader = orig->leader;
+ evsel->core.leader = orig->core.leader;
evsel->max_events = orig->max_events;
- evsel->tool_event = orig->tool_event;
- evsel->unit = orig->unit;
+ zfree(&evsel->unit);
+ if (orig->unit) {
+ evsel->unit = strdup(orig->unit);
+ if (evsel->unit == NULL)
+ goto out_err;
+ }
evsel->scale = orig->scale;
evsel->snapshot = orig->snapshot;
evsel->per_pkg = orig->per_pkg;
evsel->percore = orig->percore;
evsel->precise_max = orig->precise_max;
- evsel->use_uncore_alias = orig->use_uncore_alias;
evsel->is_libpfm_event = orig->is_libpfm_event;
evsel->exclude_GH = orig->exclude_GH;
evsel->sample_read = orig->sample_read;
- evsel->auto_merge_stats = orig->auto_merge_stats;
evsel->collect_stat = orig->collect_stat;
evsel->weak_group = orig->weak_group;
+ evsel->use_config_name = orig->use_config_name;
+ evsel->pmu = orig->pmu;
+ evsel->first_wildcard_match = orig->first_wildcard_match;
if (evsel__copy_config_terms(evsel, orig) < 0)
goto out_err;
+ evsel->alternate_hw_config = orig->alternate_hw_config;
+
return evsel;
out_err:
@@ -435,48 +570,108 @@ out_err:
return NULL;
}
+static int trace_event__id(const char *sys, const char *name)
+{
+ char *tp_dir = get_events_file(sys);
+ char path[PATH_MAX];
+ int id, err;
+
+ if (!tp_dir)
+ return -1;
+
+ scnprintf(path, PATH_MAX, "%s/%s/id", tp_dir, name);
+ put_events_file(tp_dir);
+ err = filename__read_int(path, &id);
+ if (err)
+ return err;
+
+ return id;
+}
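trace_event__id() resolves a tracepoint to the numeric id the kernel exposes under the tracefs events directory, e.g. /sys/kernel/tracing/events/<sys>/<name>/id. An illustrative call:

    /* Illustrative only: look up the id of sched:sched_switch. */
    int id = trace_event__id("sched", "sched_switch");

    if (id < 0)
        pr_debug("no tracefs id for sched:sched_switch\n");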
+
/*
* Returns pointer with encoded error via <linux/err.h> interface.
*/
-struct evsel *evsel__newtp_idx(const char *sys, const char *name, int idx)
+struct evsel *evsel__newtp_idx(const char *sys, const char *name, int idx, bool format)
{
+ struct perf_event_attr attr = {
+ .type = PERF_TYPE_TRACEPOINT,
+ .sample_type = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
+ PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD),
+ };
struct evsel *evsel = zalloc(perf_evsel__object.size);
- int err = -ENOMEM;
+ int err = -ENOMEM, id = -1;
- if (evsel == NULL) {
+ if (evsel == NULL)
goto out_err;
- } else {
- struct perf_event_attr attr = {
- .type = PERF_TYPE_TRACEPOINT,
- .sample_type = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
- PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD),
- };
- if (asprintf(&evsel->name, "%s:%s", sys, name) < 0)
- goto out_free;
- evsel->tp_format = trace_event__tp_format(sys, name);
- if (IS_ERR(evsel->tp_format)) {
- err = PTR_ERR(evsel->tp_format);
+ if (asprintf(&evsel->name, "%s:%s", sys, name) < 0)
+ goto out_free;
+
+#ifdef HAVE_LIBTRACEEVENT
+ evsel->tp_sys = strdup(sys);
+ if (!evsel->tp_sys)
+ goto out_free;
+
+ evsel->tp_name = strdup(name);
+ if (!evsel->tp_name)
+ goto out_free;
+#endif
+
+ event_attr_init(&attr);
+
+ if (format) {
+ id = trace_event__id(sys, name);
+ if (id < 0) {
+ err = id;
goto out_free;
}
-
- event_attr_init(&attr);
- attr.config = evsel->tp_format->id;
- attr.sample_period = 1;
- evsel__init(evsel, &attr, idx);
}
-
+ attr.config = (__u64)id;
+ attr.sample_period = 1;
+ evsel__init(evsel, &attr, idx);
return evsel;
out_free:
zfree(&evsel->name);
+#ifdef HAVE_LIBTRACEEVENT
+ zfree(&evsel->tp_sys);
+ zfree(&evsel->tp_name);
+#endif
free(evsel);
out_err:
return ERR_PTR(err);
}
-const char *evsel__hw_names[PERF_COUNT_HW_MAX] = {
+#ifdef HAVE_LIBTRACEEVENT
+struct tep_event *evsel__tp_format(struct evsel *evsel)
+{
+ struct tep_event *tp_format = evsel->tp_format;
+
+ if (tp_format)
+ return tp_format;
+
+ if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
+ return NULL;
+
+ if (!evsel->tp_sys)
+ tp_format = trace_event__tp_format_id(evsel->core.attr.config);
+ else
+ tp_format = trace_event__tp_format(evsel->tp_sys, evsel->tp_name);
+
+ if (IS_ERR(tp_format)) {
+ int err = -PTR_ERR(tp_format);
+
+ pr_err("Error getting tracepoint format '%s' '%s'(%d)\n",
+ evsel__name(evsel), strerror(err), err);
+ return NULL;
+ }
+ evsel->tp_format = tp_format;
+ return evsel->tp_format;
+}
+#endif
+
+const char *const evsel__hw_names[PERF_COUNT_HW_MAX] = {
"cycles",
"instructions",
"cache-references",
@@ -489,6 +684,28 @@ const char *evsel__hw_names[PERF_COUNT_HW_MAX] = {
"ref-cycles",
};
+char *evsel__bpf_counter_events;
+
+bool evsel__match_bpf_counter_events(const char *name)
+{
+ int name_len;
+ bool match;
+ char *ptr;
+
+ if (!evsel__bpf_counter_events)
+ return false;
+
+ ptr = strstr(evsel__bpf_counter_events, name);
+ name_len = strlen(name);
+
+ /* check name matches a full token in evsel__bpf_counter_events */
+ match = (ptr != NULL) &&
+ ((ptr == evsel__bpf_counter_events) || (*(ptr - 1) == ',')) &&
+ ((*(ptr + name_len) == ',') || (*(ptr + name_len) == '\0'));
+
+ return match;
+}
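Illustrative behaviour of the token matching, assuming evsel__bpf_counter_events = "cycles,instructions":

    evsel__match_bpf_counter_events("cycles");       /* true: full token */
    evsel__match_bpf_counter_events("instructions"); /* true: full token */
    evsel__match_bpf_counter_events("cycle");        /* false: next char is 's' */
    evsel__match_bpf_counter_events("branches");     /* false: not in the list */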
+
static const char *__evsel__hw_name(u64 config)
{
if (config < PERF_COUNT_HW_MAX && evsel__hw_names[config])
@@ -497,11 +714,10 @@ static const char *__evsel__hw_name(u64 config)
return "unknown-hardware";
}
-static int perf_evsel__add_modifiers(struct evsel *evsel, char *bf, size_t size)
+static int evsel__add_modifiers(struct evsel *evsel, char *bf, size_t size)
{
int colon = 0, r = 0;
struct perf_event_attr *attr = &evsel->core.attr;
- bool exclude_guest_default = false;
#define MOD_PRINT(context, mod) do { \
if (!attr->exclude_##context) { \
@@ -513,17 +729,15 @@ static int perf_evsel__add_modifiers(struct evsel *evsel, char *bf, size_t size)
MOD_PRINT(kernel, 'k');
MOD_PRINT(user, 'u');
MOD_PRINT(hv, 'h');
- exclude_guest_default = true;
}
if (attr->precise_ip) {
if (!colon)
colon = ++r;
r += scnprintf(bf + r, size - r, "%.*s", attr->precise_ip, "ppp");
- exclude_guest_default = true;
}
- if (attr->exclude_host || attr->exclude_guest == exclude_guest_default) {
+ if (attr->exclude_host || attr->exclude_guest) {
MOD_PRINT(host, 'H');
MOD_PRINT(guest, 'G');
}
@@ -533,13 +747,18 @@ static int perf_evsel__add_modifiers(struct evsel *evsel, char *bf, size_t size)
return r;
}
+int __weak arch_evsel__hw_name(struct evsel *evsel, char *bf, size_t size)
+{
+ return scnprintf(bf, size, "%s", __evsel__hw_name(evsel->core.attr.config));
+}
+
static int evsel__hw_name(struct evsel *evsel, char *bf, size_t size)
{
- int r = scnprintf(bf, size, "%s", __evsel__hw_name(evsel->core.attr.config));
- return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
+ int r = arch_evsel__hw_name(evsel, bf, size);
+ return r + evsel__add_modifiers(evsel, bf + r, size - r);
}
-const char *evsel__sw_names[PERF_COUNT_SW_MAX] = {
+const char *const evsel__sw_names[PERF_COUNT_SW_MAX] = {
"cpu-clock",
"task-clock",
"page-faults",
@@ -562,7 +781,7 @@ static const char *__evsel__sw_name(u64 config)
static int evsel__sw_name(struct evsel *evsel, char *bf, size_t size)
{
int r = scnprintf(bf, size, "%s", __evsel__sw_name(evsel->core.attr.config));
- return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
+ return r + evsel__add_modifiers(evsel, bf + r, size - r);
}
static int __evsel__bp_name(char *bf, size_t size, u64 addr, u64 type)
@@ -587,10 +806,10 @@ static int evsel__bp_name(struct evsel *evsel, char *bf, size_t size)
{
struct perf_event_attr *attr = &evsel->core.attr;
int r = __evsel__bp_name(bf, size, attr->bp_addr, attr->bp_type);
- return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
+ return r + evsel__add_modifiers(evsel, bf + r, size - r);
}
-const char *evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX][EVSEL__MAX_ALIASES] = {
+const char *const evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX][EVSEL__MAX_ALIASES] = {
{ "L1-dcache", "l1-d", "l1d", "L1-data", },
{ "L1-icache", "l1-i", "l1i", "L1-instruction", },
{ "LLC", "L2", },
@@ -600,13 +819,13 @@ const char *evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX][EVSEL__MAX_ALIASES] = {
{ "node", },
};
-const char *evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX][EVSEL__MAX_ALIASES] = {
+const char *const evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX][EVSEL__MAX_ALIASES] = {
{ "load", "loads", "read", },
{ "store", "stores", "write", },
{ "prefetch", "prefetches", "speculative-read", "speculative-load", },
};
-const char *evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX][EVSEL__MAX_ALIASES] = {
+const char *const evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX][EVSEL__MAX_ALIASES] = {
{ "refs", "Reference", "ops", "access", },
{ "misses", "miss", },
};
@@ -618,11 +837,11 @@ const char *evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX][EVSEL__MAX_AL
#define COP(x) (1 << x)
/*
- * cache operartion stat
+ * cache operation stat
* L1I : Read and prefetch only
* ITLB and BPU : Read-only
*/
-static unsigned long evsel__hw_cache_stat[C(MAX)] = {
+static const unsigned long evsel__hw_cache_stat[C(MAX)] = {
[C(L1D)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
[C(L1I)] = (CACHE_READ | CACHE_PREFETCH),
[C(LL)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
@@ -682,19 +901,13 @@ out_err:
static int evsel__hw_cache_name(struct evsel *evsel, char *bf, size_t size)
{
int ret = __evsel__hw_cache_name(evsel->core.attr.config, bf, size);
- return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
+ return ret + evsel__add_modifiers(evsel, bf + ret, size - ret);
}
static int evsel__raw_name(struct evsel *evsel, char *bf, size_t size)
{
int ret = scnprintf(bf, size, "raw 0x%" PRIx64, evsel->core.attr.config);
- return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
-}
-
-static int evsel__tool_name(char *bf, size_t size)
-{
- int ret = scnprintf(bf, size, "duration_time");
- return ret;
+ return ret + evsel__add_modifiers(evsel, bf + ret, size - ret);
}
const char *evsel__name(struct evsel *evsel)
@@ -721,10 +934,7 @@ const char *evsel__name(struct evsel *evsel)
break;
case PERF_TYPE_SOFTWARE:
- if (evsel->tool_event)
- evsel__tool_name(bf, sizeof(bf));
- else
- evsel__sw_name(evsel, bf, sizeof(bf));
+ evsel__sw_name(evsel, bf, sizeof(bf));
break;
case PERF_TYPE_TRACEPOINT:
@@ -735,6 +945,10 @@ const char *evsel__name(struct evsel *evsel)
evsel__bp_name(evsel, bf, sizeof(bf));
break;
+ case PERF_PMU_TYPE_TOOL:
+ scnprintf(bf, sizeof(bf), "%s", evsel__tool_pmu_event_name(evsel));
+ break;
+
default:
scnprintf(bf, sizeof(bf), "unknown attr type: %d",
evsel->core.attr.type);
@@ -749,6 +963,22 @@ out_unknown:
return "unknown";
}
+bool evsel__name_is(struct evsel *evsel, const char *name)
+{
+ return !strcmp(evsel__name(evsel), name);
+}
+
+const char *evsel__metric_id(const struct evsel *evsel)
+{
+ if (evsel->metric_id)
+ return evsel->metric_id;
+
+ if (evsel__is_tool(evsel))
+ return evsel__tool_pmu_event_name(evsel);
+
+ return "unknown";
+}
+
const char *evsel__group_name(struct evsel *evsel)
{
return evsel->group_name ?: "anon group";
@@ -767,16 +997,22 @@ const char *evsel__group_name(struct evsel *evsel)
int evsel__group_desc(struct evsel *evsel, char *buf, size_t size)
{
int ret = 0;
+ bool first = true;
struct evsel *pos;
const char *group_name = evsel__group_name(evsel);
if (!evsel->forced_leader)
ret = scnprintf(buf, size, "%s { ", group_name);
- ret += scnprintf(buf + ret, size - ret, "%s", evsel__name(evsel));
+ for_each_group_evsel(pos, evsel) {
+ if (symbol_conf.skip_empty &&
+ evsel__hists(pos)->stats.nr_samples == 0)
+ continue;
- for_each_group_member(pos, evsel)
- ret += scnprintf(buf + ret, size - ret, ", %s", evsel__name(pos));
+ ret += scnprintf(buf + ret, size - ret, "%s%s",
+ first ? "" : ", ", evsel__name(pos));
+ first = false;
+ }
if (!evsel->forced_leader)
ret += scnprintf(buf + ret, size - ret, " }");
@@ -819,15 +1055,18 @@ static void __evsel__config_callchain(struct evsel *evsel, struct record_opts *o
if (param->record_mode == CALLCHAIN_DWARF) {
if (!function) {
+ const char *arch = perf_env__arch(evsel__env(evsel));
+
evsel__set_sample_bit(evsel, REGS_USER);
evsel__set_sample_bit(evsel, STACK_USER);
- if (opts->sample_user_regs && DWARF_MINIMAL_REGS != PERF_REGS_MASK) {
- attr->sample_regs_user |= DWARF_MINIMAL_REGS;
+ if (opts->sample_user_regs &&
+ DWARF_MINIMAL_REGS(arch) != arch__user_reg_mask()) {
+ attr->sample_regs_user |= DWARF_MINIMAL_REGS(arch);
pr_warning("WARNING: The use of --call-graph=dwarf may require all the user registers, "
"specifying a subset with --user-regs may render DWARF unwinding unreliable, "
"so the minimal registers set (IP, SP) is explicitly forced.\n");
} else {
- attr->sample_regs_user |= PERF_REGS_MASK;
+ attr->sample_regs_user |= arch__user_reg_mask();
}
attr->sample_stack_user = param->dump_size;
attr->exclude_callchain_user = 1;
@@ -850,9 +1089,7 @@ void evsel__config_callchain(struct evsel *evsel, struct record_opts *opts,
return __evsel__config_callchain(evsel, opts, param);
}
-static void
-perf_evsel__reset_callgraph(struct evsel *evsel,
- struct callchain_param *param)
+static void evsel__reset_callgraph(struct evsel *evsel, struct callchain_param *param)
{
struct perf_event_attr *attr = &evsel->core.attr;
@@ -944,6 +1181,9 @@ static void evsel__apply_config_terms(struct evsel *evsel,
case EVSEL__CONFIG_TERM_AUX_OUTPUT:
attr->aux_output = term->val.aux_output ? 1 : 0;
break;
+ case EVSEL__CONFIG_TERM_AUX_ACTION:
+ /* Already applied by auxtrace */
+ break;
case EVSEL__CONFIG_TERM_AUX_SAMPLE_SIZE:
/* Already applied by auxtrace */
break;
@@ -988,7 +1228,7 @@ static void evsel__apply_config_terms(struct evsel *evsel,
/* If global callgraph set, clear it */
if (callchain_param.enabled)
- perf_evsel__reset_callgraph(evsel, &callchain_param);
+ evsel__reset_callgraph(evsel, &callchain_param);
/* set perf-event callgraph */
if (param.enabled) {
@@ -1014,6 +1254,33 @@ struct evsel_config_term *__evsel__get_config_term(struct evsel *evsel, enum evs
return found_term;
}
+void __weak arch_evsel__set_sample_weight(struct evsel *evsel)
+{
+ evsel__set_sample_bit(evsel, WEIGHT);
+}
+
+void __weak arch__post_evsel_config(struct evsel *evsel __maybe_unused,
+ struct perf_event_attr *attr __maybe_unused)
+{
+}
+
+static void evsel__set_default_freq_period(struct record_opts *opts,
+ struct perf_event_attr *attr)
+{
+ if (opts->freq) {
+ attr->freq = 1;
+ attr->sample_freq = opts->freq;
+ } else {
+ attr->sample_period = opts->default_interval;
+ }
+}
+
+bool evsel__is_offcpu_event(struct evsel *evsel)
+{
+ return evsel__is_bpf_output(evsel) && evsel__name_is(evsel, OFFCPU_EVENT) &&
+ evsel->core.attr.sample_type & PERF_SAMPLE_RAW;
+}
+
/*
* The enable_on_exec/disabled value strategy:
*
@@ -1045,14 +1312,15 @@ struct evsel_config_term *__evsel__get_config_term(struct evsel *evsel, enum evs
void evsel__config(struct evsel *evsel, struct record_opts *opts,
struct callchain_param *callchain)
{
- struct evsel *leader = evsel->leader;
+ struct evsel *leader = evsel__leader(evsel);
struct perf_event_attr *attr = &evsel->core.attr;
int track = evsel->tracking;
bool per_cpu = opts->target.default_per_cpu && !opts->target.per_thread;
attr->sample_id_all = perf_missing_features.sample_id_all ? 0 : 1;
- attr->inherit = !opts->no_inherit;
+ attr->inherit = target__has_cpu(&opts->target) ? 0 : !opts->no_inherit;
attr->write_backward = opts->overwrite ? 1 : 0;
+ attr->read_format = PERF_FORMAT_LOST;
evsel__set_sample_bit(evsel, IP);
evsel__set_sample_bit(evsel, TID);
@@ -1072,7 +1340,15 @@ void evsel__config(struct evsel *evsel, struct record_opts *opts,
*/
if (leader->core.nr_members > 1) {
attr->read_format |= PERF_FORMAT_GROUP;
- attr->inherit = 0;
+ }
+
+ /*
+ * Inherit + SAMPLE_READ requires SAMPLE_TID in the read_format
+ */
+ if (attr->inherit) {
+ evsel__set_sample_bit(evsel, TID);
+ evsel->core.attr.read_format |=
+ PERF_FORMAT_ID;
}
}
@@ -1080,14 +1356,12 @@ void evsel__config(struct evsel *evsel, struct record_opts *opts,
* We default some events to have a default interval. But keep
* it a weak assumption overridable by the user.
*/
- if (!attr->sample_period) {
- if (opts->freq) {
- attr->freq = 1;
- attr->sample_freq = opts->freq;
- } else {
- attr->sample_period = opts->default_interval;
- }
- }
+ if ((evsel->is_libpfm_event && !attr->sample_period) ||
+ (!evsel->is_libpfm_event && (!attr->sample_period ||
+ opts->user_freq != UINT_MAX ||
+ opts->user_interval != ULLONG_MAX)))
+ evsel__set_default_freq_period(opts, attr);
+
/*
* If attr->freq was set (here or earlier), ask for period
* to be sampled.
@@ -1152,7 +1426,7 @@ void evsel__config(struct evsel *evsel, struct record_opts *opts,
evsel__set_sample_bit(evsel, CPU);
}
- if (opts->sample_address)
+ if (opts->sample_data_src)
evsel__set_sample_bit(evsel, DATA_SRC);
if (opts->sample_phys_addr)
@@ -1167,13 +1441,16 @@ void evsel__config(struct evsel *evsel, struct record_opts *opts,
attr->branch_sample_type = opts->branch_stack;
}
- if (opts->sample_weight)
- evsel__set_sample_bit(evsel, WEIGHT);
+ if (opts->sample_weight || evsel->retire_lat) {
+ arch_evsel__set_sample_weight(evsel);
+ evsel->retire_lat = false;
+ }
+ attr->task = track;
+ attr->mmap = track;
+ attr->mmap2 = track && !perf_missing_features.mmap2;
+ attr->comm = track;
+ attr->build_id = track && opts->build_id;
- attr->task = track;
- attr->mmap = track;
- attr->mmap2 = track && !perf_missing_features.mmap2;
- attr->comm = track;
/*
* ksymbol is tracked separately with text poke because it needs to be
* system wide and enabled immediately.
@@ -1190,6 +1467,12 @@ void evsel__config(struct evsel *evsel, struct record_opts *opts,
evsel__set_sample_bit(evsel, CGROUP);
}
+ if (opts->sample_data_page_size)
+ evsel__set_sample_bit(evsel, DATA_PAGE_SIZE);
+
+ if (opts->sample_code_page_size)
+ evsel__set_sample_bit(evsel, CODE_PAGE_SIZE);
+
if (opts->record_switch_events)
attr->context_switch = track;
@@ -1216,7 +1499,7 @@ void evsel__config(struct evsel *evsel, struct record_opts *opts,
* group leaders for workloads executed by perf.
*/
if (target__none(&opts->target) && evsel__is_group_leader(evsel) &&
- !opts->initial_delay)
+ !opts->target.initial_delay)
attr->enable_on_exec = 1;
if (evsel->immediate) {
@@ -1272,6 +1555,13 @@ void evsel__config(struct evsel *evsel, struct record_opts *opts,
*/
if (evsel__is_dummy_event(evsel))
evsel__reset_sample_bit(evsel, BRANCH_STACK);
+
+ if (evsel__is_offcpu_event(evsel)) {
+ evsel->core.attr.sample_type &= OFFCPU_SAMPLE_TYPES;
+ attr->inherit = 0;
+ }
+
+ arch__post_evsel_config(evsel, attr);
}
int evsel__set_filter(struct evsel *evsel, const char *filter)
@@ -1314,9 +1604,9 @@ int evsel__append_addr_filter(struct evsel *evsel, const char *filter)
}
/* Caller has to clear disabled after going through all CPUs. */
-int evsel__enable_cpu(struct evsel *evsel, int cpu)
+int evsel__enable_cpu(struct evsel *evsel, int cpu_map_idx)
{
- return perf_evsel__enable_cpu(&evsel->core, cpu);
+ return perf_evsel__enable_cpu(&evsel->core, cpu_map_idx);
}
int evsel__enable(struct evsel *evsel)
@@ -1329,9 +1619,9 @@ int evsel__enable(struct evsel *evsel)
}
/* Caller has to set disabled after going through all CPUs. */
-int evsel__disable_cpu(struct evsel *evsel, int cpu)
+int evsel__disable_cpu(struct evsel *evsel, int cpu_map_idx)
{
- return perf_evsel__disable_cpu(&evsel->core, cpu);
+ return perf_evsel__disable_cpu(&evsel->core, cpu_map_idx);
}
int evsel__disable(struct evsel *evsel)
@@ -1349,11 +1639,11 @@ int evsel__disable(struct evsel *evsel)
return err;
}
-static void evsel__free_config_terms(struct evsel *evsel)
+void free_config_terms(struct list_head *config_terms)
{
struct evsel_config_term *term, *h;
- list_for_each_entry_safe(term, h, &evsel->config_terms, list) {
+ list_for_each_entry_safe(term, h, config_terms, list) {
list_del_init(&term->list);
if (term->free_str)
zfree(&term->val.str);
@@ -1361,10 +1651,19 @@ static void evsel__free_config_terms(struct evsel *evsel)
}
}
+static void evsel__free_config_terms(struct evsel *evsel)
+{
+ free_config_terms(&evsel->config_terms);
+}
+
void evsel__exit(struct evsel *evsel)
{
assert(list_empty(&evsel->core.node));
assert(evsel->evlist == NULL);
+ if (evsel__is_retire_lat(evsel))
+ evsel__tpebs_close(evsel);
+ bpf_counter__destroy(evsel);
+ perf_bpf_filter__destroy(evsel);
evsel__free_counts(evsel);
perf_evsel__free_fd(&evsel->core);
perf_evsel__free_id(&evsel->core);
@@ -1375,19 +1674,34 @@ void evsel__exit(struct evsel *evsel)
perf_thread_map__put(evsel->core.threads);
zfree(&evsel->group_name);
zfree(&evsel->name);
- zfree(&evsel->pmu_name);
- zfree(&evsel->per_pkg_mask);
+#ifdef HAVE_LIBTRACEEVENT
+ zfree(&evsel->tp_sys);
+ zfree(&evsel->tp_name);
+#endif
+ zfree(&evsel->filter);
+ zfree(&evsel->group_pmu_name);
+ zfree(&evsel->unit);
+ zfree(&evsel->metric_id);
+ evsel__zero_per_pkg(evsel);
+ hashmap__free(evsel->per_pkg_mask);
+ evsel->per_pkg_mask = NULL;
zfree(&evsel->metric_events);
perf_evsel__object.fini(evsel);
+ if (evsel__tool_event(evsel) == TOOL_PMU__EVENT_SYSTEM_TIME ||
+ evsel__tool_event(evsel) == TOOL_PMU__EVENT_USER_TIME)
+ xyarray__delete(evsel->start_times);
}
void evsel__delete(struct evsel *evsel)
{
+ if (!evsel)
+ return;
+
evsel__exit(evsel);
free(evsel);
}
-void evsel__compute_deltas(struct evsel *evsel, int cpu, int thread,
+void evsel__compute_deltas(struct evsel *evsel, int cpu_map_idx, int thread,
struct perf_counts_values *count)
{
struct perf_counts_values tmp;
@@ -1395,71 +1709,105 @@ void evsel__compute_deltas(struct evsel *evsel, int cpu, int thread,
if (!evsel->prev_raw_counts)
return;
- if (cpu == -1) {
- tmp = evsel->prev_raw_counts->aggr;
- evsel->prev_raw_counts->aggr = *count;
- } else {
- tmp = *perf_counts(evsel->prev_raw_counts, cpu, thread);
- *perf_counts(evsel->prev_raw_counts, cpu, thread) = *count;
- }
+ tmp = *perf_counts(evsel->prev_raw_counts, cpu_map_idx, thread);
+ *perf_counts(evsel->prev_raw_counts, cpu_map_idx, thread) = *count;
count->val = count->val - tmp.val;
count->ena = count->ena - tmp.ena;
count->run = count->run - tmp.run;
}
-void perf_counts_values__scale(struct perf_counts_values *count,
- bool scale, s8 *pscaled)
+static int evsel__read_one(struct evsel *evsel, int cpu_map_idx, int thread)
{
- s8 scaled = 0;
+ struct perf_counts_values *count = perf_counts(evsel->counts, cpu_map_idx, thread);
- if (scale) {
- if (count->run == 0) {
- scaled = -1;
- count->val = 0;
- } else if (count->run < count->ena) {
- scaled = 1;
- count->val = (u64)((double) count->val * count->ena / count->run);
- }
+ return perf_evsel__read(&evsel->core, cpu_map_idx, thread, count);
+}
+
+static void evsel__set_count(struct evsel *counter, int cpu_map_idx, int thread,
+ u64 val, u64 ena, u64 run, u64 lost)
+{
+ struct perf_counts_values *count;
+
+ count = perf_counts(counter->counts, cpu_map_idx, thread);
+
+ if (evsel__is_retire_lat(counter)) {
+ evsel__tpebs_read(counter, cpu_map_idx, thread);
+ perf_counts__set_loaded(counter->counts, cpu_map_idx, thread, true);
+ return;
}
- if (pscaled)
- *pscaled = scaled;
+ count->val = val;
+ count->ena = ena;
+ count->run = run;
+ count->lost = lost;
+
+ perf_counts__set_loaded(counter->counts, cpu_map_idx, thread, true);
}
-static int evsel__read_one(struct evsel *evsel, int cpu, int thread)
+static bool evsel__group_has_tpebs(struct evsel *leader)
{
- struct perf_counts_values *count = perf_counts(evsel->counts, cpu, thread);
+ struct evsel *evsel;
- return perf_evsel__read(&evsel->core, cpu, thread, count);
+ for_each_group_evsel(evsel, leader) {
+ if (evsel__is_retire_lat(evsel))
+ return true;
+ }
+ return false;
}
-static void
-perf_evsel__set_count(struct evsel *counter, int cpu, int thread,
- u64 val, u64 ena, u64 run)
+static u64 evsel__group_read_nr_members(struct evsel *leader)
{
- struct perf_counts_values *count;
+ u64 nr = leader->core.nr_members;
+ struct evsel *evsel;
- count = perf_counts(counter->counts, cpu, thread);
+ for_each_group_evsel(evsel, leader) {
+ if (evsel__is_retire_lat(evsel))
+ nr--;
+ }
+ return nr;
+}
- count->val = val;
- count->ena = ena;
- count->run = run;
+static u64 evsel__group_read_size(struct evsel *leader)
+{
+ u64 read_format = leader->core.attr.read_format;
+ int entry = sizeof(u64); /* value */
+ int size = 0;
+ int nr = 1;
+
+ if (!evsel__group_has_tpebs(leader))
+ return perf_evsel__read_size(&leader->core);
+
+ if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+ size += sizeof(u64);
+
+ if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+ size += sizeof(u64);
- perf_counts__set_loaded(counter->counts, cpu, thread, true);
+ if (read_format & PERF_FORMAT_ID)
+ entry += sizeof(u64);
+
+ if (read_format & PERF_FORMAT_LOST)
+ entry += sizeof(u64);
+
+ if (read_format & PERF_FORMAT_GROUP) {
+ nr = evsel__group_read_nr_members(leader);
+ size += sizeof(u64);
+ }
+
+ size += entry * nr;
+ return size;
}
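A worked example (read_format assumed): with PERF_FORMAT_GROUP | PERF_FORMAT_ID | PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING and two counted (non-tpebs) members:

    entry = 8 (value) + 8 (id)              = 16 bytes
    size  = 8 (ena) + 8 (run) + 8 (nr)      = 24 bytes
    total = size + entry * nr = 24 + 16 * 2 = 56 bytes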
-static int
-perf_evsel__process_group_data(struct evsel *leader,
- int cpu, int thread, u64 *data)
+static int evsel__process_group_data(struct evsel *leader, int cpu_map_idx, int thread, u64 *data)
{
u64 read_format = leader->core.attr.read_format;
struct sample_read_value *v;
- u64 nr, ena = 0, run = 0, i;
+ u64 nr, ena = 0, run = 0, lost = 0;
nr = *data++;
- if (nr != (u64) leader->core.nr_members)
+ if (nr != evsel__group_read_nr_members(leader))
return -EINVAL;
if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
@@ -1468,30 +1816,28 @@ perf_evsel__process_group_data(struct evsel *leader,
if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
run = *data++;
- v = (struct sample_read_value *) data;
-
- perf_evsel__set_count(leader, cpu, thread,
- v[0].value, ena, run);
-
- for (i = 1; i < nr; i++) {
+ v = (void *)data;
+ sample_read_group__for_each(v, nr, read_format) {
struct evsel *counter;
- counter = perf_evlist__id2evsel(leader->evlist, v[i].id);
+ counter = evlist__id2evsel(leader->evlist, v->id);
if (!counter)
return -EINVAL;
- perf_evsel__set_count(counter, cpu, thread,
- v[i].value, ena, run);
+ if (read_format & PERF_FORMAT_LOST)
+ lost = v->lost;
+
+ evsel__set_count(counter, cpu_map_idx, thread, v->value, ena, run, lost);
}
return 0;
}
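The buffer walked above follows the PERF_FORMAT_GROUP layout documented in perf_event_open(2), with the optional fields present only when selected in read_format:

    struct read_format {
        u64 nr;            /* number of events */
        u64 time_enabled;  /* if PERF_FORMAT_TOTAL_TIME_ENABLED */
        u64 time_running;  /* if PERF_FORMAT_TOTAL_TIME_RUNNING */
        struct {
            u64 value;
            u64 id;        /* if PERF_FORMAT_ID */
            u64 lost;      /* if PERF_FORMAT_LOST */
        } values[nr];
    };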
-static int evsel__read_group(struct evsel *leader, int cpu, int thread)
+static int evsel__read_group(struct evsel *leader, int cpu_map_idx, int thread)
{
struct perf_stat_evsel *ps = leader->stats;
u64 read_format = leader->core.attr.read_format;
- int size = perf_evsel__read_size(&leader->core);
+ int size = evsel__group_read_size(leader);
u64 *data = ps->group_data;
if (!(read_format & PERF_FORMAT_ID))
@@ -1508,48 +1854,94 @@ static int evsel__read_group(struct evsel *leader, int cpu, int thread)
ps->group_data = data;
}
- if (FD(leader, cpu, thread) < 0)
+ if (FD(leader, cpu_map_idx, thread) < 0)
return -EINVAL;
- if (readn(FD(leader, cpu, thread), data, size) <= 0)
+ if (readn(FD(leader, cpu_map_idx, thread), data, size) <= 0)
return -errno;
- return perf_evsel__process_group_data(leader, cpu, thread, data);
+ return evsel__process_group_data(leader, cpu_map_idx, thread, data);
}
-int evsel__read_counter(struct evsel *evsel, int cpu, int thread)
+bool __evsel__match(const struct evsel *evsel, u32 type, u64 config)
{
- u64 read_format = evsel->core.attr.read_format;
- if (read_format & PERF_FORMAT_GROUP)
- return evsel__read_group(evsel, cpu, thread);
+ u32 e_type = evsel->core.attr.type;
+ u64 e_config = evsel->core.attr.config;
+
+ if (e_type != type) {
+ return type == PERF_TYPE_HARDWARE && evsel->pmu && evsel->pmu->is_core &&
+ evsel->alternate_hw_config == config;
+ }
+
+ if ((type == PERF_TYPE_HARDWARE || type == PERF_TYPE_HW_CACHE) &&
+ perf_pmus__supports_extended_type())
+ e_config &= PERF_HW_EVENT_MASK;
- return evsel__read_one(evsel, cpu, thread);
+ return e_config == config;
}
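Illustrative use: on a hybrid system the extended-type bits in config are masked off first, so a core-PMU cycles event still compares equal to the generic hardware event:

    /* Assumed caller: treat any core PMU cycles event as cycles. */
    if (__evsel__match(evsel, PERF_TYPE_HARDWARE, PERF_COUNT_HW_CPU_CYCLES))
        handle_cycles_event(evsel); /* hypothetical helper */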
-int __evsel__read_on_cpu(struct evsel *evsel, int cpu, int thread, bool scale)
+int evsel__read_counter(struct evsel *evsel, int cpu_map_idx, int thread)
+{
+ if (evsel__is_tool(evsel))
+ return evsel__tool_pmu_read(evsel, cpu_map_idx, thread);
+
+ if (evsel__is_hwmon(evsel))
+ return evsel__hwmon_pmu_read(evsel, cpu_map_idx, thread);
+
+ if (evsel__is_retire_lat(evsel))
+ return evsel__tpebs_read(evsel, cpu_map_idx, thread);
+
+ if (evsel->core.attr.read_format & PERF_FORMAT_GROUP)
+ return evsel__read_group(evsel, cpu_map_idx, thread);
+
+ return evsel__read_one(evsel, cpu_map_idx, thread);
+}
+
+int __evsel__read_on_cpu(struct evsel *evsel, int cpu_map_idx, int thread, bool scale)
{
struct perf_counts_values count;
size_t nv = scale ? 3 : 1;
- if (FD(evsel, cpu, thread) < 0)
+ if (FD(evsel, cpu_map_idx, thread) < 0)
return -EINVAL;
- if (evsel->counts == NULL && evsel__alloc_counts(evsel, cpu + 1, thread + 1) < 0)
+ if (evsel->counts == NULL && evsel__alloc_counts(evsel) < 0)
return -ENOMEM;
- if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) <= 0)
+ if (readn(FD(evsel, cpu_map_idx, thread), &count, nv * sizeof(u64)) <= 0)
return -errno;
- evsel__compute_deltas(evsel, cpu, thread, &count);
+ evsel__compute_deltas(evsel, cpu_map_idx, thread, &count);
perf_counts_values__scale(&count, scale, NULL);
- *perf_counts(evsel->counts, cpu, thread) = count;
+ *perf_counts(evsel->counts, cpu_map_idx, thread) = count;
return 0;
}
-static int get_group_fd(struct evsel *evsel, int cpu, int thread)
+static int evsel__match_other_cpu(struct evsel *evsel, struct evsel *other,
+ int cpu_map_idx)
+{
+ struct perf_cpu cpu;
+
+ cpu = perf_cpu_map__cpu(evsel->core.cpus, cpu_map_idx);
+ return perf_cpu_map__idx(other->core.cpus, cpu);
+}
+
+static int evsel__hybrid_group_cpu_map_idx(struct evsel *evsel, int cpu_map_idx)
+{
+ struct evsel *leader = evsel__leader(evsel);
+
+ if ((evsel__is_hybrid(evsel) && !evsel__is_hybrid(leader)) ||
+ (!evsel__is_hybrid(evsel) && evsel__is_hybrid(leader))) {
+ return evsel__match_other_cpu(evsel, leader, cpu_map_idx);
+ }
+
+ return cpu_map_idx;
+}
+
+static int get_group_fd(struct evsel *evsel, int cpu_map_idx, int thread)
{
- struct evsel *leader = evsel->leader;
+ struct evsel *leader = evsel__leader(evsel);
int fd;
if (evsel__is_group_leader(evsel))
@@ -1561,15 +1953,21 @@ static int get_group_fd(struct evsel *evsel, int cpu, int thread)
*/
BUG_ON(!leader->core.fd);
- fd = FD(leader, cpu, thread);
- BUG_ON(fd == -1);
+ cpu_map_idx = evsel__hybrid_group_cpu_map_idx(evsel, cpu_map_idx);
+ if (cpu_map_idx == -1)
+ return -1;
+
+ fd = FD(leader, cpu_map_idx, thread);
+ BUG_ON(fd == -1 && !leader->skippable);
- return fd;
+ /*
+ * When the leader has been skipped, return -2 to distinguish from no
+ * group leader case.
+ */
+ return fd == -1 ? -2 : fd;
}
-static void perf_evsel__remove_fd(struct evsel *pos,
- int nr_cpus, int nr_threads,
- int thread_idx)
+static void evsel__remove_fd(struct evsel *pos, int nr_cpus, int nr_threads, int thread_idx)
{
for (int cpu = 0; cpu < nr_cpus; cpu++)
for (int thread = thread_idx; thread < nr_threads - 1; thread++)
@@ -1577,18 +1975,18 @@ static void perf_evsel__remove_fd(struct evsel *pos,
}
static int update_fds(struct evsel *evsel,
- int nr_cpus, int cpu_idx,
+ int nr_cpus, int cpu_map_idx,
int nr_threads, int thread_idx)
{
struct evsel *pos;
- if (cpu_idx >= nr_cpus || thread_idx >= nr_threads)
+ if (cpu_map_idx >= nr_cpus || thread_idx >= nr_threads)
return -EINVAL;
evlist__for_each_entry(evsel->evlist, pos) {
- nr_cpus = pos != evsel ? nr_cpus : cpu_idx;
+ nr_cpus = pos != evsel ? nr_cpus : cpu_map_idx;
- perf_evsel__remove_fd(pos, nr_cpus, nr_threads, thread_idx);
+ evsel__remove_fd(pos, nr_cpus, nr_threads, thread_idx);
/*
* Since fds for next evsel has not been created,
@@ -1600,10 +1998,10 @@ static int update_fds(struct evsel *evsel,
return 0;
}
-static bool ignore_missing_thread(struct evsel *evsel,
- int nr_cpus, int cpu,
- struct perf_thread_map *threads,
- int thread, int err)
+static bool evsel__ignore_missing_thread(struct evsel *evsel,
+ int nr_cpus, int cpu_map_idx,
+ struct perf_thread_map *threads,
+ int thread, int err)
{
pid_t ignore_pid = perf_thread_map__pid(threads, thread);
@@ -1626,7 +2024,7 @@ static bool ignore_missing_thread(struct evsel *evsel,
* We should remove fd for missing_thread first
* because thread_map__remove() will decrease threads->nr.
*/
- if (update_fds(evsel, nr_cpus, cpu, threads->nr, thread))
+ if (update_fds(evsel, nr_cpus, cpu_map_idx, threads->nr, thread))
return false;
if (thread_map__remove(threads, thread))
@@ -1653,61 +2051,46 @@ static void display_attr(struct perf_event_attr *attr)
}
}
-static int perf_event_open(struct evsel *evsel,
- pid_t pid, int cpu, int group_fd,
- unsigned long flags)
+bool evsel__precise_ip_fallback(struct evsel *evsel)
{
- int precise_ip = evsel->core.attr.precise_ip;
- int fd;
-
- while (1) {
- pr_debug2_peo("sys_perf_event_open: pid %d cpu %d group_fd %d flags %#lx",
- pid, cpu, group_fd, flags);
-
- fd = sys_perf_event_open(&evsel->core.attr, pid, cpu, group_fd, flags);
- if (fd >= 0)
- break;
-
- /* Do not try less precise if not requested. */
- if (!evsel->precise_max)
- break;
-
- /*
- * We tried all the precise_ip values, and it's
- * still failing, so leave it to standard fallback.
- */
- if (!evsel->core.attr.precise_ip) {
- evsel->core.attr.precise_ip = precise_ip;
- break;
- }
+ /* Do not try less precise if not requested. */
+ if (!evsel->precise_max)
+ return false;
- pr_debug2_peo("\nsys_perf_event_open failed, error %d\n", -ENOTSUP);
- evsel->core.attr.precise_ip--;
- pr_debug2_peo("decreasing precise_ip by one (%d)\n", evsel->core.attr.precise_ip);
- display_attr(&evsel->core.attr);
+ /*
+ * We tried all the precise_ip values, and it's
+ * still failing, so leave it to standard fallback.
+ */
+ if (!evsel->core.attr.precise_ip) {
+ evsel->core.attr.precise_ip = evsel->precise_ip_original;
+ return false;
}
- return fd;
+ if (!evsel->precise_ip_original)
+ evsel->precise_ip_original = evsel->core.attr.precise_ip;
+
+ evsel->core.attr.precise_ip--;
+ pr_debug2_peo("decreasing precise_ip by one (%d)\n", evsel->core.attr.precise_ip);
+ display_attr(&evsel->core.attr);
+ return true;
}
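A sketch of the intended caller (the open retry loop in evsel__open_cpu(), simplified and assumed here):

    fd = sys_perf_event_open(&evsel->core.attr, pid, cpu.cpu, group_fd, flags);
    if (fd < 0 && evsel__precise_ip_fallback(evsel))
        goto retry_open; /* precise_ip was lowered by one, try again */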
-static int evsel__open_cpu(struct evsel *evsel, struct perf_cpu_map *cpus,
- struct perf_thread_map *threads,
- int start_cpu, int end_cpu)
+static struct perf_cpu_map *empty_cpu_map;
+static struct perf_thread_map *empty_thread_map;
+
+static int __evsel__prepare_open(struct evsel *evsel, struct perf_cpu_map *cpus,
+ struct perf_thread_map *threads)
{
- int cpu, thread, nthreads;
- unsigned long flags = PERF_FLAG_FD_CLOEXEC;
- int pid = -1, err, old_errno;
- enum { NO_CHANGE, SET_TO_MAX, INCREASED_MAX } set_rlimit = NO_CHANGE;
+ int ret = 0;
+ int nthreads = perf_thread_map__nr(threads);
if ((perf_missing_features.write_backward && evsel->core.attr.write_backward) ||
(perf_missing_features.aux_output && evsel->core.attr.aux_output))
return -EINVAL;
if (cpus == NULL) {
- static struct perf_cpu_map *empty_cpu_map;
-
if (empty_cpu_map == NULL) {
- empty_cpu_map = perf_cpu_map__dummy_new();
+ empty_cpu_map = perf_cpu_map__new_any_cpu();
if (empty_cpu_map == NULL)
return -ENOMEM;
}
@@ -1716,8 +2099,6 @@ static int evsel__open_cpu(struct evsel *evsel, struct perf_cpu_map *cpus,
}
if (threads == NULL) {
- static struct perf_thread_map *empty_thread_map;
-
if (empty_thread_map == NULL) {
empty_thread_map = thread_map__new_by_tid(-1);
if (empty_thread_map == NULL)
@@ -1727,21 +2108,33 @@ static int evsel__open_cpu(struct evsel *evsel, struct perf_cpu_map *cpus,
threads = empty_thread_map;
}
- if (evsel->core.system_wide)
- nthreads = 1;
- else
- nthreads = threads->nr;
-
if (evsel->core.fd == NULL &&
- perf_evsel__alloc_fd(&evsel->core, cpus->nr, nthreads) < 0)
+ perf_evsel__alloc_fd(&evsel->core, perf_cpu_map__nr(cpus), nthreads) < 0)
return -ENOMEM;
- if (evsel->cgrp) {
- flags |= PERF_FLAG_PID_CGROUP;
- pid = evsel->cgrp->fd;
- }
+ if (evsel__is_tool(evsel))
+ ret = evsel__tool_pmu_prepare_open(evsel, cpus, nthreads);
-fallback_missing_features:
+ evsel->open_flags = PERF_FLAG_FD_CLOEXEC;
+ if (evsel->cgrp)
+ evsel->open_flags |= PERF_FLAG_PID_CGROUP;
+
+ return ret;
+}
+
+static void evsel__disable_missing_features(struct evsel *evsel)
+{
+ if (perf_missing_features.inherit_sample_read && evsel->core.attr.inherit &&
+ (evsel->core.attr.sample_type & PERF_SAMPLE_READ))
+ evsel->core.attr.inherit = 0;
+ if (perf_missing_features.branch_counters)
+ evsel->core.attr.branch_sample_type &= ~PERF_SAMPLE_BRANCH_COUNTERS;
+ if (perf_missing_features.read_lost)
+ evsel->core.attr.read_format &= ~PERF_FORMAT_LOST;
+ if (perf_missing_features.weight_struct) {
+ evsel__set_sample_bit(evsel, WEIGHT);
+ evsel__reset_sample_bit(evsel, WEIGHT_STRUCT);
+ }
if (perf_missing_features.clockid_wrong)
evsel->core.attr.clockid = CLOCK_MONOTONIC; /* should always work */
if (perf_missing_features.clockid) {
@@ -1749,10 +2142,10 @@ fallback_missing_features:
evsel->core.attr.clockid = 0;
}
if (perf_missing_features.cloexec)
- flags &= ~(unsigned long)PERF_FLAG_FD_CLOEXEC;
+ evsel->open_flags &= ~(unsigned long)PERF_FLAG_FD_CLOEXEC;
if (perf_missing_features.mmap2)
evsel->core.attr.mmap2 = 0;
- if (perf_missing_features.exclude_guest)
+ if (evsel->pmu && evsel->pmu->missing_features.exclude_guest)
evsel->core.attr.exclude_guest = evsel->core.attr.exclude_host = 0;
if (perf_missing_features.lbr_flags)
evsel->core.attr.branch_sample_type &= ~(PERF_SAMPLE_BRANCH_NO_FLAGS |
@@ -1765,56 +2158,504 @@ fallback_missing_features:
evsel->core.attr.bpf_event = 0;
if (perf_missing_features.branch_hw_idx)
evsel->core.attr.branch_sample_type &= ~PERF_SAMPLE_BRANCH_HW_INDEX;
-retry_sample_id:
if (perf_missing_features.sample_id_all)
evsel->core.attr.sample_id_all = 0;
+}
+
+int evsel__prepare_open(struct evsel *evsel, struct perf_cpu_map *cpus,
+ struct perf_thread_map *threads)
+{
+ int err;
+
+ err = __evsel__prepare_open(evsel, cpus, threads);
+ if (err)
+ return err;
+
+ evsel__disable_missing_features(evsel);
+
+ return err;
+}
+
+static bool __has_attr_feature(struct perf_event_attr *attr,
+ struct perf_cpu cpu, unsigned long flags)
+{
+ int fd = syscall(SYS_perf_event_open, attr, /*pid=*/0, cpu.cpu,
+ /*group_fd=*/-1, flags);
+ close(fd);
+
+ if (fd < 0) {
+ attr->exclude_kernel = 1;
+
+ fd = syscall(SYS_perf_event_open, attr, /*pid=*/0, cpu.cpu,
+ /*group_fd=*/-1, flags);
+ close(fd);
+ }
+
+ if (fd < 0) {
+ attr->exclude_hv = 1;
+
+ fd = syscall(SYS_perf_event_open, attr, /*pid=*/0, cpu.cpu,
+ /*group_fd=*/-1, flags);
+ close(fd);
+ }
+
+ if (fd < 0) {
+ attr->exclude_guest = 1;
+
+ fd = syscall(SYS_perf_event_open, attr, /*pid=*/0, cpu.cpu,
+ /*group_fd=*/-1, flags);
+ close(fd);
+ }
+
+ attr->exclude_kernel = 0;
+ attr->exclude_guest = 0;
+ attr->exclude_hv = 0;
+
+ return fd >= 0;
+}
+
+static bool has_attr_feature(struct perf_event_attr *attr, unsigned long flags)
+{
+ struct perf_cpu cpu = {.cpu = -1};
+
+ return __has_attr_feature(attr, cpu, flags);
+}
+
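An illustrative probe mirroring the detection code below: ask whether the running kernel accepts attr.mmap2 at all, using a throwaway software event:

    struct perf_event_attr attr = {
        .type = PERF_TYPE_SOFTWARE,
        .config = PERF_COUNT_SW_TASK_CLOCK,
        .disabled = 1,
        .mmap2 = 1,
    };

    if (!has_attr_feature(&attr, /*flags=*/0))
        perf_missing_features.mmap2 = true;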
+static void evsel__detect_missing_pmu_features(struct evsel *evsel)
+{
+ struct perf_event_attr attr = {
+ .type = evsel->core.attr.type,
+ .config = evsel->core.attr.config,
+ .disabled = 1,
+ };
+ struct perf_pmu *pmu = evsel->pmu;
+ int old_errno;
+
+ old_errno = errno;
+
+ if (pmu == NULL)
+ pmu = evsel->pmu = evsel__find_pmu(evsel);
+
+ if (pmu == NULL || pmu->missing_features.checked)
+ goto out;
+ /*
+ * Must probe features in the order they were added to the
+ * perf_event_attr interface. These are kernel core limitations that
+ * surface per PMU, so we can detect them with the given hardware
+ * event and stop on the first one that succeeds.
+ */
+
+ /* Please add new feature detection here. */
+
+ attr.exclude_guest = 1;
+ if (has_attr_feature(&attr, /*flags=*/0))
+ goto found;
+ pmu->missing_features.exclude_guest = true;
+ pr_debug2("switching off exclude_guest for PMU %s\n", pmu->name);
+
+found:
+ pmu->missing_features.checked = true;
+out:
+ errno = old_errno;
+}
+
+static void evsel__detect_missing_brstack_features(struct evsel *evsel)
+{
+ static bool detection_done = false;
+ struct perf_event_attr attr = {
+ .type = evsel->core.attr.type,
+ .config = evsel->core.attr.config,
+ .disabled = 1,
+ .sample_type = PERF_SAMPLE_BRANCH_STACK,
+ .sample_period = 1000,
+ };
+ int old_errno;
+
+ if (detection_done)
+ return;
+
+ old_errno = errno;
+
+ /*
+ * Must probe features in the order they were added to the
+ * perf_event_attr interface. These are PMU specific limitations,
+ * so we can detect with the given hardware event and stop on the
+ * first one that succeeds.
+ */
+
+ /* Please add new feature detection here. */
+
+ attr.branch_sample_type = PERF_SAMPLE_BRANCH_COUNTERS;
+ if (has_attr_feature(&attr, /*flags=*/0))
+ goto found;
+ perf_missing_features.branch_counters = true;
+ pr_debug2("switching off branch counters support\n");
+
+ attr.branch_sample_type = PERF_SAMPLE_BRANCH_HW_INDEX;
+ if (has_attr_feature(&attr, /*flags=*/0))
+ goto found;
+ perf_missing_features.branch_hw_idx = true;
+ pr_debug2("switching off branch HW index support\n");
+
+ attr.branch_sample_type = PERF_SAMPLE_BRANCH_NO_CYCLES | PERF_SAMPLE_BRANCH_NO_FLAGS;
+ if (has_attr_feature(&attr, /*flags=*/0))
+ goto found;
+ perf_missing_features.lbr_flags = true;
+ pr_debug2_peo("switching off branch sample type no (cycles/flags)\n");
+
+found:
+ detection_done = true;
+ errno = old_errno;
+}
+
+static bool evsel__probe_aux_action(struct evsel *evsel, struct perf_cpu cpu)
+{
+ struct perf_event_attr attr = evsel->core.attr;
+ int old_errno = errno;
+
+ attr.disabled = 1;
+ attr.aux_start_paused = 1;
+
+ if (__has_attr_feature(&attr, cpu, /*flags=*/0)) {
+ errno = old_errno;
+ return true;
+ }
+
+ /*
+ * EOPNOTSUPP means the kernel supports the feature but the PMU does
+ * not, so keep that distinction if possible.
+ */
+ if (errno != EOPNOTSUPP)
+ errno = old_errno;
+
+ return false;
+}
+
+static void evsel__detect_missing_aux_action_feature(struct evsel *evsel, struct perf_cpu cpu)
+{
+ static bool detection_done;
+ struct evsel *leader;
+
+ /*
+ * Don't bother probing aux_action if it is not being used or has been
+ * probed before.
+ */
+ if (!evsel->core.attr.aux_action || detection_done)
+ return;
+
+ detection_done = true;
+
+ /*
+ * The leader is an AUX area event. If it has failed, assume the feature
+ * is not supported.
+ */
+ leader = evsel__leader(evsel);
+ if (evsel == leader) {
+ perf_missing_features.aux_action = true;
+ return;
+ }
+
+ /*
+ * AUX area event with aux_action must have been opened successfully
+ * already, so feature is supported.
+ */
+ if (leader->core.attr.aux_action)
+ return;
+
+ if (!evsel__probe_aux_action(leader, cpu))
+ perf_missing_features.aux_action = true;
+}
+
+static bool evsel__detect_missing_features(struct evsel *evsel, struct perf_cpu cpu)
+{
+ static bool detection_done = false;
+ struct perf_event_attr attr = {
+ .type = PERF_TYPE_SOFTWARE,
+ .config = PERF_COUNT_SW_TASK_CLOCK,
+ .disabled = 1,
+ };
+ int old_errno;
+
+ evsel__detect_missing_aux_action_feature(evsel, cpu);
+
+ evsel__detect_missing_pmu_features(evsel);
+
+ if (evsel__has_br_stack(evsel))
+ evsel__detect_missing_brstack_features(evsel);
+
+ if (detection_done)
+ goto check;
+
+ old_errno = errno;
+
+ /*
+ * Must probe features in the order they were added to the
+ * perf_event_attr interface. These are kernel core limitations,
+ * not PMU-specific, so we can detect with a software event and
+ * stop on the first one that succeeds.
+ */
+
+ /* Please add new feature detection here. */
+
+ attr.inherit = true;
+ attr.sample_type = PERF_SAMPLE_READ;
+ if (has_attr_feature(&attr, /*flags=*/0))
+ goto found;
+ perf_missing_features.inherit_sample_read = true;
+ pr_debug2("Using PERF_SAMPLE_READ / :S modifier is not compatible with inherit, falling back to no-inherit.\n");
+ attr.inherit = false;
+ attr.sample_type = 0;
+
+ attr.read_format = PERF_FORMAT_LOST;
+ if (has_attr_feature(&attr, /*flags=*/0))
+ goto found;
+ perf_missing_features.read_lost = true;
+ pr_debug2("switching off PERF_FORMAT_LOST support\n");
+ attr.read_format = 0;
+
+ attr.sample_type = PERF_SAMPLE_WEIGHT_STRUCT;
+ if (has_attr_feature(&attr, /*flags=*/0))
+ goto found;
+ perf_missing_features.weight_struct = true;
+ pr_debug2("switching off weight struct support\n");
+ attr.sample_type = 0;
+
+ attr.sample_type = PERF_SAMPLE_CODE_PAGE_SIZE;
+ if (has_attr_feature(&attr, /*flags=*/0))
+ goto found;
+ perf_missing_features.code_page_size = true;
+ pr_debug2_peo("Kernel has no PERF_SAMPLE_CODE_PAGE_SIZE support\n");
+ attr.sample_type = 0;
+
+ attr.sample_type = PERF_SAMPLE_DATA_PAGE_SIZE;
+ if (has_attr_feature(&attr, /*flags=*/0))
+ goto found;
+ perf_missing_features.data_page_size = true;
+ pr_debug2_peo("Kernel has no PERF_SAMPLE_DATA_PAGE_SIZE support\n");
+ attr.sample_type = 0;
+
+ attr.cgroup = 1;
+ if (has_attr_feature(&attr, /*flags=*/0))
+ goto found;
+ perf_missing_features.cgroup = true;
+ pr_debug2_peo("Kernel has no cgroup sampling support\n");
+ attr.cgroup = 0;
+
+ attr.aux_output = 1;
+ if (has_attr_feature(&attr, /*flags=*/0))
+ goto found;
+ perf_missing_features.aux_output = true;
+ pr_debug2_peo("Kernel has no attr.aux_output support\n");
+ attr.aux_output = 0;
+
+ attr.bpf_event = 1;
+ if (has_attr_feature(&attr, /*flags=*/0))
+ goto found;
+ perf_missing_features.bpf = true;
+ pr_debug2_peo("switching off bpf_event\n");
+ attr.bpf_event = 0;
+
+ attr.ksymbol = 1;
+ if (has_attr_feature(&attr, /*flags=*/0))
+ goto found;
+ perf_missing_features.ksymbol = true;
+ pr_debug2_peo("switching off ksymbol\n");
+ attr.ksymbol = 0;
+
+ attr.write_backward = 1;
+ if (has_attr_feature(&attr, /*flags=*/0))
+ goto found;
+ perf_missing_features.write_backward = true;
+ pr_debug2_peo("switching off write_backward\n");
+ attr.write_backward = 0;
+
+ attr.use_clockid = 1;
+ attr.clockid = CLOCK_MONOTONIC;
+ if (has_attr_feature(&attr, /*flags=*/0))
+ goto found;
+ perf_missing_features.clockid = true;
+ pr_debug2_peo("switching off clockid\n");
+ attr.use_clockid = 0;
+ attr.clockid = 0;
+
+ if (has_attr_feature(&attr, /*flags=*/PERF_FLAG_FD_CLOEXEC))
+ goto found;
+ perf_missing_features.cloexec = true;
+ pr_debug2_peo("switching off cloexec flag\n");
+
+ attr.mmap2 = 1;
+ if (has_attr_feature(&attr, /*flags=*/0))
+ goto found;
+ perf_missing_features.mmap2 = true;
+ pr_debug2_peo("switching off mmap2\n");
+ attr.mmap2 = 0;
+
+ /* set this unconditionally? */
+ perf_missing_features.sample_id_all = true;
+ pr_debug2_peo("switching off sample_id_all\n");
+
+ attr.inherit = 1;
+ attr.read_format = PERF_FORMAT_GROUP;
+ if (has_attr_feature(&attr, /*flags=*/0))
+ goto found;
+ perf_missing_features.group_read = true;
+ pr_debug2_peo("switching off group read\n");
+ attr.inherit = 0;
+ attr.read_format = 0;
+
+found:
+ detection_done = true;
+ errno = old_errno;
+
+check:
+ if (evsel->core.attr.inherit &&
+ (evsel->core.attr.sample_type & PERF_SAMPLE_READ) &&
+ perf_missing_features.inherit_sample_read)
+ return true;
+
+ if ((evsel->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_COUNTERS) &&
+ perf_missing_features.branch_counters)
+ return true;
+
+ if ((evsel->core.attr.read_format & PERF_FORMAT_LOST) &&
+ perf_missing_features.read_lost)
+ return true;
+
+ if ((evsel->core.attr.sample_type & PERF_SAMPLE_WEIGHT_STRUCT) &&
+ perf_missing_features.weight_struct)
+ return true;
+
+ if (evsel->core.attr.use_clockid && evsel->core.attr.clockid != CLOCK_MONOTONIC &&
+ !perf_missing_features.clockid) {
+ perf_missing_features.clockid_wrong = true;
+ return true;
+ }
+
+ if (evsel->core.attr.use_clockid && perf_missing_features.clockid)
+ return true;
+
+ if ((evsel->open_flags & PERF_FLAG_FD_CLOEXEC) &&
+ perf_missing_features.cloexec)
+ return true;
+
+ if (evsel->core.attr.mmap2 && perf_missing_features.mmap2)
+ return true;
+
+ if ((evsel->core.attr.branch_sample_type & (PERF_SAMPLE_BRANCH_NO_FLAGS |
+ PERF_SAMPLE_BRANCH_NO_CYCLES)) &&
+ perf_missing_features.lbr_flags)
+ return true;
+
+ if (evsel->core.attr.inherit && (evsel->core.attr.read_format & PERF_FORMAT_GROUP) &&
+ perf_missing_features.group_read)
+ return true;
+
+ if (evsel->core.attr.ksymbol && perf_missing_features.ksymbol)
+ return true;
+
+ if (evsel->core.attr.bpf_event && perf_missing_features.bpf)
+ return true;
+
+ if ((evsel->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_HW_INDEX) &&
+ perf_missing_features.branch_hw_idx)
+ return true;
+
+ if (evsel->core.attr.sample_id_all && perf_missing_features.sample_id_all)
+ return true;
+
+ return false;
+}
+
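Every probe in the function above reduces to the same primitive: open a disabled software dummy event with one candidate attr field set, and treat a successful open as "feature present". A minimal standalone sketch of that pattern, assuming kernel headers new enough to define the probed bit (perf_event_open has no glibc wrapper, hence the raw syscall):

	#define _GNU_SOURCE
	#include <linux/perf_event.h>
	#include <stdbool.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	/* Open a disabled counter for the calling process on any CPU and
	 * report whether the kernel accepted the attr. */
	static bool attr_supported(struct perf_event_attr *attr)
	{
		int fd = syscall(SYS_perf_event_open, attr, /*pid=*/0,
				 /*cpu=*/-1, /*group_fd=*/-1, /*flags=*/0UL);

		if (fd < 0)
			return false;
		close(fd);
		return true;
	}

	int main(void)
	{
		struct perf_event_attr attr;

		memset(&attr, 0, sizeof(attr));
		attr.size = sizeof(attr);
		attr.type = PERF_TYPE_SOFTWARE;
		attr.config = PERF_COUNT_SW_TASK_CLOCK;
		attr.disabled = 1;

		/* Candidate feature: PERF_FORMAT_LOST (assumes uapi headers
		 * that define it). */
		attr.read_format = PERF_FORMAT_LOST;
		printf("PERF_FORMAT_LOST is %ssupported\n",
		       attr_supported(&attr) ? "" : "not ");
		return 0;
	}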
+static int evsel__open_cpu(struct evsel *evsel, struct perf_cpu_map *cpus,
+ struct perf_thread_map *threads,
+ int start_cpu_map_idx, int end_cpu_map_idx)
+{
+ int idx, thread, nthreads;
+ int pid = -1, err, old_errno;
+ enum rlimit_action set_rlimit = NO_CHANGE;
+ struct perf_cpu cpu;
+
+ if (evsel__is_retire_lat(evsel))
+ return evsel__tpebs_open(evsel);
+
+ err = __evsel__prepare_open(evsel, cpus, threads);
+ if (err)
+ return err;
+
+ if (cpus == NULL)
+ cpus = empty_cpu_map;
+
+ if (threads == NULL)
+ threads = empty_thread_map;
+
+ nthreads = perf_thread_map__nr(threads);
+
+ if (evsel->cgrp)
+ pid = evsel->cgrp->fd;
+
+fallback_missing_features:
+ evsel__disable_missing_features(evsel);
+
+ pr_debug3("Opening: %s\n", evsel__name(evsel));
display_attr(&evsel->core.attr);
- for (cpu = start_cpu; cpu < end_cpu; cpu++) {
+ if (evsel__is_tool(evsel)) {
+ return evsel__tool_pmu_open(evsel, threads,
+ start_cpu_map_idx,
+ end_cpu_map_idx);
+ }
+ if (evsel__is_hwmon(evsel)) {
+ return evsel__hwmon_pmu_open(evsel, threads,
+ start_cpu_map_idx,
+ end_cpu_map_idx);
+ }
+
+ for (idx = start_cpu_map_idx; idx < end_cpu_map_idx; idx++) {
+ cpu = perf_cpu_map__cpu(cpus, idx);
for (thread = 0; thread < nthreads; thread++) {
int fd, group_fd;
+retry_open:
+ if (thread >= nthreads)
+ break;
if (!evsel->cgrp && !evsel->core.system_wide)
pid = perf_thread_map__pid(threads, thread);
- group_fd = get_group_fd(evsel, cpu, thread);
-retry_open:
- test_attr__ready();
+ group_fd = get_group_fd(evsel, idx, thread);
- fd = perf_event_open(evsel, pid, cpus->map[cpu],
- group_fd, flags);
+ if (group_fd == -2) {
+ pr_debug("broken group leader for %s\n", evsel->name);
+ err = -EINVAL;
+ goto out_close;
+ }
- FD(evsel, cpu, thread) = fd;
+ /* Debug message used by test scripts */
+ pr_debug2_peo("sys_perf_event_open: pid %d cpu %d group_fd %d flags %#lx",
+ pid, cpu.cpu, group_fd, evsel->open_flags);
- if (unlikely(test_attr__enabled)) {
- test_attr__open(&evsel->core.attr, pid, cpus->map[cpu],
- fd, group_fd, flags);
- }
+ fd = sys_perf_event_open(&evsel->core.attr, pid, cpu.cpu,
+ group_fd, evsel->open_flags);
+
+ FD(evsel, idx, thread) = fd;
if (fd < 0) {
err = -errno;
- if (ignore_missing_thread(evsel, cpus->nr, cpu, threads, thread, err)) {
- /*
- * We just removed 1 thread, so take a step
- * back on thread index and lower the upper
- * nthreads limit.
- */
- nthreads--;
- thread--;
-
- /* ... and pretend like nothing have happened. */
- err = 0;
- continue;
- }
-
pr_debug2_peo("\nsys_perf_event_open failed, error %d\n",
err);
goto try_fallback;
}
+ bpf_counter__install_pe(evsel, idx, fd);
+
+ if (unlikely(test_attr__enabled())) {
+ test_attr__open(&evsel->core.attr, pid, cpu,
+ fd, group_fd, evsel->open_flags);
+ }
+
+ /* Debug message used by test scripts */
pr_debug2_peo(" = %d\n", fd);
if (evsel->bpf_fd >= 0) {
@@ -1849,102 +2690,28 @@ retry_open:
return 0;
try_fallback:
+ if (evsel__ignore_missing_thread(evsel, perf_cpu_map__nr(cpus),
+ idx, threads, thread, err)) {
+ /* We just removed 1 thread, so lower the upper nthreads limit. */
+ nthreads--;
+
+		/* ... and pretend like nothing has happened. */
+ err = 0;
+ goto retry_open;
+ }
/*
* perf stat needs between 5 and 22 fds per CPU. When we run out
* of them try to increase the limits.
*/
- if (err == -EMFILE && set_rlimit < INCREASED_MAX) {
- struct rlimit l;
-
- old_errno = errno;
- if (getrlimit(RLIMIT_NOFILE, &l) == 0) {
- if (set_rlimit == NO_CHANGE)
- l.rlim_cur = l.rlim_max;
- else {
- l.rlim_cur = l.rlim_max + 1000;
- l.rlim_max = l.rlim_cur;
- }
- if (setrlimit(RLIMIT_NOFILE, &l) == 0) {
- set_rlimit++;
- errno = old_errno;
- goto retry_open;
- }
- }
- errno = old_errno;
- }
-
- if (err != -EINVAL || cpu > 0 || thread > 0)
- goto out_close;
+ if (err == -EMFILE && rlimit__increase_nofile(&set_rlimit))
+ goto retry_open;
- /*
- * Must probe features in the order they were added to the
- * perf_event_attr interface.
- */
- if (!perf_missing_features.cgroup && evsel->core.attr.cgroup) {
- perf_missing_features.cgroup = true;
- pr_debug2_peo("Kernel has no cgroup sampling support, bailing out\n");
- goto out_close;
- } else if (!perf_missing_features.branch_hw_idx &&
- (evsel->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_HW_INDEX)) {
- perf_missing_features.branch_hw_idx = true;
- pr_debug2("switching off branch HW index support\n");
- goto fallback_missing_features;
- } else if (!perf_missing_features.aux_output && evsel->core.attr.aux_output) {
- perf_missing_features.aux_output = true;
- pr_debug2_peo("Kernel has no attr.aux_output support, bailing out\n");
- goto out_close;
- } else if (!perf_missing_features.bpf && evsel->core.attr.bpf_event) {
- perf_missing_features.bpf = true;
- pr_debug2_peo("switching off bpf_event\n");
- goto fallback_missing_features;
- } else if (!perf_missing_features.ksymbol && evsel->core.attr.ksymbol) {
- perf_missing_features.ksymbol = true;
- pr_debug2_peo("switching off ksymbol\n");
- goto fallback_missing_features;
- } else if (!perf_missing_features.write_backward && evsel->core.attr.write_backward) {
- perf_missing_features.write_backward = true;
- pr_debug2_peo("switching off write_backward\n");
- goto out_close;
- } else if (!perf_missing_features.clockid_wrong && evsel->core.attr.use_clockid) {
- perf_missing_features.clockid_wrong = true;
- pr_debug2_peo("switching off clockid\n");
- goto fallback_missing_features;
- } else if (!perf_missing_features.clockid && evsel->core.attr.use_clockid) {
- perf_missing_features.clockid = true;
- pr_debug2_peo("switching off use_clockid\n");
- goto fallback_missing_features;
- } else if (!perf_missing_features.cloexec && (flags & PERF_FLAG_FD_CLOEXEC)) {
- perf_missing_features.cloexec = true;
- pr_debug2_peo("switching off cloexec flag\n");
- goto fallback_missing_features;
- } else if (!perf_missing_features.mmap2 && evsel->core.attr.mmap2) {
- perf_missing_features.mmap2 = true;
- pr_debug2_peo("switching off mmap2\n");
- goto fallback_missing_features;
- } else if (!perf_missing_features.exclude_guest &&
- (evsel->core.attr.exclude_guest || evsel->core.attr.exclude_host)) {
- perf_missing_features.exclude_guest = true;
- pr_debug2_peo("switching off exclude_guest, exclude_host\n");
+ if (err == -EINVAL && evsel__detect_missing_features(evsel, cpu))
goto fallback_missing_features;
- } else if (!perf_missing_features.sample_id_all) {
- perf_missing_features.sample_id_all = true;
- pr_debug2_peo("switching off sample_id_all\n");
- goto retry_sample_id;
- } else if (!perf_missing_features.lbr_flags &&
- (evsel->core.attr.branch_sample_type &
- (PERF_SAMPLE_BRANCH_NO_CYCLES |
- PERF_SAMPLE_BRANCH_NO_FLAGS))) {
- perf_missing_features.lbr_flags = true;
- pr_debug2_peo("switching off branch sample type no (cycles/flags)\n");
- goto fallback_missing_features;
- } else if (!perf_missing_features.group_read &&
- evsel->core.attr.inherit &&
- (evsel->core.attr.read_format & PERF_FORMAT_GROUP) &&
- evsel__is_group_leader(evsel)) {
- perf_missing_features.group_read = true;
- pr_debug2_peo("switching off group read\n");
- goto fallback_missing_features;
- }
+
+ if (evsel__precise_ip_fallback(evsel))
+ goto retry_open;
+
out_close:
if (err)
threads->err_thread = thread;
@@ -1952,12 +2719,12 @@ out_close:
old_errno = errno;
do {
while (--thread >= 0) {
- if (FD(evsel, cpu, thread) >= 0)
- close(FD(evsel, cpu, thread));
- FD(evsel, cpu, thread) = -1;
+ if (FD(evsel, idx, thread) >= 0)
+ close(FD(evsel, idx, thread));
+ FD(evsel, idx, thread) = -1;
}
thread = nthreads;
- } while (--cpu >= 0);
+ } while (--idx >= 0);
errno = old_errno;
return err;
}
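The deleted hunk above shows what the EMFILE fallback used to do inline; rlimit__increase_nofile() now wraps it. A hedged sketch of that helper, modeled directly on the removed lines (the in-tree version lives in util/rlimit.c and may differ; the SET_TO_MAX middle state is an assumption):

	#include <stdbool.h>
	#include <sys/resource.h>

	enum rlimit_action { NO_CHANGE, SET_TO_MAX, INCREASED_MAX };

	/* First raise the soft NOFILE limit to the hard limit, then try
	 * growing both by 1000; report whether a retry is worthwhile. */
	static bool increase_nofile(enum rlimit_action *set_rlimit)
	{
		struct rlimit l;

		if (*set_rlimit >= INCREASED_MAX || getrlimit(RLIMIT_NOFILE, &l))
			return false;

		if (*set_rlimit == NO_CHANGE) {
			l.rlim_cur = l.rlim_max;	/* soft -> hard */
		} else {
			l.rlim_cur = l.rlim_max + 1000;	/* grow both */
			l.rlim_max = l.rlim_cur;
		}
		if (setrlimit(RLIMIT_NOFILE, &l))
			return false;

		(*set_rlimit)++;
		return true;
	}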
@@ -1965,22 +2732,23 @@ out_close:
int evsel__open(struct evsel *evsel, struct perf_cpu_map *cpus,
struct perf_thread_map *threads)
{
- return evsel__open_cpu(evsel, cpus, threads, 0, cpus ? cpus->nr : 1);
+ return evsel__open_cpu(evsel, cpus, threads, 0, perf_cpu_map__nr(cpus));
}
void evsel__close(struct evsel *evsel)
{
+ if (evsel__is_retire_lat(evsel))
+ evsel__tpebs_close(evsel);
perf_evsel__close(&evsel->core);
perf_evsel__free_id(&evsel->core);
}
-int evsel__open_per_cpu(struct evsel *evsel, struct perf_cpu_map *cpus, int cpu)
+int evsel__open_per_cpu(struct evsel *evsel, struct perf_cpu_map *cpus, int cpu_map_idx)
{
- if (cpu == -1)
- return evsel__open_cpu(evsel, cpus, NULL, 0,
- cpus ? cpus->nr : 1);
+ if (cpu_map_idx == -1)
+ return evsel__open_cpu(evsel, cpus, NULL, 0, perf_cpu_map__nr(cpus));
- return evsel__open_cpu(evsel, cpus, NULL, cpu, cpu + 1);
+ return evsel__open_cpu(evsel, cpus, NULL, cpu_map_idx, cpu_map_idx + 1);
}
int evsel__open_per_thread(struct evsel *evsel, struct perf_thread_map *threads)
@@ -2078,6 +2846,113 @@ perf_event__check_size(union perf_event *event, unsigned int sample_size)
return 0;
}
+void __weak arch_perf_parse_sample_weight(struct perf_sample *data,
+ const __u64 *array,
+ u64 type __maybe_unused)
+{
+ data->weight = *array;
+}
+
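The weak default above only handles the flat PERF_SAMPLE_WEIGHT layout. Architectures that use PERF_SAMPLE_WEIGHT_STRUCT override it to unpack the split fields via the uapi union perf_sample_weight. A sketch of such an override, decoded assuming a little-endian host; the latency field meanings are illustrative (modeled on how x86 splits them), and struct sample is a local stand-in for perf_sample:

	#include <linux/perf_event.h>
	#include <linux/types.h>
	#include <stdio.h>

	/* Stand-in for the few perf_sample fields used here. */
	struct sample {
		__u64 weight;
		__u16 ins_lat;
		__u16 retire_lat;
	};

	/* With PERF_SAMPLE_WEIGHT_STRUCT the u64 packs a 32-bit weight
	 * plus two 16-bit fields (var1_dw/var2_w/var3_w in the uapi
	 * union); with plain PERF_SAMPLE_WEIGHT it is a single value. */
	static void parse_weight(struct sample *data, const __u64 *array, __u64 type)
	{
		union perf_sample_weight weight;

		weight.full = *array;
		if (type & PERF_SAMPLE_WEIGHT) {
			data->weight = weight.full;		/* old flat format */
		} else {
			data->weight = weight.var1_dw;		/* e.g. load latency */
			data->ins_lat = weight.var2_w;		/* e.g. instruction latency */
			data->retire_lat = weight.var3_w;	/* e.g. retire latency */
		}
	}

	int main(void)
	{
		struct sample s = { 0 };
		/* var3=3, var2=7, var1=250 on a little-endian host. */
		__u64 raw = (3ULL << 48) | (7ULL << 32) | 250;

		parse_weight(&s, &raw, PERF_SAMPLE_WEIGHT_STRUCT);
		printf("weight=%llu ins_lat=%u retire_lat=%u\n",
		       (unsigned long long)s.weight, s.ins_lat, s.retire_lat);
		return 0;
	}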
+u64 evsel__bitfield_swap_branch_flags(u64 value)
+{
+ u64 new_val = 0;
+
+ /*
+ * branch_flags
+ * union {
+ * u64 values;
+ * struct {
+ * mispred:1 //target mispredicted
+ * predicted:1 //target predicted
+ * in_tx:1 //in transaction
+ * abort:1 //transaction abort
+ * cycles:16 //cycle count to last branch
+ * type:4 //branch type
+ * spec:2 //branch speculation info
+ * new_type:4 //additional branch type
+ * priv:3 //privilege level
+ * reserved:31
+ * }
+ * }
+ *
+	 * Avoid calling bswap64() on the entire branch_flag.value,
+	 * as it has variable bit-field sizes. Instead the
+	 * macro takes each bit-field's position/size and
+	 * swaps it based on the host endianness.
+ */
+ if (host_is_bigendian()) {
+ new_val = bitfield_swap(value, 0, 1);
+ new_val |= bitfield_swap(value, 1, 1);
+ new_val |= bitfield_swap(value, 2, 1);
+ new_val |= bitfield_swap(value, 3, 1);
+ new_val |= bitfield_swap(value, 4, 16);
+ new_val |= bitfield_swap(value, 20, 4);
+ new_val |= bitfield_swap(value, 24, 2);
+ new_val |= bitfield_swap(value, 26, 4);
+ new_val |= bitfield_swap(value, 30, 3);
+ new_val |= bitfield_swap(value, 33, 31);
+ } else {
+ new_val = bitfield_swap(value, 63, 1);
+ new_val |= bitfield_swap(value, 62, 1);
+ new_val |= bitfield_swap(value, 61, 1);
+ new_val |= bitfield_swap(value, 60, 1);
+ new_val |= bitfield_swap(value, 44, 16);
+ new_val |= bitfield_swap(value, 40, 4);
+ new_val |= bitfield_swap(value, 38, 2);
+ new_val |= bitfield_swap(value, 34, 4);
+ new_val |= bitfield_swap(value, 31, 3);
+ new_val |= bitfield_swap(value, 0, 31);
+ }
+
+ return new_val;
+}
+
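bitfield_swap() itself is defined in evsel.h; a definition consistent with every call site above is sketched below (the in-tree macro may differ textually). It extracts size bits at position pos and mirrors them across the 64-bit word, which is why each field appears in both branches with mirrored positions:

	#include <assert.h>
	#include <stdint.h>

	/* Take 'size' bits of 'src' starting at bit 'pos' and place them
	 * so the field ends at bit 63, i.e. mirror it across the word. */
	#define bitfield_swap(src, pos, size) \
		((((src) >> (pos)) & ((1ULL << (size)) - 1)) << (63 - ((pos) + (size) - 1)))

	int main(void)
	{
		/* mispred lives at bit 0 (1 bit): mirrored it lands at bit
		 * 63, and mirroring bit 63 maps it back, matching the two
		 * branches of evsel__bitfield_swap_branch_flags(). */
		assert(bitfield_swap(1ULL, 0, 1) == (1ULL << 63));
		assert(bitfield_swap(1ULL << 63, 63, 1) == 1ULL);
		return 0;
	}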
+static inline bool evsel__has_branch_counters(const struct evsel *evsel)
+{
+ struct evsel *leader = evsel__leader(evsel);
+
+	/* The branch counters feature only supports grouped events */
+ if (!leader || !evsel->evlist)
+ return false;
+
+ if (evsel->evlist->nr_br_cntr < 0)
+ evlist__update_br_cntr(evsel->evlist);
+
+ if (leader->br_cntr_nr > 0)
+ return true;
+
+ return false;
+}
+
+static int __set_offcpu_sample(struct perf_sample *data)
+{
+ u64 *array = data->raw_data;
+ u32 max_size = data->raw_size, *p32;
+ const void *endp = (void *)array + max_size;
+
+ if (array == NULL)
+ return -EFAULT;
+
+ OVERFLOW_CHECK_u64(array);
+ p32 = (void *)array++;
+ data->pid = p32[0];
+ data->tid = p32[1];
+
+ OVERFLOW_CHECK_u64(array);
+ data->period = *array++;
+
+ OVERFLOW_CHECK_u64(array);
+ data->callchain = (struct ip_callchain *)array++;
+ OVERFLOW_CHECK(array, data->callchain->nr * sizeof(u64), max_size);
+ data->ip = data->callchain->ips[1];
+ array += data->callchain->nr;
+
+ OVERFLOW_CHECK_u64(array);
+ data->cgroup = *array;
+
+ return 0;
+}
+
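For reference, the raw off-cpu record decoded by __set_offcpu_sample() has the following shape, written as a struct purely for illustration (the real data is a flat u64 array and the struct name is hypothetical):

	#include <linux/types.h>
	#include <stddef.h>
	#include <stdio.h>

	/* Illustrative layout of the off-cpu raw sample; not an in-tree
	 * struct. */
	struct offcpu_raw_sample {
		__u32 pid;	/* low half of the first u64 */
		__u32 tid;	/* high half of the first u64 */
		__u64 period;	/* accumulated off-cpu time */
		__u64 nr;	/* callchain length ... */
		__u64 ips[];	/* ... then 'nr' entries; ips[1] becomes
				 * the sample ip */
		/* ... followed by one trailing __u64: the cgroup id */
	};

	int main(void)
	{
		printf("%zu bytes precede the callchain entries\n",
		       offsetof(struct offcpu_raw_sample, ips));	/* 24 */
		return 0;
	}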
int evsel__parse_sample(struct evsel *evsel, union perf_event *event,
struct perf_sample *data)
{
@@ -2100,8 +2975,8 @@ int evsel__parse_sample(struct evsel *evsel, union perf_event *event,
data->period = evsel->core.attr.sample_period;
data->cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
data->misc = event->header.misc;
- data->id = -1ULL;
data->data_src = PERF_MEM_DATA_SRC_NONE;
+ data->vcpu = -1;
if (event->header.type != PERF_RECORD_SAMPLE) {
if (!evsel->core.attr.sample_id_all)
@@ -2206,8 +3081,8 @@ int evsel__parse_sample(struct evsel *evsel, union perf_event *event,
if (data->read.group.nr > max_group_nr)
return -EFAULT;
- sz = data->read.group.nr *
- sizeof(struct sample_read_value);
+
+ sz = data->read.group.nr * sample_read_value_size(read_format);
OVERFLOW_CHECK(array, sz, max_size);
data->read.group.values =
(struct sample_read_value *)array;
@@ -2216,6 +3091,12 @@ int evsel__parse_sample(struct evsel *evsel, union perf_event *event,
OVERFLOW_CHECK_u64(array);
data->read.one.id = *array;
array++;
+
+ if (read_format & PERF_FORMAT_LOST) {
+ OVERFLOW_CHECK_u64(array);
+ data->read.one.lost = *array;
+ array++;
+ }
}
}
@@ -2238,7 +3119,7 @@ int evsel__parse_sample(struct evsel *evsel, union perf_event *event,
/*
* Undo swap of u64, then swap on individual u32s,
* get the size of the raw area and undo all of the
- * swap. The pevent interface handles endianity by
+ * swap. The pevent interface handles endianness by
* itself.
*/
if (swapped) {
@@ -2265,6 +3146,8 @@ int evsel__parse_sample(struct evsel *evsel, union perf_event *event,
if (type & PERF_SAMPLE_BRANCH_STACK) {
const u64 max_branch_nr = UINT64_MAX /
sizeof(struct branch_entry);
+ struct branch_entry *e;
+ unsigned int i;
OVERFLOW_CHECK_u64(array);
data->branch_stack = (struct branch_stack *)array++;
@@ -2273,26 +3156,59 @@ int evsel__parse_sample(struct evsel *evsel, union perf_event *event,
return -EFAULT;
sz = data->branch_stack->nr * sizeof(struct branch_entry);
- if (evsel__has_branch_hw_idx(evsel))
+ if (evsel__has_branch_hw_idx(evsel)) {
sz += sizeof(u64);
- else
+ e = &data->branch_stack->entries[0];
+ } else {
data->no_hw_idx = true;
+ /*
+				 * If PERF_SAMPLE_BRANCH_HW_INDEX is not applied,
+				 * the kernel only outputs nr and entries[].
+ */
+ e = (struct branch_entry *)&data->branch_stack->hw_idx;
+ }
+
+ if (swapped) {
+ /*
+ * struct branch_flag does not have endian
+ * specific bit field definition. And bswap
+ * will not resolve the issue, since these
+ * are bit fields.
+ *
+ * evsel__bitfield_swap_branch_flags() uses a
+ * bitfield_swap macro to swap the bit position
+ * based on the host endians.
+ */
+ for (i = 0; i < data->branch_stack->nr; i++, e++)
+ e->flags.value = evsel__bitfield_swap_branch_flags(e->flags.value);
+ }
+
OVERFLOW_CHECK(array, sz, max_size);
array = (void *)array + sz;
+
+ if (evsel__has_branch_counters(evsel)) {
+ data->branch_stack_cntr = (u64 *)array;
+ sz = data->branch_stack->nr * sizeof(u64);
+
+ OVERFLOW_CHECK(array, sz, max_size);
+ array = (void *)array + sz;
+ }
}
if (type & PERF_SAMPLE_REGS_USER) {
+ struct regs_dump *regs = perf_sample__user_regs(data);
+
OVERFLOW_CHECK_u64(array);
- data->user_regs.abi = *array;
+ regs->abi = *array;
array++;
- if (data->user_regs.abi) {
+ if (regs->abi) {
u64 mask = evsel->core.attr.sample_regs_user;
sz = hweight64(mask) * sizeof(u64);
OVERFLOW_CHECK(array, sz, max_size);
- data->user_regs.mask = mask;
- data->user_regs.regs = (u64 *)array;
+ regs->mask = mask;
+ regs->regs = (u64 *)array;
array = (void *)array + sz;
}
}
@@ -2318,9 +3234,9 @@ int evsel__parse_sample(struct evsel *evsel, union perf_event *event,
}
}
- if (type & PERF_SAMPLE_WEIGHT) {
+ if (type & PERF_SAMPLE_WEIGHT_TYPE) {
OVERFLOW_CHECK_u64(array);
- data->weight = *array;
+ arch_perf_parse_sample_weight(data, array, type);
array++;
}
@@ -2336,19 +3252,20 @@ int evsel__parse_sample(struct evsel *evsel, union perf_event *event,
array++;
}
- data->intr_regs.abi = PERF_SAMPLE_REGS_ABI_NONE;
if (type & PERF_SAMPLE_REGS_INTR) {
+ struct regs_dump *regs = perf_sample__intr_regs(data);
+
OVERFLOW_CHECK_u64(array);
- data->intr_regs.abi = *array;
+ regs->abi = *array;
array++;
- if (data->intr_regs.abi != PERF_SAMPLE_REGS_ABI_NONE) {
+ if (regs->abi != PERF_SAMPLE_REGS_ABI_NONE) {
u64 mask = evsel->core.attr.sample_regs_intr;
sz = hweight64(mask) * sizeof(u64);
OVERFLOW_CHECK(array, sz, max_size);
- data->intr_regs.mask = mask;
- data->intr_regs.regs = (u64 *)array;
+ regs->mask = mask;
+ regs->regs = (u64 *)array;
array = (void *)array + sz;
}
}
@@ -2365,6 +3282,18 @@ int evsel__parse_sample(struct evsel *evsel, union perf_event *event,
array++;
}
+ data->data_page_size = 0;
+ if (type & PERF_SAMPLE_DATA_PAGE_SIZE) {
+ data->data_page_size = *array;
+ array++;
+ }
+
+ data->code_page_size = 0;
+ if (type & PERF_SAMPLE_CODE_PAGE_SIZE) {
+ data->code_page_size = *array;
+ array++;
+ }
+
if (type & PERF_SAMPLE_AUX) {
OVERFLOW_CHECK_u64(array);
sz = *array++;
@@ -2378,6 +3307,9 @@ int evsel__parse_sample(struct evsel *evsel, union perf_event *event,
array = (void *)array + sz;
}
+ if (evsel__is_offcpu_event(evsel))
+ return __set_offcpu_sample(data);
+
return 0;
}
@@ -2424,9 +3356,45 @@ int evsel__parse_sample_timestamp(struct evsel *evsel, union perf_event *event,
return 0;
}
+u16 evsel__id_hdr_size(const struct evsel *evsel)
+{
+ u64 sample_type = evsel->core.attr.sample_type;
+ u16 size = 0;
+
+ if (sample_type & PERF_SAMPLE_TID)
+ size += sizeof(u64);
+
+ if (sample_type & PERF_SAMPLE_TIME)
+ size += sizeof(u64);
+
+ if (sample_type & PERF_SAMPLE_ID)
+ size += sizeof(u64);
+
+ if (sample_type & PERF_SAMPLE_STREAM_ID)
+ size += sizeof(u64);
+
+ if (sample_type & PERF_SAMPLE_CPU)
+ size += sizeof(u64);
+
+ if (sample_type & PERF_SAMPLE_IDENTIFIER)
+ size += sizeof(u64);
+
+ return size;
+}
+
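A quick worked example for evsel__id_hdr_size(): with sample_id_all and sample_type = TID | TIME | IDENTIFIER, every side-band event carries a 24-byte id trailer that readers must account for. The same accumulation, standalone:

	#include <assert.h>
	#include <linux/perf_event.h>
	#include <linux/types.h>

	int main(void)
	{
		__u64 st = PERF_SAMPLE_TID | PERF_SAMPLE_TIME | PERF_SAMPLE_IDENTIFIER;
		unsigned int size = 0;

		/* Same accumulation as evsel__id_hdr_size(). */
		if (st & PERF_SAMPLE_TID)        size += 8;
		if (st & PERF_SAMPLE_TIME)       size += 8;
		if (st & PERF_SAMPLE_ID)         size += 8;
		if (st & PERF_SAMPLE_STREAM_ID)  size += 8;
		if (st & PERF_SAMPLE_CPU)        size += 8;
		if (st & PERF_SAMPLE_IDENTIFIER) size += 8;

		assert(size == 24);	/* three trailing u64s per event */
		return 0;
	}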
+#ifdef HAVE_LIBTRACEEVENT
struct tep_format_field *evsel__field(struct evsel *evsel, const char *name)
{
- return tep_find_field(evsel->tp_format, name);
+ struct tep_event *tp_format = evsel__tp_format(evsel);
+
+ return tp_format ? tep_find_field(tp_format, name) : NULL;
+}
+
+struct tep_format_field *evsel__common_field(struct evsel *evsel, const char *name)
+{
+ struct tep_event *tp_format = evsel__tp_format(evsel);
+
+ return tp_format ? tep_find_common_field(tp_format, name) : NULL;
}
void *evsel__rawptr(struct evsel *evsel, struct perf_sample *sample, const char *name)
@@ -2442,6 +3410,8 @@ void *evsel__rawptr(struct evsel *evsel, struct perf_sample *sample, const char
if (field->flags & TEP_FIELD_IS_DYNAMIC) {
offset = *(int *)(sample->raw_data + field->offset);
offset &= 0xffff;
+ if (tep_field_is_relative(field->flags))
+ offset += field->offset + field->size;
}
return sample->raw_data + offset;
@@ -2490,13 +3460,53 @@ u64 evsel__intval(struct evsel *evsel, struct perf_sample *sample, const char *n
{
struct tep_format_field *field = evsel__field(evsel, name);
- if (!field)
- return 0;
+ return field ? format_field__intval(field, sample, evsel->needs_swap) : 0;
+}
+
+u64 evsel__intval_common(struct evsel *evsel, struct perf_sample *sample, const char *name)
+{
+ struct tep_format_field *field = evsel__common_field(evsel, name);
return field ? format_field__intval(field, sample, evsel->needs_swap) : 0;
}
-bool evsel__fallback(struct evsel *evsel, int err, char *msg, size_t msgsize)
+char evsel__taskstate(struct evsel *evsel, struct perf_sample *sample, const char *name)
+{
+ static struct tep_format_field *prev_state_field;
+ static const char *states;
+ struct tep_format_field *field;
+ unsigned long long val;
+ unsigned int bit;
+ char state = '?'; /* '?' denotes unknown task state */
+
+ field = evsel__field(evsel, name);
+
+ if (!field)
+ return state;
+
+ if (!states || field != prev_state_field) {
+ states = parse_task_states(field);
+ if (!states)
+ return state;
+ prev_state_field = field;
+ }
+
+ /*
+ * Note since the kernel exposes TASK_REPORT_MAX to userspace
+	 * to denote the 'preempted' state, we might as well report
+	 * 'R' for this case, which makes sense to users as well.
+ *
+ * We can change this if we have a good reason in the future.
+ */
+ val = evsel__intval(evsel, sample, name);
+ bit = val ? ffs(val) : 0;
+ state = (!bit || bit > strlen(states)) ? 'R' : states[bit-1];
+ return state;
+}
+#endif
+
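The ffs()-based state lookup in evsel__taskstate() is easy to check in isolation. A sketch with a hypothetical states string (the real one comes from parse_task_states() on the tracepoint field); note how 0 and out-of-range bits fall back to 'R', matching the TASK_REPORT_MAX comment above:

	#include <stdio.h>
	#include <string.h>
	#include <strings.h>

	int main(void)
	{
		const char *states = "SDTtXZPI";	/* hypothetical order */
		unsigned long long vals[] = { 0x0, 0x1, 0x2, 0x100 };

		for (int i = 0; i < 4; i++) {
			unsigned int bit = vals[i] ? ffs(vals[i]) : 0;
			char state = (!bit || bit > strlen(states)) ? 'R'
								    : states[bit - 1];

			/* 0x0 -> R, 0x1 -> S, 0x2 -> D, 0x100 -> R */
			printf("prev_state=%#llx -> %c\n", vals[i], state);
		}
		return 0;
	}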
+bool evsel__fallback(struct evsel *evsel, struct target *target, int err,
+ char *msg, size_t msgsize)
{
int paranoid;
@@ -2504,18 +3514,19 @@ bool evsel__fallback(struct evsel *evsel, int err, char *msg, size_t msgsize)
evsel->core.attr.type == PERF_TYPE_HARDWARE &&
evsel->core.attr.config == PERF_COUNT_HW_CPU_CYCLES) {
/*
- * If it's cycles then fall back to hrtimer based
- * cpu-clock-tick sw counter, which is always available even if
- * no PMU support.
+ * If it's cycles then fall back to hrtimer based cpu-clock sw
+ * counter, which is always available even if no PMU support.
*
* PPC returns ENXIO until 2.6.37 (behavior changed with commit
* b0a873e).
*/
- scnprintf(msg, msgsize, "%s",
-"The cycles event is not supported, trying to fall back to cpu-clock-ticks");
-
evsel->core.attr.type = PERF_TYPE_SOFTWARE;
- evsel->core.attr.config = PERF_COUNT_SW_CPU_CLOCK;
+ evsel->core.attr.config = target__has_cpu(target)
+ ? PERF_COUNT_SW_CPU_CLOCK
+ : PERF_COUNT_SW_TASK_CLOCK;
+ scnprintf(msg, msgsize,
+ "The cycles event is not supported, trying to fall back to %s",
+ target__has_cpu(target) ? "cpu-clock" : "task-clock");
zfree(&evsel->name);
return true;
@@ -2537,8 +3548,7 @@ bool evsel__fallback(struct evsel *evsel, int err, char *msg, size_t msgsize)
if (asprintf(&new_name, "%s%su", name, sep) < 0)
return false;
- if (evsel->name)
- free(evsel->name);
+ free(evsel->name);
evsel->name = new_name;
scnprintf(msg, msgsize, "kernel.perf_event_paranoid=%d, trying "
"to fall back to excluding kernel and hypervisor "
@@ -2547,6 +3557,27 @@ bool evsel__fallback(struct evsel *evsel, int err, char *msg, size_t msgsize)
evsel->core.attr.exclude_hv = 1;
return true;
+ } else if (err == EOPNOTSUPP && !evsel->core.attr.exclude_guest &&
+ !evsel->exclude_GH) {
+ const char *name = evsel__name(evsel);
+ char *new_name;
+ const char *sep = ":";
+
+		/* Is the separator already in the name? */
+ if (strchr(name, '/') ||
+ (strchr(name, ':') && !evsel->is_libpfm_event))
+ sep = "";
+
+ if (asprintf(&new_name, "%s%sH", name, sep) < 0)
+ return false;
+
+ free(evsel->name);
+ evsel->name = new_name;
+ /* Apple M1 requires exclude_guest */
+ scnprintf(msg, msgsize, "trying to fall back to excluding guest samples");
+ evsel->core.attr.exclude_guest = 1;
+
+ return true;
}
return false;
@@ -2588,11 +3619,91 @@ static bool find_process(const char *name)
return ret ? false : true;
}
+static int dump_perf_event_processes(char *msg, size_t size)
+{
+ DIR *proc_dir;
+ struct dirent *proc_entry;
+ int printed = 0;
+
+ proc_dir = opendir(procfs__mountpoint());
+ if (!proc_dir)
+ return 0;
+
+ /* Walk through the /proc directory. */
+ while ((proc_entry = readdir(proc_dir)) != NULL) {
+ char buf[256];
+ DIR *fd_dir;
+ struct dirent *fd_entry;
+ int fd_dir_fd;
+
+ if (proc_entry->d_type != DT_DIR ||
+ !isdigit(proc_entry->d_name[0]) ||
+ strlen(proc_entry->d_name) > sizeof(buf) - 4)
+ continue;
+
+ scnprintf(buf, sizeof(buf), "%s/fd", proc_entry->d_name);
+ fd_dir_fd = openat(dirfd(proc_dir), buf, O_DIRECTORY);
+ if (fd_dir_fd == -1)
+ continue;
+ fd_dir = fdopendir(fd_dir_fd);
+ if (!fd_dir) {
+ close(fd_dir_fd);
+ continue;
+ }
+ while ((fd_entry = readdir(fd_dir)) != NULL) {
+ ssize_t link_size;
+
+ if (fd_entry->d_type != DT_LNK)
+ continue;
+ link_size = readlinkat(fd_dir_fd, fd_entry->d_name, buf, sizeof(buf));
+ if (link_size < 0)
+ continue;
+ /* Take care as readlink doesn't null terminate the string. */
+ if (!strncmp(buf, "anon_inode:[perf_event]", link_size)) {
+ int cmdline_fd;
+ ssize_t cmdline_size;
+
+ scnprintf(buf, sizeof(buf), "%s/cmdline", proc_entry->d_name);
+ cmdline_fd = openat(dirfd(proc_dir), buf, O_RDONLY);
+ if (cmdline_fd == -1)
+ continue;
+ cmdline_size = read(cmdline_fd, buf, sizeof(buf) - 1);
+ close(cmdline_fd);
+ if (cmdline_size < 0)
+ continue;
+ buf[cmdline_size] = '\0';
+ for (ssize_t i = 0; i < cmdline_size; i++) {
+ if (buf[i] == '\0')
+ buf[i] = ' ';
+ }
+
+ if (printed == 0)
+ printed += scnprintf(msg, size, "Possible processes:\n");
+
+ printed += scnprintf(msg + printed, size - printed,
+ "%s %s\n", proc_entry->d_name, buf);
+ break;
+ }
+ }
+ closedir(fd_dir);
+ }
+ closedir(proc_dir);
+ return printed;
+}
+
+int __weak arch_evsel__open_strerror(struct evsel *evsel __maybe_unused,
+ char *msg __maybe_unused,
+ size_t size __maybe_unused)
+{
+ return 0;
+}
+
int evsel__open_strerror(struct evsel *evsel, struct target *target,
int err, char *msg, size_t size)
{
char sbuf[STRERR_BUFSIZE];
int printed = 0, enforced = 0;
+ int ret;
switch (err) {
case EPERM:
@@ -2613,7 +3724,7 @@ int evsel__open_strerror(struct evsel *evsel, struct target *target,
printed += scnprintf(msg, size,
"No permission to enable %s event.\n\n", evsel__name(evsel));
- return scnprintf(msg + printed, size - printed,
+ return printed + scnprintf(msg + printed, size - printed,
"Consider adjusting /proc/sys/kernel/perf_event_paranoid setting to open\n"
"access to performance monitoring and observability operations for processes\n"
"without CAP_PERFMON, CAP_SYS_PTRACE or CAP_SYS_ADMIN Linux capability.\n"
@@ -2650,10 +3761,18 @@ int evsel__open_strerror(struct evsel *evsel, struct target *target,
"No such device - did you specify an out-of-range profile CPU?");
break;
case EOPNOTSUPP:
+ if (evsel->core.attr.sample_type & PERF_SAMPLE_BRANCH_STACK)
+ return scnprintf(msg, size,
+ "%s: PMU Hardware or event type doesn't support branch stack sampling.",
+ evsel__name(evsel));
if (evsel->core.attr.aux_output)
return scnprintf(msg, size,
"%s: PMU Hardware doesn't support 'aux_output' feature",
evsel__name(evsel));
+ if (evsel->core.attr.aux_action)
+ return scnprintf(msg, size,
+ "%s: PMU Hardware doesn't support 'aux_action' feature",
+ evsel__name(evsel));
if (evsel->core.attr.sample_period != 0)
return scnprintf(msg, size,
"%s: PMU Hardware doesn't support sampling/overflow-interrupts. Try 'perf stat'",
@@ -2666,51 +3785,81 @@ int evsel__open_strerror(struct evsel *evsel, struct target *target,
return scnprintf(msg, size, "%s",
"No hardware sampling interrupt available.\n");
#endif
+ if (!target__has_cpu(target))
+ return scnprintf(msg, size,
+ "Unsupported event (%s) in per-thread mode, enable system wide with '-a'.",
+ evsel__name(evsel));
break;
case EBUSY:
if (find_process("oprofiled"))
return scnprintf(msg, size,
"The PMU counters are busy/taken by another profiler.\n"
"We found oprofile daemon running, please stop it and try again.");
+ printed += scnprintf(
+ msg, size,
+ "The PMU %s counters are busy and in use by another process.\n",
+ evsel->pmu ? evsel->pmu->name : "");
+ return printed + dump_perf_event_processes(msg + printed, size - printed);
break;
case EINVAL:
+ if (evsel->core.attr.sample_type & PERF_SAMPLE_CODE_PAGE_SIZE && perf_missing_features.code_page_size)
+ return scnprintf(msg, size, "Asking for the code page size isn't supported by this kernel.");
+ if (evsel->core.attr.sample_type & PERF_SAMPLE_DATA_PAGE_SIZE && perf_missing_features.data_page_size)
+ return scnprintf(msg, size, "Asking for the data page size isn't supported by this kernel.");
if (evsel->core.attr.write_backward && perf_missing_features.write_backward)
return scnprintf(msg, size, "Reading from overwrite event is not supported by this kernel.");
if (perf_missing_features.clockid)
return scnprintf(msg, size, "clockid feature not supported.");
if (perf_missing_features.clockid_wrong)
return scnprintf(msg, size, "wrong clockid (%d).", clockid);
+ if (perf_missing_features.aux_action)
+ return scnprintf(msg, size, "The 'aux_action' feature is not supported, update the kernel.");
if (perf_missing_features.aux_output)
return scnprintf(msg, size, "The 'aux_output' feature is not supported, update the kernel.");
+ if (!target__has_cpu(target))
+ return scnprintf(msg, size,
+ "Invalid event (%s) in per-thread mode, enable system wide with '-a'.",
+ evsel__name(evsel));
+
break;
+ case ENODATA:
+ return scnprintf(msg, size, "Cannot collect data source with the load latency event alone. "
+ "Please add an auxiliary event in front of the load latency event.");
default:
break;
}
+ ret = arch_evsel__open_strerror(evsel, msg, size);
+ if (ret)
+ return ret;
+
return scnprintf(msg, size,
"The sys_perf_event_open() syscall returned with %d (%s) for event (%s).\n"
- "/bin/dmesg | grep -i perf may provide additional information.\n",
+ "\"dmesg | grep -i perf\" may provide additional information.\n",
err, str_error_r(err, sbuf, sizeof(sbuf)), evsel__name(evsel));
}
struct perf_env *evsel__env(struct evsel *evsel)
{
- if (evsel && evsel->evlist)
+ if (evsel && evsel->evlist && evsel->evlist->env)
return evsel->evlist->env;
return &perf_env;
}
static int store_evsel_ids(struct evsel *evsel, struct evlist *evlist)
{
- int cpu, thread;
+ int cpu_map_idx, thread;
- for (cpu = 0; cpu < xyarray__max_x(evsel->core.fd); cpu++) {
+ if (evsel__is_retire_lat(evsel))
+ return 0;
+
+ for (cpu_map_idx = 0; cpu_map_idx < xyarray__max_x(evsel->core.fd); cpu_map_idx++) {
for (thread = 0; thread < xyarray__max_y(evsel->core.fd);
thread++) {
- int fd = FD(evsel, cpu, thread);
+ int fd = FD(evsel, cpu_map_idx, thread);
if (perf_evlist__id_add_fd(&evlist->core, &evsel->core,
- cpu, thread, fd) < 0)
+ cpu_map_idx, thread, fd) < 0)
return -1;
}
}
@@ -2723,8 +3872,202 @@ int evsel__store_ids(struct evsel *evsel, struct evlist *evlist)
struct perf_cpu_map *cpus = evsel->core.cpus;
struct perf_thread_map *threads = evsel->core.threads;
- if (perf_evsel__alloc_id(&evsel->core, cpus->nr, threads->nr))
+ if (perf_evsel__alloc_id(&evsel->core, perf_cpu_map__nr(cpus), threads->nr))
return -ENOMEM;
return store_evsel_ids(evsel, evlist);
}
+
+void evsel__zero_per_pkg(struct evsel *evsel)
+{
+ struct hashmap_entry *cur;
+ size_t bkt;
+
+ if (evsel->per_pkg_mask) {
+ hashmap__for_each_entry(evsel->per_pkg_mask, cur, bkt)
+ zfree(&cur->pkey);
+
+ hashmap__clear(evsel->per_pkg_mask);
+ }
+}
+
+/**
+ * evsel__is_hybrid - does the evsel have a known PMU that is hybrid? Note, this
+ * will be false on hybrid systems for hardware and legacy
+ * cache events.
+ */
+bool evsel__is_hybrid(const struct evsel *evsel)
+{
+ if (!evsel->core.is_pmu_core)
+ return false;
+
+ return perf_pmus__num_core_pmus() > 1;
+}
+
+struct evsel *evsel__leader(const struct evsel *evsel)
+{
+ return container_of(evsel->core.leader, struct evsel, core);
+}
+
+bool evsel__has_leader(struct evsel *evsel, struct evsel *leader)
+{
+ return evsel->core.leader == &leader->core;
+}
+
+bool evsel__is_leader(struct evsel *evsel)
+{
+ return evsel__has_leader(evsel, evsel);
+}
+
+void evsel__set_leader(struct evsel *evsel, struct evsel *leader)
+{
+ evsel->core.leader = &leader->core;
+}
+
+int evsel__source_count(const struct evsel *evsel)
+{
+ struct evsel *pos;
+ int count = 0;
+
+ evlist__for_each_entry(evsel->evlist, pos) {
+ if (pos->metric_leader == evsel)
+ count++;
+ }
+ return count;
+}
+
+bool __weak arch_evsel__must_be_in_group(const struct evsel *evsel __maybe_unused)
+{
+ return false;
+}
+
+/*
+ * Remove an event from a given group (leader).
+ * Some events, e.g., perf metrics Topdown events,
+ * must always be grouped, so those are left in the group.
+ */
+void evsel__remove_from_group(struct evsel *evsel, struct evsel *leader)
+{
+ if (!arch_evsel__must_be_in_group(evsel) && evsel != leader) {
+ evsel__set_leader(evsel, evsel);
+ evsel->core.nr_members = 0;
+ leader->core.nr_members--;
+ }
+}
+
+bool evsel__set_needs_uniquify(struct evsel *counter, const struct perf_stat_config *config)
+{
+ struct evsel *evsel;
+
+ if (counter->needs_uniquify) {
+ /* Already set. */
+ return true;
+ }
+
+ if (counter->use_config_name || counter->is_libpfm_event) {
+ /* Original name will be used. */
+ return false;
+ }
+
+ if (!config->hybrid_merge && evsel__is_hybrid(counter)) {
+ /* Unique hybrid counters necessary. */
+ counter->needs_uniquify = true;
+ return true;
+ }
+
+ if (counter->core.attr.type < PERF_TYPE_MAX && counter->core.attr.type != PERF_TYPE_RAW) {
+ /* Legacy event, don't uniquify. */
+ return false;
+ }
+
+ if (counter->pmu && counter->pmu->is_core &&
+ counter->alternate_hw_config != PERF_COUNT_HW_MAX) {
+ /* A sysfs or json event replacing a legacy event, don't uniquify. */
+ return false;
+ }
+
+ if (config->aggr_mode == AGGR_NONE) {
+ /* Always unique with no aggregation. */
+ counter->needs_uniquify = true;
+ return true;
+ }
+
+ if (counter->first_wildcard_match != NULL) {
+ /*
+ * If stats are merged then only the first_wildcard_match is
+ * displayed, there is no need to uniquify this evsel as the
+ * name won't be shown.
+ */
+ return false;
+ }
+
+ /*
+ * Do other non-merged events in the evlist have the same name? If so
+ * uniquify is necessary.
+ */
+ evlist__for_each_entry(counter->evlist, evsel) {
+ if (evsel == counter || evsel->first_wildcard_match || evsel->pmu == counter->pmu)
+ continue;
+
+ if (evsel__name_is(counter, evsel__name(evsel))) {
+ counter->needs_uniquify = true;
+ return true;
+ }
+ }
+ return false;
+}
+
+void evsel__uniquify_counter(struct evsel *counter)
+{
+ const char *name, *pmu_name;
+ char *new_name, *config;
+ int ret;
+
+ /* No uniquification necessary. */
+ if (!counter->needs_uniquify)
+ return;
+
+ /* The evsel was already uniquified. */
+ if (counter->uniquified_name)
+ return;
+
+ /* Avoid checking to uniquify twice. */
+ counter->uniquified_name = true;
+
+ name = evsel__name(counter);
+ pmu_name = counter->pmu->name;
+ /* Already prefixed by the PMU name. */
+ if (!strncmp(name, pmu_name, strlen(pmu_name)))
+ return;
+
+ config = strchr(name, '/');
+ if (config) {
+ int len = config - name;
+
+ if (config[1] == '/') {
+ /* case: event// */
+ ret = asprintf(&new_name, "%s/%.*s/%s", pmu_name, len, name, config + 2);
+ } else {
+ /* case: event/.../ */
+ ret = asprintf(&new_name, "%s/%.*s,%s", pmu_name, len, name, config + 1);
+ }
+ } else {
+ config = strchr(name, ':');
+ if (config) {
+ /* case: event:.. */
+ int len = config - name;
+
+ ret = asprintf(&new_name, "%s/%.*s/%s", pmu_name, len, name, config + 1);
+ } else {
+ /* case: event */
+ ret = asprintf(&new_name, "%s/%s/", pmu_name, name);
+ }
+ }
+ if (ret > 0) {
+ free(counter->name);
+ counter->name = new_name;
+ } else {
+ /* ENOMEM from asprintf. */
+ counter->uniquified_name = false;
+ }
+}
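The four rewrite shapes in evsel__uniquify_counter() are easiest to see with concrete inputs. A standalone sketch that copies the branch logic; the event names and the PMU name are hypothetical examples:

	#define _GNU_SOURCE
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	/* Same branch structure as evsel__uniquify_counter(), reduced to
	 * a pure string transformation. */
	static char *uniquify(const char *name, const char *pmu_name)
	{
		char *new_name = NULL;
		const char *config = strchr(name, '/');
		int ret;

		if (config) {
			int len = config - name;

			if (config[1] == '/')	/* case: event// */
				ret = asprintf(&new_name, "%s/%.*s/%s",
					       pmu_name, len, name, config + 2);
			else			/* case: event/.../ */
				ret = asprintf(&new_name, "%s/%.*s,%s",
					       pmu_name, len, name, config + 1);
		} else {
			config = strchr(name, ':');
			if (config) {		/* case: event:mod */
				int len = config - name;

				ret = asprintf(&new_name, "%s/%.*s/%s",
					       pmu_name, len, name, config + 1);
			} else {		/* case: bare event */
				ret = asprintf(&new_name, "%s/%s/", pmu_name, name);
			}
		}
		return ret > 0 ? new_name : NULL;
	}

	int main(void)
	{
		/* Hypothetical inputs showing each rewrite shape. */
		const char *names[] = {
			"cycles",			/* -> cpu_atom/cycles/ */
			"cycles:u",			/* -> cpu_atom/cycles/u */
			"cycles//",			/* -> cpu_atom/cycles/ */
			"inst_retired.any/period=1000/",
				/* -> cpu_atom/inst_retired.any,period=1000/ */
		};

		for (size_t i = 0; i < sizeof(names) / sizeof(names[0]); i++) {
			char *n = uniquify(names[i], "cpu_atom");

			printf("%-32s -> %s\n", names[i], n ? n : "(alloc failed)");
			free(n);
		}
		return 0;
	}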