Diffstat (limited to 'tools/perf/arch/x86/util')
-rw-r--r-- | tools/perf/arch/x86/util/Build           |   6
-rw-r--r-- | tools/perf/arch/x86/util/archinsn.c      |  11
-rw-r--r-- | tools/perf/arch/x86/util/cpuid.h         |  34
-rw-r--r-- | tools/perf/arch/x86/util/event.c         |  27
-rw-r--r-- | tools/perf/arch/x86/util/evlist.c        |  93
-rw-r--r-- | tools/perf/arch/x86/util/evsel.c         | 135
-rw-r--r-- | tools/perf/arch/x86/util/evsel.h         |   7
-rw-r--r-- | tools/perf/arch/x86/util/group.c         |  28
-rw-r--r-- | tools/perf/arch/x86/util/header.c        |  27
-rw-r--r-- | tools/perf/arch/x86/util/intel-bts.c     |  11
-rw-r--r-- | tools/perf/arch/x86/util/intel-pt.c      | 133
-rw-r--r-- | tools/perf/arch/x86/util/iostat.c        | 470
-rw-r--r-- | tools/perf/arch/x86/util/kvm-stat.c      |  58
-rw-r--r-- | tools/perf/arch/x86/util/mem-events.c    | 107
-rw-r--r-- | tools/perf/arch/x86/util/perf_regs.c     |  16
-rw-r--r-- | tools/perf/arch/x86/util/pmu.c           | 155
-rw-r--r-- | tools/perf/arch/x86/util/topdown.c       | 126
-rw-r--r-- | tools/perf/arch/x86/util/topdown.h       |   8
-rw-r--r-- | tools/perf/arch/x86/util/tsc.c           | 130
-rw-r--r-- | tools/perf/arch/x86/util/unwind-libdw.c  |   6
20 files changed, 1401 insertions, 187 deletions
diff --git a/tools/perf/arch/x86/util/Build b/tools/perf/arch/x86/util/Build index 47f9c56e744f..dbeb04cb336e 100644 --- a/tools/perf/arch/x86/util/Build +++ b/tools/perf/arch/x86/util/Build @@ -3,9 +3,13 @@ perf-y += tsc.o perf-y += pmu.o perf-y += kvm-stat.o perf-y += perf_regs.o -perf-y += group.o +perf-y += topdown.o perf-y += machine.o perf-y += event.o +perf-y += evlist.o +perf-y += mem-events.o +perf-y += evsel.o +perf-y += iostat.o perf-$(CONFIG_DWARF) += dwarf-regs.o perf-$(CONFIG_BPF_PROLOGUE) += dwarf-regs.o diff --git a/tools/perf/arch/x86/util/archinsn.c b/tools/perf/arch/x86/util/archinsn.c index 3e6791531ca5..546feda08428 100644 --- a/tools/perf/arch/x86/util/archinsn.c +++ b/tools/perf/arch/x86/util/archinsn.c @@ -1,17 +1,17 @@ // SPDX-License-Identifier: GPL-2.0 -#include "../../../../arch/x86/include/asm/insn.h" #include "archinsn.h" #include "event.h" #include "machine.h" #include "thread.h" #include "symbol.h" +#include "../../../../arch/x86/include/asm/insn.h" void arch_fetch_insn(struct perf_sample *sample, struct thread *thread, struct machine *machine) { struct insn insn; - int len; + int len, ret; bool is64bit = false; if (!sample->ip) @@ -19,8 +19,9 @@ void arch_fetch_insn(struct perf_sample *sample, len = thread__memcpy(thread, machine, sample->insn, sample->ip, sizeof(sample->insn), &is64bit); if (len <= 0) return; - insn_init(&insn, sample->insn, len, is64bit); - insn_get_length(&insn); - if (insn_complete(&insn) && insn.length <= len) + + ret = insn_decode(&insn, sample->insn, len, + is64bit ? INSN_MODE_64 : INSN_MODE_32); + if (ret >= 0 && insn.length <= len) sample->insn_len = insn.length; } diff --git a/tools/perf/arch/x86/util/cpuid.h b/tools/perf/arch/x86/util/cpuid.h new file mode 100644 index 000000000000..0a3ae0ace7e9 --- /dev/null +++ b/tools/perf/arch/x86/util/cpuid.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef PERF_CPUID_H +#define PERF_CPUID_H 1 + + +static inline void +cpuid(unsigned int op, unsigned int op2, unsigned int *a, unsigned int *b, + unsigned int *c, unsigned int *d) +{ + /* + * Preserve %ebx/%rbx register by either placing it in %rdi or saving it + * on the stack - x86-64 needs to avoid the stack red zone. In PIC + * compilations %ebx contains the address of the global offset + * table. %rbx is occasionally used to address stack variables in + * presence of dynamic allocas. 
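As an aside on the cpuid() helper being defined here: it is the building block for the vendor/family probes reworked in the header.c hunk further down. A minimal sketch of a caller (a hypothetical standalone program, not part of the patch) that mirrors get_cpuid_0(): leaf 0 returns the highest supported basic leaf in EAX and the 12-byte vendor string scattered across EBX, EDX and ECX, in that order.

#include <stdio.h>
#include <string.h>
#include "cpuid.h"

int main(void)
{
        unsigned int lvl, b, c, d;
        char vendor[13];

        cpuid(0, 0, &lvl, &b, &c, &d);
        memcpy(&vendor[0], &b, 4);      /* "Genu" or "Auth" ... */
        memcpy(&vendor[4], &d, 4);      /* "ineI" or "enti" ... */
        memcpy(&vendor[8], &c, 4);      /* "ntel" or "cAMD" ... */
        vendor[12] = '\0';
        printf("vendor: %s, max basic leaf: %u\n", vendor, lvl);
        return 0;
}
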
+ */ + asm( +#if defined(__x86_64__) + "mov %%rbx, %%rdi\n" + "cpuid\n" + "xchg %%rdi, %%rbx\n" +#else + "pushl %%ebx\n" + "cpuid\n" + "movl %%ebx, %%edi\n" + "popl %%ebx\n" +#endif + : "=a"(*a), "=D"(*b), "=c"(*c), "=d"(*d) + : "a"(op), "2"(op2)); +} + +void get_cpuid_0(char *vendor, unsigned int *lvl); + +#endif diff --git a/tools/perf/arch/x86/util/event.c b/tools/perf/arch/x86/util/event.c index 047dc00eafa6..e670f3547581 100644 --- a/tools/perf/arch/x86/util/event.c +++ b/tools/perf/arch/x86/util/event.c @@ -18,7 +18,7 @@ int perf_event__synthesize_extra_kmaps(struct perf_tool *tool, { int rc = 0; struct map *pos; - struct maps *kmaps = &machine->kmaps; + struct maps *kmaps = machine__kernel_maps(machine); union perf_event *event = zalloc(sizeof(event->mmap) + machine->id_hdr_size); @@ -75,3 +75,28 @@ int perf_event__synthesize_extra_kmaps(struct perf_tool *tool, } #endif + +void arch_perf_parse_sample_weight(struct perf_sample *data, + const __u64 *array, u64 type) +{ + union perf_sample_weight weight; + + weight.full = *array; + if (type & PERF_SAMPLE_WEIGHT) + data->weight = weight.full; + else { + data->weight = weight.var1_dw; + data->ins_lat = weight.var2_w; + } +} + +void arch_perf_synthesize_sample_weight(const struct perf_sample *data, + __u64 *array, u64 type) +{ + *array = data->weight; + + if (type & PERF_SAMPLE_WEIGHT_STRUCT) { + *array &= 0xffffffff; + *array |= ((u64)data->ins_lat << 32); + } +} diff --git a/tools/perf/arch/x86/util/evlist.c b/tools/perf/arch/x86/util/evlist.c new file mode 100644 index 000000000000..cb59ce9b9638 --- /dev/null +++ b/tools/perf/arch/x86/util/evlist.c @@ -0,0 +1,93 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <stdio.h> +#include "util/pmu.h" +#include "util/evlist.h" +#include "util/parse-events.h" +#include "util/event.h" +#include "util/pmu-hybrid.h" +#include "topdown.h" + +static int ___evlist__add_default_attrs(struct evlist *evlist, + struct perf_event_attr *attrs, + size_t nr_attrs) +{ + struct perf_cpu_map *cpus; + struct evsel *evsel, *n; + struct perf_pmu *pmu; + LIST_HEAD(head); + size_t i = 0; + + for (i = 0; i < nr_attrs; i++) + event_attr_init(attrs + i); + + if (!perf_pmu__has_hybrid()) + return evlist__add_attrs(evlist, attrs, nr_attrs); + + for (i = 0; i < nr_attrs; i++) { + if (attrs[i].type == PERF_TYPE_SOFTWARE) { + evsel = evsel__new(attrs + i); + if (evsel == NULL) + goto out_delete_partial_list; + list_add_tail(&evsel->core.node, &head); + continue; + } + + perf_pmu__for_each_hybrid_pmu(pmu) { + evsel = evsel__new(attrs + i); + if (evsel == NULL) + goto out_delete_partial_list; + evsel->core.attr.config |= (__u64)pmu->type << PERF_PMU_TYPE_SHIFT; + cpus = perf_cpu_map__get(pmu->cpus); + evsel->core.cpus = cpus; + evsel->core.own_cpus = perf_cpu_map__get(cpus); + evsel->pmu_name = strdup(pmu->name); + list_add_tail(&evsel->core.node, &head); + } + } + + evlist__splice_list_tail(evlist, &head); + + return 0; + +out_delete_partial_list: + __evlist__for_each_entry_safe(&head, n, evsel) + evsel__delete(evsel); + return -1; +} + +int arch_evlist__add_default_attrs(struct evlist *evlist, + struct perf_event_attr *attrs, + size_t nr_attrs) +{ + if (nr_attrs) + return ___evlist__add_default_attrs(evlist, attrs, nr_attrs); + + return topdown_parse_events(evlist); +} + +struct evsel *arch_evlist__leader(struct list_head *list) +{ + struct evsel *evsel, *first, *slots = NULL; + bool has_topdown = false; + + first = list_first_entry(list, struct evsel, core.node); + + if (!topdown_sys_has_perf_metrics()) + return first; + + 
/* If there is a slots event and a topdown event then the slots event comes first. */ + __evlist__for_each_entry(list, evsel) { + if (evsel->pmu_name && !strncmp(evsel->pmu_name, "cpu", 3) && evsel->name) { + if (strcasestr(evsel->name, "slots")) { + slots = evsel; + if (slots == first) + return first; + } + if (strcasestr(evsel->name, "topdown")) + has_topdown = true; + if (slots && has_topdown) + return slots; + } + } + return first; +} diff --git a/tools/perf/arch/x86/util/evsel.c b/tools/perf/arch/x86/util/evsel.c new file mode 100644 index 000000000000..ea3972d785d1 --- /dev/null +++ b/tools/perf/arch/x86/util/evsel.c @@ -0,0 +1,135 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <stdio.h> +#include <stdlib.h> +#include "util/evsel.h" +#include "util/env.h" +#include "util/pmu.h" +#include "linux/string.h" +#include "evsel.h" +#include "util/debug.h" + +#define IBS_FETCH_L3MISSONLY (1ULL << 59) +#define IBS_OP_L3MISSONLY (1ULL << 16) + +void arch_evsel__set_sample_weight(struct evsel *evsel) +{ + evsel__set_sample_bit(evsel, WEIGHT_STRUCT); +} + +void arch_evsel__fixup_new_cycles(struct perf_event_attr *attr) +{ + struct perf_env env = { .total_mem = 0, } ; + + if (!perf_env__cpuid(&env)) + return; + + /* + * On AMD, precise cycles event sampling internally uses IBS pmu. + * But IBS does not have filtering capabilities and perf by default + * sets exclude_guest = 1. This makes IBS pmu event init fail and + * thus perf ends up doing non-precise sampling. Avoid it by clearing + * exclude_guest. + */ + if (env.cpuid && strstarts(env.cpuid, "AuthenticAMD")) + attr->exclude_guest = 0; + + free(env.cpuid); +} + +/* Check whether the evsel's PMU supports the perf metrics */ +bool evsel__sys_has_perf_metrics(const struct evsel *evsel) +{ + const char *pmu_name = evsel->pmu_name ? evsel->pmu_name : "cpu"; + + /* + * The PERF_TYPE_RAW type is the core PMU type, e.g., "cpu" PMU + * on a non-hybrid machine, "cpu_core" PMU on a hybrid machine. + * The slots event is only available for the core PMU, which + * supports the perf metrics feature. + * Checking both the PERF_TYPE_RAW type and the slots event + * should be good enough to detect the perf metrics feature. + */ + if ((evsel->core.attr.type == PERF_TYPE_RAW) && + pmu_have_event(pmu_name, "slots")) + return true; + + return false; +} + +bool arch_evsel__must_be_in_group(const struct evsel *evsel) +{ + if (!evsel__sys_has_perf_metrics(evsel)) + return false; + + return evsel->name && + (strcasestr(evsel->name, "slots") || + strcasestr(evsel->name, "topdown")); +} + +int arch_evsel__hw_name(struct evsel *evsel, char *bf, size_t size) +{ + u64 event = evsel->core.attr.config & PERF_HW_EVENT_MASK; + u64 pmu = evsel->core.attr.config >> PERF_PMU_TYPE_SHIFT; + const char *event_name; + + if (event < PERF_COUNT_HW_MAX && evsel__hw_names[event]) + event_name = evsel__hw_names[event]; + else + event_name = "unknown-hardware"; + + /* The PMU type is not required for the non-hybrid platform. */ + if (!pmu) + return scnprintf(bf, size, "%s", event_name); + + return scnprintf(bf, size, "%s/%s/", + evsel->pmu_name ? evsel->pmu_name : "cpu", + event_name); +} + +static void ibs_l3miss_warn(void) +{ + pr_warning( +"WARNING: Hw internally resets sampling period when L3 Miss Filtering is enabled\n" +"and tagged operation does not cause L3 Miss. 
This causes sampling period skew.\n"); +} + +void arch__post_evsel_config(struct evsel *evsel, struct perf_event_attr *attr) +{ + struct perf_pmu *evsel_pmu, *ibs_fetch_pmu, *ibs_op_pmu; + static int warned_once; + /* 0: Uninitialized, 1: Yes, -1: No */ + static int is_amd; + + if (warned_once || is_amd == -1) + return; + + if (!is_amd) { + struct perf_env *env = evsel__env(evsel); + + if (!perf_env__cpuid(env) || !env->cpuid || + !strstarts(env->cpuid, "AuthenticAMD")) { + is_amd = -1; + return; + } + is_amd = 1; + } + + evsel_pmu = evsel__find_pmu(evsel); + if (!evsel_pmu) + return; + + ibs_fetch_pmu = perf_pmu__find("ibs_fetch"); + ibs_op_pmu = perf_pmu__find("ibs_op"); + + if (ibs_fetch_pmu && ibs_fetch_pmu->type == evsel_pmu->type) { + if (attr->config & IBS_FETCH_L3MISSONLY) { + ibs_l3miss_warn(); + warned_once = 1; + } + } else if (ibs_op_pmu && ibs_op_pmu->type == evsel_pmu->type) { + if (attr->config & IBS_OP_L3MISSONLY) { + ibs_l3miss_warn(); + warned_once = 1; + } + } +} diff --git a/tools/perf/arch/x86/util/evsel.h b/tools/perf/arch/x86/util/evsel.h new file mode 100644 index 000000000000..19ad1691374d --- /dev/null +++ b/tools/perf/arch/x86/util/evsel.h @@ -0,0 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _EVSEL_H +#define _EVSEL_H 1 + +bool evsel__sys_has_perf_metrics(const struct evsel *evsel); + +#endif diff --git a/tools/perf/arch/x86/util/group.c b/tools/perf/arch/x86/util/group.c deleted file mode 100644 index e2f8034b8973..000000000000 --- a/tools/perf/arch/x86/util/group.c +++ /dev/null @@ -1,28 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -#include <stdio.h> -#include "api/fs/fs.h" -#include "util/group.h" - -/* - * Check whether we can use a group for top down. - * Without a group may get bad results due to multiplexing. - */ -bool arch_topdown_check_group(bool *warn) -{ - int n; - - if (sysctl__read_int("kernel/nmi_watchdog", &n) < 0) - return false; - if (n > 0) { - *warn = true; - return false; - } - return true; -} - -void arch_topdown_group_warn(void) -{ - fprintf(stderr, - "nmi_watchdog enabled with topdown. 
May give wrong results.\n" - "Disable with echo 0 > /proc/sys/kernel/nmi_watchdog\n"); -} diff --git a/tools/perf/arch/x86/util/header.c b/tools/perf/arch/x86/util/header.c index 578c8c568ffd..a51444a77a5f 100644 --- a/tools/perf/arch/x86/util/header.c +++ b/tools/perf/arch/x86/util/header.c @@ -9,18 +9,17 @@ #include "../../../util/debug.h" #include "../../../util/header.h" +#include "cpuid.h" -static inline void -cpuid(unsigned int op, unsigned int *a, unsigned int *b, unsigned int *c, - unsigned int *d) +void get_cpuid_0(char *vendor, unsigned int *lvl) { - __asm__ __volatile__ (".byte 0x53\n\tcpuid\n\t" - "movl %%ebx, %%esi\n\t.byte 0x5b" - : "=a" (*a), - "=S" (*b), - "=c" (*c), - "=d" (*d) - : "a" (op)); + unsigned int b, c, d; + + cpuid(0, 0, lvl, &b, &c, &d); + strncpy(&vendor[0], (char *)(&b), 4); + strncpy(&vendor[4], (char *)(&d), 4); + strncpy(&vendor[8], (char *)(&c), 4); + vendor[12] = '\0'; } static int @@ -31,14 +30,10 @@ __get_cpuid(char *buffer, size_t sz, const char *fmt) int nb; char vendor[16]; - cpuid(0, &lvl, &b, &c, &d); - strncpy(&vendor[0], (char *)(&b), 4); - strncpy(&vendor[4], (char *)(&d), 4); - strncpy(&vendor[8], (char *)(&c), 4); - vendor[12] = '\0'; + get_cpuid_0(vendor, &lvl); if (lvl >= 1) { - cpuid(1, &a, &b, &c, &d); + cpuid(1, 0, &a, &b, &c, &d); family = (a >> 8) & 0xf; /* bits 11 - 8 */ model = (a >> 4) & 0xf; /* Bits 7 - 4 */ diff --git a/tools/perf/arch/x86/util/intel-bts.c b/tools/perf/arch/x86/util/intel-bts.c index 09f93800bffd..439c2956f3e7 100644 --- a/tools/perf/arch/x86/util/intel-bts.c +++ b/tools/perf/arch/x86/util/intel-bts.c @@ -110,7 +110,7 @@ static int intel_bts_recording_options(struct auxtrace_record *itr, container_of(itr, struct intel_bts_recording, itr); struct perf_pmu *intel_bts_pmu = btsr->intel_bts_pmu; struct evsel *evsel, *intel_bts_evsel = NULL; - const struct perf_cpu_map *cpus = evlist->core.cpus; + const struct perf_cpu_map *cpus = evlist->core.user_requested_cpus; bool privileged = perf_event_paranoid_check(-1); if (opts->auxtrace_sample_mode) { @@ -129,6 +129,7 @@ static int intel_bts_recording_options(struct auxtrace_record *itr, } evsel->core.attr.freq = 0; evsel->core.attr.sample_period = 1; + evsel->needs_auxtrace_mmap = true; intel_bts_evsel = evsel; opts->full_auxtrace = true; } @@ -218,13 +219,13 @@ static int intel_bts_recording_options(struct auxtrace_record *itr, * To obtain the auxtrace buffer file descriptor, the auxtrace event * must come first. */ - perf_evlist__to_front(evlist, intel_bts_evsel); + evlist__to_front(evlist, intel_bts_evsel); /* * In the case of per-cpu mmaps, we need the CPU on the * AUX event. 
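The header.c hunk above now decodes CPUID leaf 1 through the shared cpuid() helper. A compact standalone sketch of the decoding visible in the hunk (base family/model/stepping fields only; the rest of __get_cpuid() is unchanged by the patch):

#include <stdio.h>
#include "cpuid.h"

int main(void)
{
        unsigned int a, b, c, d;

        cpuid(1, 0, &a, &b, &c, &d);
        printf("family %u, model %u, stepping %u\n",
               (a >> 8) & 0xf,  /* bits 11 - 8 */
               (a >> 4) & 0xf,  /* bits 7 - 4 */
               a & 0xf);        /* bits 3 - 0 */
        return 0;
}
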
*/ if (!perf_cpu_map__empty(cpus)) - perf_evsel__set_sample_bit(intel_bts_evsel, CPU); + evsel__set_sample_bit(intel_bts_evsel, CPU); } /* Add dummy event to keep tracking */ @@ -232,13 +233,13 @@ static int intel_bts_recording_options(struct auxtrace_record *itr, struct evsel *tracking_evsel; int err; - err = parse_events(evlist, "dummy:u", NULL); + err = parse_event(evlist, "dummy:u"); if (err) return err; tracking_evsel = evlist__last(evlist); - perf_evlist__set_tracking_event(evlist, tracking_evsel); + evlist__set_tracking_event(evlist, tracking_evsel); tracking_evsel->core.attr.freq = 0; tracking_evsel->core.attr.sample_period = 1; diff --git a/tools/perf/arch/x86/util/intel-pt.c b/tools/perf/arch/x86/util/intel-pt.c index 1643aed8c4c8..af102f471e9f 100644 --- a/tools/perf/arch/x86/util/intel-pt.c +++ b/tools/perf/arch/x86/util/intel-pt.c @@ -11,6 +11,7 @@ #include <linux/bitops.h> #include <linux/log2.h> #include <linux/zalloc.h> +#include <linux/err.h> #include <cpuid.h> #include "../../../util/session.h" @@ -25,6 +26,7 @@ #include "../../../util/pmu.h" #include "../../../util/debug.h" #include "../../../util/auxtrace.h" +#include "../../../util/perf_api_probe.h" #include "../../../util/record.h" #include "../../../util/target.h" #include "../../../util/tsc.h" @@ -58,7 +60,8 @@ struct intel_pt_recording { size_t priv_size; }; -static int intel_pt_parse_terms_with_default(struct list_head *formats, +static int intel_pt_parse_terms_with_default(const char *pmu_name, + struct list_head *formats, const char *str, u64 *config) { @@ -77,7 +80,8 @@ static int intel_pt_parse_terms_with_default(struct list_head *formats, goto out_free; attr.config = *config; - err = perf_pmu__config_terms(formats, &attr, terms, true, NULL); + err = perf_pmu__config_terms(pmu_name, formats, &attr, terms, true, + NULL); if (err) goto out_free; @@ -87,11 +91,12 @@ out_free: return err; } -static int intel_pt_parse_terms(struct list_head *formats, const char *str, - u64 *config) +static int intel_pt_parse_terms(const char *pmu_name, struct list_head *formats, + const char *str, u64 *config) { *config = 0; - return intel_pt_parse_terms_with_default(formats, str, config); + return intel_pt_parse_terms_with_default(pmu_name, formats, str, + config); } static u64 intel_pt_masked_bits(u64 mask, u64 bits) @@ -228,7 +233,8 @@ static u64 intel_pt_default_config(struct perf_pmu *intel_pt_pmu) pr_debug2("%s default config: %s\n", intel_pt_pmu->name, buf); - intel_pt_parse_terms(&intel_pt_pmu->format, buf, &config); + intel_pt_parse_terms(intel_pt_pmu->name, &intel_pt_pmu->format, buf, + &config); return config; } @@ -301,6 +307,7 @@ intel_pt_info_priv_size(struct auxtrace_record *itr, struct evlist *evlist) ptr->priv_size = (INTEL_PT_AUXTRACE_PRIV_MAX * sizeof(u64)) + intel_pt_filter_bytes(filter); + ptr->priv_size += sizeof(u64); /* Cap Event Trace */ return ptr->priv_size; } @@ -330,25 +337,32 @@ static int intel_pt_info_fill(struct auxtrace_record *itr, unsigned long max_non_turbo_ratio; size_t filter_str_len; const char *filter; + int event_trace; __u64 *info; int err; if (priv_size != ptr->priv_size) return -EINVAL; - intel_pt_parse_terms(&intel_pt_pmu->format, "tsc", &tsc_bit); - intel_pt_parse_terms(&intel_pt_pmu->format, "noretcomp", - &noretcomp_bit); - intel_pt_parse_terms(&intel_pt_pmu->format, "mtc", &mtc_bit); + intel_pt_parse_terms(intel_pt_pmu->name, &intel_pt_pmu->format, + "tsc", &tsc_bit); + intel_pt_parse_terms(intel_pt_pmu->name, &intel_pt_pmu->format, + "noretcomp", &noretcomp_bit); + 
intel_pt_parse_terms(intel_pt_pmu->name, &intel_pt_pmu->format, + "mtc", &mtc_bit); mtc_freq_bits = perf_pmu__format_bits(&intel_pt_pmu->format, "mtc_period"); - intel_pt_parse_terms(&intel_pt_pmu->format, "cyc", &cyc_bit); + intel_pt_parse_terms(intel_pt_pmu->name, &intel_pt_pmu->format, + "cyc", &cyc_bit); intel_pt_tsc_ctc_ratio(&tsc_ctc_ratio_n, &tsc_ctc_ratio_d); if (perf_pmu__scan_file(intel_pt_pmu, "max_nonturbo_ratio", "%lu", &max_non_turbo_ratio) != 1) max_non_turbo_ratio = 0; + if (perf_pmu__scan_file(intel_pt_pmu, "caps/event_trace", + "%d", &event_trace) != 1) + event_trace = 0; filter = intel_pt_find_filter(session->evlist, ptr->intel_pt_pmu); filter_str_len = filter ? strlen(filter) : 0; @@ -369,7 +383,7 @@ static int intel_pt_info_fill(struct auxtrace_record *itr, ui__warning("Intel Processor Trace: TSC not available\n"); } - per_cpu_mmaps = !perf_cpu_map__empty(session->evlist->core.cpus); + per_cpu_mmaps = !perf_cpu_map__empty(session->evlist->core.user_requested_cpus); auxtrace_info->type = PERF_AUXTRACE_INTEL_PT; auxtrace_info->priv[INTEL_PT_PMU_TYPE] = intel_pt_pmu->type; @@ -399,6 +413,8 @@ static int intel_pt_info_fill(struct auxtrace_record *itr, info += len >> 3; } + *info++ = event_trace; + return 0; } @@ -408,23 +424,17 @@ static int intel_pt_track_switches(struct evlist *evlist) struct evsel *evsel; int err; - if (!perf_evlist__can_select_event(evlist, sched_switch)) + if (!evlist__can_select_event(evlist, sched_switch)) return -EPERM; - err = parse_events(evlist, sched_switch, NULL); - if (err) { - pr_debug2("%s: failed to parse %s, error %d\n", + evsel = evlist__add_sched_switch(evlist, true); + if (IS_ERR(evsel)) { + err = PTR_ERR(evsel); + pr_debug2("%s: failed to create %s, error = %d\n", __func__, sched_switch, err); return err; } - evsel = evlist__last(evlist); - - perf_evsel__set_sample_bit(evsel, CPU); - perf_evsel__set_sample_bit(evsel, TIME); - - evsel->core.system_wide = true; - evsel->no_aux_samples = true; evsel->immediate = true; return 0; @@ -555,10 +565,9 @@ static int intel_pt_validate_config(struct perf_pmu *intel_pt_pmu, static void intel_pt_config_sample_mode(struct perf_pmu *intel_pt_pmu, struct evsel *evsel) { - struct perf_evsel_config_term *term; u64 user_bits = 0, bits; + struct evsel_config_term *term = evsel__get_config_term(evsel, CFG_CHG); - term = perf_evsel__get_config_term(evsel, CFG_CHG); if (term) user_bits = term->val.cfg_chg; @@ -618,7 +627,7 @@ static int intel_pt_recording_options(struct auxtrace_record *itr, struct perf_pmu *intel_pt_pmu = ptr->intel_pt_pmu; bool have_timing_info, need_immediate = false; struct evsel *evsel, *intel_pt_evsel = NULL; - const struct perf_cpu_map *cpus = evlist->core.cpus; + const struct perf_cpu_map *cpus = evlist->core.user_requested_cpus; bool privileged = perf_event_paranoid_check(-1); u64 tsc_bit; int err; @@ -634,6 +643,8 @@ static int intel_pt_recording_options(struct auxtrace_record *itr, } evsel->core.attr.freq = 0; evsel->core.attr.sample_period = 1; + evsel->no_aux_samples = true; + evsel->needs_auxtrace_mmap = true; intel_pt_evsel = evsel; opts->full_auxtrace = true; } @@ -768,7 +779,14 @@ static int intel_pt_recording_options(struct auxtrace_record *itr, } } - intel_pt_parse_terms(&intel_pt_pmu->format, "tsc", &tsc_bit); + if (!opts->auxtrace_snapshot_mode && !opts->auxtrace_sample_mode) { + u32 aux_watermark = opts->auxtrace_mmap_pages * page_size / 4; + + intel_pt_evsel->core.attr.aux_watermark = aux_watermark; + } + + intel_pt_parse_terms(intel_pt_pmu->name, 
&intel_pt_pmu->format, + "tsc", &tsc_bit); if (opts->full_auxtrace && (intel_pt_evsel->core.attr.config & tsc_bit)) have_timing_info = true; @@ -779,7 +797,8 @@ static int intel_pt_recording_options(struct auxtrace_record *itr, * Per-cpu recording needs sched_switch events to distinguish different * threads. */ - if (have_timing_info && !perf_cpu_map__empty(cpus)) { + if (have_timing_info && !perf_cpu_map__empty(cpus) && + !record_opts__no_switch_events(opts)) { if (perf_can_record_switch_events()) { bool cpu_wide = !target__none(&opts->target) && !target__has_task(&opts->target); @@ -787,24 +806,17 @@ static int intel_pt_recording_options(struct auxtrace_record *itr, if (!cpu_wide && perf_can_record_cpu_wide()) { struct evsel *switch_evsel; - err = parse_events(evlist, "dummy:u", NULL); - if (err) - return err; - - switch_evsel = evlist__last(evlist); + switch_evsel = evlist__add_dummy_on_all_cpus(evlist); + if (!switch_evsel) + return -ENOMEM; - switch_evsel->core.attr.freq = 0; - switch_evsel->core.attr.sample_period = 1; switch_evsel->core.attr.context_switch = 1; - - switch_evsel->core.system_wide = true; - switch_evsel->no_aux_samples = true; switch_evsel->immediate = true; - perf_evsel__set_sample_bit(switch_evsel, TID); - perf_evsel__set_sample_bit(switch_evsel, TIME); - perf_evsel__set_sample_bit(switch_evsel, CPU); - perf_evsel__reset_sample_bit(switch_evsel, BRANCH_STACK); + evsel__set_sample_bit(switch_evsel, TID); + evsel__set_sample_bit(switch_evsel, TIME); + evsel__set_sample_bit(switch_evsel, CPU); + evsel__reset_sample_bit(switch_evsel, BRANCH_STACK); opts->record_switch_events = false; ptr->have_sched_switch = 3; @@ -827,46 +839,52 @@ static int intel_pt_recording_options(struct auxtrace_record *itr, } } + if (have_timing_info && !intel_pt_evsel->core.attr.exclude_kernel && + perf_can_record_text_poke_events() && perf_can_record_cpu_wide()) + opts->text_poke = true; + if (intel_pt_evsel) { /* * To obtain the auxtrace buffer file descriptor, the auxtrace * event must come first. */ - perf_evlist__to_front(evlist, intel_pt_evsel); + evlist__to_front(evlist, intel_pt_evsel); /* * In the case of per-cpu mmaps, we need the CPU on the * AUX event. */ if (!perf_cpu_map__empty(cpus)) - perf_evsel__set_sample_bit(intel_pt_evsel, CPU); + evsel__set_sample_bit(intel_pt_evsel, CPU); } /* Add dummy event to keep tracking */ if (opts->full_auxtrace) { + bool need_system_wide_tracking; struct evsel *tracking_evsel; - err = parse_events(evlist, "dummy:u", NULL); - if (err) - return err; - - tracking_evsel = evlist__last(evlist); + /* + * User space tasks can migrate between CPUs, so when tracing + * selected CPUs, sideband for all CPUs is still needed. 
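A few lines up, this hunk starts defaulting aux_watermark when neither snapshot nor sample mode is in use, so the kernel wakes the tool once a quarter of the AUX buffer has filled. A trivial arithmetic sketch of that default (the page and buffer sizes below are illustrative, not from the patch):

#include <stdio.h>

int main(void)
{
        unsigned int auxtrace_mmap_pages = 64;  /* example: 256 KiB AUX buffer */
        unsigned int page_size = 4096;          /* example page size */
        unsigned int aux_watermark = auxtrace_mmap_pages * page_size / 4;

        printf("AUX buffer: %u bytes, watermark: %u bytes\n",
               auxtrace_mmap_pages * page_size, aux_watermark);
        return 0;
}
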
+ */ + need_system_wide_tracking = opts->target.cpu_list && + !intel_pt_evsel->core.attr.exclude_user; - perf_evlist__set_tracking_event(evlist, tracking_evsel); + tracking_evsel = evlist__add_aux_dummy(evlist, need_system_wide_tracking); + if (!tracking_evsel) + return -ENOMEM; - tracking_evsel->core.attr.freq = 0; - tracking_evsel->core.attr.sample_period = 1; + evlist__set_tracking_event(evlist, tracking_evsel); - tracking_evsel->no_aux_samples = true; if (need_immediate) tracking_evsel->immediate = true; /* In per-cpu case, always need the time of mmap events etc */ if (!perf_cpu_map__empty(cpus)) { - perf_evsel__set_sample_bit(tracking_evsel, TIME); + evsel__set_sample_bit(tracking_evsel, TIME); /* And the CPU for switch events */ - perf_evsel__set_sample_bit(tracking_evsel, CPU); + evsel__set_sample_bit(tracking_evsel, CPU); } - perf_evsel__reset_sample_bit(tracking_evsel, BRANCH_STACK); + evsel__reset_sample_bit(tracking_evsel, BRANCH_STACK); } /* @@ -874,7 +892,8 @@ static int intel_pt_recording_options(struct auxtrace_record *itr, * per-cpu with no sched_switch (except workload-only). */ if (!ptr->have_sched_switch && !perf_cpu_map__empty(cpus) && - !target__none(&opts->target)) + !target__none(&opts->target) && + !intel_pt_evsel->core.attr.exclude_user) ui__warning("Intel Processor Trace decoding will not be possible except for kernel tracing!\n"); return 0; diff --git a/tools/perf/arch/x86/util/iostat.c b/tools/perf/arch/x86/util/iostat.c new file mode 100644 index 000000000000..404de795ec0b --- /dev/null +++ b/tools/perf/arch/x86/util/iostat.c @@ -0,0 +1,470 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * perf iostat + * + * Copyright (C) 2020, Intel Corporation + * + * Authors: Alexander Antonov <alexander.antonov@linux.intel.com> + */ + +#include <api/fs/fs.h> +#include <linux/kernel.h> +#include <linux/err.h> +#include <limits.h> +#include <stdio.h> +#include <string.h> +#include <errno.h> +#include <sys/types.h> +#include <sys/stat.h> +#include <fcntl.h> +#include <dirent.h> +#include <unistd.h> +#include <stdlib.h> +#include <regex.h> +#include "util/cpumap.h" +#include "util/debug.h" +#include "util/iostat.h" +#include "util/counts.h" +#include "path.h" + +#ifndef MAX_PATH +#define MAX_PATH 1024 +#endif + +#define UNCORE_IIO_PMU_PATH "devices/uncore_iio_%d" +#define SYSFS_UNCORE_PMU_PATH "%s/"UNCORE_IIO_PMU_PATH +#define PLATFORM_MAPPING_PATH UNCORE_IIO_PMU_PATH"/die%d" + +/* + * Each metric requiries one IIO event which increments at every 4B transfer + * in corresponding direction. 
The formulas to compute metrics are generic: + * #EventCount * 4B / (1024 * 1024) + */ +static const char * const iostat_metrics[] = { + "Inbound Read(MB)", + "Inbound Write(MB)", + "Outbound Read(MB)", + "Outbound Write(MB)", +}; + +static inline int iostat_metrics_count(void) +{ + return sizeof(iostat_metrics) / sizeof(char *); +} + +static const char *iostat_metric_by_idx(int idx) +{ + return *(iostat_metrics + idx % iostat_metrics_count()); +} + +struct iio_root_port { + u32 domain; + u8 bus; + u8 die; + u8 pmu_idx; + int idx; +}; + +struct iio_root_ports_list { + struct iio_root_port **rps; + int nr_entries; +}; + +static struct iio_root_ports_list *root_ports; + +static void iio_root_port_show(FILE *output, + const struct iio_root_port * const rp) +{ + if (output && rp) + fprintf(output, "S%d-uncore_iio_%d<%04x:%02x>\n", + rp->die, rp->pmu_idx, rp->domain, rp->bus); +} + +static struct iio_root_port *iio_root_port_new(u32 domain, u8 bus, + u8 die, u8 pmu_idx) +{ + struct iio_root_port *p = calloc(1, sizeof(*p)); + + if (p) { + p->domain = domain; + p->bus = bus; + p->die = die; + p->pmu_idx = pmu_idx; + } + return p; +} + +static void iio_root_ports_list_free(struct iio_root_ports_list *list) +{ + int idx; + + if (list) { + for (idx = 0; idx < list->nr_entries; idx++) + free(list->rps[idx]); + free(list->rps); + free(list); + } +} + +static struct iio_root_port *iio_root_port_find_by_notation( + const struct iio_root_ports_list * const list, u32 domain, u8 bus) +{ + int idx; + struct iio_root_port *rp; + + if (list) { + for (idx = 0; idx < list->nr_entries; idx++) { + rp = list->rps[idx]; + if (rp && rp->domain == domain && rp->bus == bus) + return rp; + } + } + return NULL; +} + +static int iio_root_ports_list_insert(struct iio_root_ports_list *list, + struct iio_root_port * const rp) +{ + struct iio_root_port **tmp_buf; + + if (list && rp) { + rp->idx = list->nr_entries++; + tmp_buf = realloc(list->rps, + list->nr_entries * sizeof(*list->rps)); + if (!tmp_buf) { + pr_err("Failed to realloc memory\n"); + return -ENOMEM; + } + tmp_buf[rp->idx] = rp; + list->rps = tmp_buf; + } + return 0; +} + +static int iio_mapping(u8 pmu_idx, struct iio_root_ports_list * const list) +{ + char *buf; + char path[MAX_PATH]; + u32 domain; + u8 bus; + struct iio_root_port *rp; + size_t size; + int ret; + + for (int die = 0; die < cpu__max_node(); die++) { + scnprintf(path, MAX_PATH, PLATFORM_MAPPING_PATH, pmu_idx, die); + if (sysfs__read_str(path, &buf, &size) < 0) { + if (pmu_idx) + goto out; + pr_err("Mode iostat is not supported\n"); + return -1; + } + ret = sscanf(buf, "%04x:%02hhx", &domain, &bus); + free(buf); + if (ret != 2) { + pr_err("Invalid mapping data: iio_%d; die%d\n", + pmu_idx, die); + return -1; + } + rp = iio_root_port_new(domain, bus, die, pmu_idx); + if (!rp || iio_root_ports_list_insert(list, rp)) { + free(rp); + return -ENOMEM; + } + } +out: + return 0; +} + +static u8 iio_pmu_count(void) +{ + u8 pmu_idx = 0; + char path[MAX_PATH]; + const char *sysfs = sysfs__mountpoint(); + + if (sysfs) { + for (;; pmu_idx++) { + snprintf(path, sizeof(path), SYSFS_UNCORE_PMU_PATH, + sysfs, pmu_idx); + if (access(path, F_OK) != 0) + break; + } + } + return pmu_idx; +} + +static int iio_root_ports_scan(struct iio_root_ports_list **list) +{ + int ret = -ENOMEM; + struct iio_root_ports_list *tmp_list; + u8 pmu_count = iio_pmu_count(); + + if (!pmu_count) { + pr_err("Unsupported uncore pmu configuration\n"); + return -1; + } + + tmp_list = calloc(1, sizeof(*tmp_list)); + if (!tmp_list) + goto err; + + 
for (u8 pmu_idx = 0; pmu_idx < pmu_count; pmu_idx++) { + ret = iio_mapping(pmu_idx, tmp_list); + if (ret) + break; + } +err: + if (!ret) + *list = tmp_list; + else + iio_root_ports_list_free(tmp_list); + + return ret; +} + +static int iio_root_port_parse_str(u32 *domain, u8 *bus, char *str) +{ + int ret; + regex_t regex; + /* + * Expected format domain:bus: + * Valid domain range [0:ffff] + * Valid bus range [0:ff] + * Example: 0000:af, 0:3d, 01:7 + */ + regcomp(®ex, "^([a-f0-9A-F]{1,}):([a-f0-9A-F]{1,2})", REG_EXTENDED); + ret = regexec(®ex, str, 0, NULL, 0); + if (ret || sscanf(str, "%08x:%02hhx", domain, bus) != 2) + pr_warning("Unrecognized root port format: %s\n" + "Please use the following format:\n" + "\t [domain]:[bus]\n" + "\t for example: 0000:3d\n", str); + + regfree(®ex); + return ret; +} + +static int iio_root_ports_list_filter(struct iio_root_ports_list **list, + const char *filter) +{ + char *tok, *tmp, *filter_copy = NULL; + struct iio_root_port *rp; + u32 domain; + u8 bus; + int ret = -ENOMEM; + struct iio_root_ports_list *tmp_list = calloc(1, sizeof(*tmp_list)); + + if (!tmp_list) + goto err; + + filter_copy = strdup(filter); + if (!filter_copy) + goto err; + + for (tok = strtok_r(filter_copy, ",", &tmp); tok; + tok = strtok_r(NULL, ",", &tmp)) { + if (!iio_root_port_parse_str(&domain, &bus, tok)) { + rp = iio_root_port_find_by_notation(*list, domain, bus); + if (rp) { + (*list)->rps[rp->idx] = NULL; + ret = iio_root_ports_list_insert(tmp_list, rp); + if (ret) { + free(rp); + goto err; + } + } else if (!iio_root_port_find_by_notation(tmp_list, + domain, bus)) + pr_warning("Root port %04x:%02x were not found\n", + domain, bus); + } + } + + if (tmp_list->nr_entries == 0) { + pr_err("Requested root ports were not found\n"); + ret = -EINVAL; + } +err: + iio_root_ports_list_free(*list); + if (ret) + iio_root_ports_list_free(tmp_list); + else + *list = tmp_list; + + free(filter_copy); + return ret; +} + +static int iostat_event_group(struct evlist *evl, + struct iio_root_ports_list *list) +{ + int ret; + int idx; + const char *iostat_cmd_template = + "{uncore_iio_%x/event=0x83,umask=0x04,ch_mask=0xF,fc_mask=0x07/,\ + uncore_iio_%x/event=0x83,umask=0x01,ch_mask=0xF,fc_mask=0x07/,\ + uncore_iio_%x/event=0xc0,umask=0x04,ch_mask=0xF,fc_mask=0x07/,\ + uncore_iio_%x/event=0xc0,umask=0x01,ch_mask=0xF,fc_mask=0x07/}"; + const int len_template = strlen(iostat_cmd_template) + 1; + struct evsel *evsel = NULL; + int metrics_count = iostat_metrics_count(); + char *iostat_cmd = calloc(len_template, 1); + + if (!iostat_cmd) + return -ENOMEM; + + for (idx = 0; idx < list->nr_entries; idx++) { + sprintf(iostat_cmd, iostat_cmd_template, + list->rps[idx]->pmu_idx, list->rps[idx]->pmu_idx, + list->rps[idx]->pmu_idx, list->rps[idx]->pmu_idx); + ret = parse_event(evl, iostat_cmd); + if (ret) + goto err; + } + + evlist__for_each_entry(evl, evsel) { + evsel->priv = list->rps[evsel->core.idx / metrics_count]; + } + list->nr_entries = 0; +err: + iio_root_ports_list_free(list); + free(iostat_cmd); + return ret; +} + +int iostat_prepare(struct evlist *evlist, struct perf_stat_config *config) +{ + if (evlist->core.nr_entries > 0) { + pr_warning("The -e and -M options are not supported." 
+ "All chosen events/metrics will be dropped\n"); + evlist__delete(evlist); + evlist = evlist__new(); + if (!evlist) + return -ENOMEM; + } + + config->metric_only = true; + config->aggr_mode = AGGR_GLOBAL; + + return iostat_event_group(evlist, root_ports); +} + +int iostat_parse(const struct option *opt, const char *str, + int unset __maybe_unused) +{ + int ret; + struct perf_stat_config *config = (struct perf_stat_config *)opt->data; + + ret = iio_root_ports_scan(&root_ports); + if (!ret) { + config->iostat_run = true; + if (!str) + iostat_mode = IOSTAT_RUN; + else if (!strcmp(str, "list")) + iostat_mode = IOSTAT_LIST; + else { + iostat_mode = IOSTAT_RUN; + ret = iio_root_ports_list_filter(&root_ports, str); + } + } + return ret; +} + +void iostat_list(struct evlist *evlist, struct perf_stat_config *config) +{ + struct evsel *evsel; + struct iio_root_port *rp = NULL; + + evlist__for_each_entry(evlist, evsel) { + if (rp != evsel->priv) { + rp = evsel->priv; + iio_root_port_show(config->output, rp); + } + } +} + +void iostat_release(struct evlist *evlist) +{ + struct evsel *evsel; + struct iio_root_port *rp = NULL; + + evlist__for_each_entry(evlist, evsel) { + if (rp != evsel->priv) { + rp = evsel->priv; + free(evsel->priv); + } + } +} + +void iostat_prefix(struct evlist *evlist, + struct perf_stat_config *config, + char *prefix, struct timespec *ts) +{ + struct iio_root_port *rp = evlist->selected->priv; + + if (rp) { + if (ts) + sprintf(prefix, "%6lu.%09lu%s%04x:%02x%s", + ts->tv_sec, ts->tv_nsec, + config->csv_sep, rp->domain, rp->bus, + config->csv_sep); + else + sprintf(prefix, "%04x:%02x%s", rp->domain, rp->bus, + config->csv_sep); + } +} + +void iostat_print_header_prefix(struct perf_stat_config *config) +{ + if (config->csv_output) + fputs("port,", config->output); + else if (config->interval) + fprintf(config->output, "# time port "); + else + fprintf(config->output, " port "); +} + +void iostat_print_metric(struct perf_stat_config *config, struct evsel *evsel, + struct perf_stat_output_ctx *out) +{ + double iostat_value = 0; + u64 prev_count_val = 0; + const char *iostat_metric = iostat_metric_by_idx(evsel->core.idx); + u8 die = ((struct iio_root_port *)evsel->priv)->die; + struct perf_counts_values *count = perf_counts(evsel->counts, die, 0); + + if (count && count->run && count->ena) { + if (evsel->prev_raw_counts && !out->force_header) { + struct perf_counts_values *prev_count = + perf_counts(evsel->prev_raw_counts, die, 0); + + prev_count_val = prev_count->val; + prev_count->val = count->val; + } + iostat_value = (count->val - prev_count_val) / + ((double) count->run / count->ena); + } + out->print_metric(config, out->ctx, NULL, "%8.0f", iostat_metric, + iostat_value / (256 * 1024)); +} + +void iostat_print_counters(struct evlist *evlist, + struct perf_stat_config *config, struct timespec *ts, + char *prefix, iostat_print_counter_t print_cnt_cb) +{ + void *perf_device = NULL; + struct evsel *counter = evlist__first(evlist); + + evlist__set_selected(evlist, counter); + iostat_prefix(evlist, config, prefix, ts); + fprintf(config->output, "%s", prefix); + evlist__for_each_entry(evlist, counter) { + perf_device = evlist->selected->priv; + if (perf_device && perf_device != counter->priv) { + evlist__set_selected(evlist, counter); + iostat_prefix(evlist, config, prefix, ts); + fprintf(config->output, "\n%s", prefix); + } + print_cnt_cb(config, counter, prefix); + } + fputc('\n', config->output); +} diff --git a/tools/perf/arch/x86/util/kvm-stat.c 
b/tools/perf/arch/x86/util/kvm-stat.c index c0775c39227f..c5dd54f6ef5e 100644 --- a/tools/perf/arch/x86/util/kvm-stat.c +++ b/tools/perf/arch/x86/util/kvm-stat.c @@ -31,8 +31,8 @@ const char *kvm_exit_trace = "kvm:kvm_exit"; static void mmio_event_get_key(struct evsel *evsel, struct perf_sample *sample, struct event_key *key) { - key->key = perf_evsel__intval(evsel, sample, "gpa"); - key->info = perf_evsel__intval(evsel, sample, "type"); + key->key = evsel__intval(evsel, sample, "gpa"); + key->info = evsel__intval(evsel, sample, "type"); } #define KVM_TRACE_MMIO_READ_UNSATISFIED 0 @@ -48,7 +48,7 @@ static bool mmio_event_begin(struct evsel *evsel, /* MMIO write begin event in kernel. */ if (!strcmp(evsel->name, "kvm:kvm_mmio") && - perf_evsel__intval(evsel, sample, "type") == KVM_TRACE_MMIO_WRITE) { + evsel__intval(evsel, sample, "type") == KVM_TRACE_MMIO_WRITE) { mmio_event_get_key(evsel, sample, key); return true; } @@ -65,7 +65,7 @@ static bool mmio_event_end(struct evsel *evsel, struct perf_sample *sample, /* MMIO read end event in kernel.*/ if (!strcmp(evsel->name, "kvm:kvm_mmio") && - perf_evsel__intval(evsel, sample, "type") == KVM_TRACE_MMIO_READ) { + evsel__intval(evsel, sample, "type") == KVM_TRACE_MMIO_READ) { mmio_event_get_key(evsel, sample, key); return true; } @@ -94,8 +94,8 @@ static void ioport_event_get_key(struct evsel *evsel, struct perf_sample *sample, struct event_key *key) { - key->key = perf_evsel__intval(evsel, sample, "port"); - key->info = perf_evsel__intval(evsel, sample, "rw"); + key->key = evsel__intval(evsel, sample, "port"); + key->info = evsel__intval(evsel, sample, "rw"); } static bool ioport_event_begin(struct evsel *evsel, @@ -133,11 +133,56 @@ static struct kvm_events_ops ioport_events = { .name = "IO Port Access" }; + /* The time of emulation msr is from kvm_msr to kvm_entry. */ +static void msr_event_get_key(struct evsel *evsel, + struct perf_sample *sample, + struct event_key *key) +{ + key->key = evsel__intval(evsel, sample, "ecx"); + key->info = evsel__intval(evsel, sample, "write"); +} + +static bool msr_event_begin(struct evsel *evsel, + struct perf_sample *sample, + struct event_key *key) +{ + if (!strcmp(evsel->name, "kvm:kvm_msr")) { + msr_event_get_key(evsel, sample, key); + return true; + } + + return false; +} + +static bool msr_event_end(struct evsel *evsel, + struct perf_sample *sample __maybe_unused, + struct event_key *key __maybe_unused) +{ + return kvm_entry_event(evsel); +} + +static void msr_event_decode_key(struct perf_kvm_stat *kvm __maybe_unused, + struct event_key *key, + char *decode) +{ + scnprintf(decode, decode_str_len, "%#llx:%s", + (unsigned long long)key->key, + key->info ? 
"W" : "R"); +} + +static struct kvm_events_ops msr_events = { + .is_begin_event = msr_event_begin, + .is_end_event = msr_event_end, + .decode_key = msr_event_decode_key, + .name = "MSR Access" +}; + const char *kvm_events_tp[] = { "kvm:kvm_entry", "kvm:kvm_exit", "kvm:kvm_mmio", "kvm:kvm_pio", + "kvm:kvm_msr", NULL, }; @@ -145,6 +190,7 @@ struct kvm_reg_events_ops kvm_reg_events_ops[] = { { .name = "vmexit", .ops = &exit_events }, { .name = "mmio", .ops = &mmio_events }, { .name = "ioport", .ops = &ioport_events }, + { .name = "msr", .ops = &msr_events }, { NULL, NULL }, }; diff --git a/tools/perf/arch/x86/util/mem-events.c b/tools/perf/arch/x86/util/mem-events.c new file mode 100644 index 000000000000..f683ac702247 --- /dev/null +++ b/tools/perf/arch/x86/util/mem-events.c @@ -0,0 +1,107 @@ +// SPDX-License-Identifier: GPL-2.0 +#include "util/pmu.h" +#include "util/env.h" +#include "map_symbol.h" +#include "mem-events.h" +#include "linux/string.h" + +static char mem_loads_name[100]; +static bool mem_loads_name__init; +static char mem_stores_name[100]; + +#define MEM_LOADS_AUX 0x8203 +#define MEM_LOADS_AUX_NAME "{%s/mem-loads-aux/,%s/mem-loads,ldlat=%u/}:P" + +#define E(t, n, s) { .tag = t, .name = n, .sysfs_name = s } + +static struct perf_mem_event perf_mem_events_intel[PERF_MEM_EVENTS__MAX] = { + E("ldlat-loads", "%s/mem-loads,ldlat=%u/P", "%s/events/mem-loads"), + E("ldlat-stores", "%s/mem-stores/P", "%s/events/mem-stores"), + E(NULL, NULL, NULL), +}; + +static struct perf_mem_event perf_mem_events_amd[PERF_MEM_EVENTS__MAX] = { + E(NULL, NULL, NULL), + E(NULL, NULL, NULL), + E("mem-ldst", "ibs_op//", "ibs_op"), +}; + +static int perf_mem_is_amd_cpu(void) +{ + struct perf_env env = { .total_mem = 0, }; + + perf_env__cpuid(&env); + if (env.cpuid && strstarts(env.cpuid, "AuthenticAMD")) + return 1; + return -1; +} + +struct perf_mem_event *perf_mem_events__ptr(int i) +{ + /* 0: Uninitialized, 1: Yes, -1: No */ + static int is_amd; + + if (i >= PERF_MEM_EVENTS__MAX) + return NULL; + + if (!is_amd) + is_amd = perf_mem_is_amd_cpu(); + + if (is_amd == 1) + return &perf_mem_events_amd[i]; + + return &perf_mem_events_intel[i]; +} + +bool is_mem_loads_aux_event(struct evsel *leader) +{ + if (perf_pmu__find("cpu")) { + if (!pmu_have_event("cpu", "mem-loads-aux")) + return false; + } else if (perf_pmu__find("cpu_core")) { + if (!pmu_have_event("cpu_core", "mem-loads-aux")) + return false; + } + + return leader->core.attr.config == MEM_LOADS_AUX; +} + +char *perf_mem_events__name(int i, char *pmu_name) +{ + struct perf_mem_event *e = perf_mem_events__ptr(i); + + if (!e) + return NULL; + + if (i == PERF_MEM_EVENTS__LOAD) { + if (mem_loads_name__init && !pmu_name) + return mem_loads_name; + + if (!pmu_name) { + mem_loads_name__init = true; + pmu_name = (char *)"cpu"; + } + + if (pmu_have_event(pmu_name, "mem-loads-aux")) { + scnprintf(mem_loads_name, sizeof(mem_loads_name), + MEM_LOADS_AUX_NAME, pmu_name, pmu_name, + perf_mem_events__loads_ldlat); + } else { + scnprintf(mem_loads_name, sizeof(mem_loads_name), + e->name, pmu_name, + perf_mem_events__loads_ldlat); + } + return mem_loads_name; + } + + if (i == PERF_MEM_EVENTS__STORE) { + if (!pmu_name) + pmu_name = (char *)"cpu"; + + scnprintf(mem_stores_name, sizeof(mem_stores_name), + e->name, pmu_name); + return mem_stores_name; + } + + return (char *)e->name; +} diff --git a/tools/perf/arch/x86/util/perf_regs.c b/tools/perf/arch/x86/util/perf_regs.c index fca81b39b09f..0ed177991ad0 100644 --- a/tools/perf/arch/x86/util/perf_regs.c +++ 
b/tools/perf/arch/x86/util/perf_regs.c @@ -9,6 +9,8 @@ #include "../../../util/perf_regs.h" #include "../../../util/debug.h" #include "../../../util/event.h" +#include "../../../util/pmu.h" +#include "../../../util/pmu-hybrid.h" const struct sample_reg sample_reg_masks[] = { SMPL_REG(AX, PERF_REG_X86_AX), @@ -165,7 +167,7 @@ static int sdt_init_op_regex(void) /* * Max x86 register name length is 5(ex: %r15d). So, 6th char * should always contain NULL. This helps to find register name - * length using strlen, insted of maintaing one more variable. + * length using strlen, instead of maintaining one more variable. */ #define SDT_REG_NAME_SIZE 6 @@ -207,7 +209,7 @@ int arch_sdt_arg_parse_op(char *old_op, char **new_op) * and displacement 0 (Both sign and displacement 0 are * optional so it may be empty). Use one more character * to hold last NULL so that strlen can be used to find - * prefix length, instead of maintaing one more variable. + * prefix length, instead of maintaining one more variable. */ char prefix[3] = {0}; @@ -284,12 +286,22 @@ uint64_t arch__intr_reg_mask(void) .disabled = 1, .exclude_kernel = 1, }; + struct perf_pmu *pmu; int fd; /* * In an unnamed union, init it here to build on older gcc versions */ attr.sample_period = 1; + if (perf_pmu__has_hybrid()) { + /* + * The same register set is supported among different hybrid PMUs. + * Only check the first available one. + */ + pmu = list_first_entry(&perf_pmu__hybrid_pmus, typeof(*pmu), hybrid_list); + attr.config |= (__u64)pmu->type << PERF_PMU_TYPE_SHIFT; + } + event_attr_init(&attr); fd = sys_perf_event_open(&attr, 0, -1, -1, 0); diff --git a/tools/perf/arch/x86/util/pmu.c b/tools/perf/arch/x86/util/pmu.c index d48d608517fd..74d69db1ea99 100644 --- a/tools/perf/arch/x86/util/pmu.c +++ b/tools/perf/arch/x86/util/pmu.c @@ -1,12 +1,30 @@ // SPDX-License-Identifier: GPL-2.0 #include <string.h> - +#include <stdio.h> +#include <sys/types.h> +#include <dirent.h> +#include <fcntl.h> #include <linux/stddef.h> #include <linux/perf_event.h> +#include <linux/zalloc.h> +#include <api/fs/fs.h> +#include <errno.h> #include "../../../util/intel-pt.h" #include "../../../util/intel-bts.h" #include "../../../util/pmu.h" +#include "../../../util/fncache.h" + +#define TEMPLATE_ALIAS "%s/bus/event_source/devices/%s/alias" + +struct pmu_alias { + char *name; + char *alias; + struct list_head list; +}; + +static LIST_HEAD(pmu_alias_name_list); +static bool cached_list; struct perf_event_attr *perf_pmu__get_default_config(struct perf_pmu *pmu __maybe_unused) { @@ -18,3 +36,138 @@ struct perf_event_attr *perf_pmu__get_default_config(struct perf_pmu *pmu __mayb #endif return NULL; } + +static void pmu_alias__delete(struct pmu_alias *pmu_alias) +{ + if (!pmu_alias) + return; + + zfree(&pmu_alias->name); + zfree(&pmu_alias->alias); + free(pmu_alias); +} + +static struct pmu_alias *pmu_alias__new(char *name, char *alias) +{ + struct pmu_alias *pmu_alias = zalloc(sizeof(*pmu_alias)); + + if (pmu_alias) { + pmu_alias->name = strdup(name); + if (!pmu_alias->name) + goto out_delete; + + pmu_alias->alias = strdup(alias); + if (!pmu_alias->alias) + goto out_delete; + } + return pmu_alias; + +out_delete: + pmu_alias__delete(pmu_alias); + return NULL; +} + +static int setup_pmu_alias_list(void) +{ + char path[PATH_MAX]; + DIR *dir; + struct dirent *dent; + const char *sysfs = sysfs__mountpoint(); + struct pmu_alias *pmu_alias; + char buf[MAX_PMU_NAME_LEN]; + FILE *file; + int ret = -ENOMEM; + + if (!sysfs) + return -1; + + snprintf(path, PATH_MAX, + "%s" 
EVENT_SOURCE_DEVICE_PATH, sysfs); + + dir = opendir(path); + if (!dir) + return -errno; + + while ((dent = readdir(dir))) { + if (!strcmp(dent->d_name, ".") || + !strcmp(dent->d_name, "..")) + continue; + + snprintf(path, PATH_MAX, + TEMPLATE_ALIAS, sysfs, dent->d_name); + + if (!file_available(path)) + continue; + + file = fopen(path, "r"); + if (!file) + continue; + + if (!fgets(buf, sizeof(buf), file)) { + fclose(file); + continue; + } + + fclose(file); + + /* Remove the last '\n' */ + buf[strlen(buf) - 1] = 0; + + pmu_alias = pmu_alias__new(dent->d_name, buf); + if (!pmu_alias) + goto close_dir; + + list_add_tail(&pmu_alias->list, &pmu_alias_name_list); + } + + ret = 0; + +close_dir: + closedir(dir); + return ret; +} + +static char *__pmu_find_real_name(const char *name) +{ + struct pmu_alias *pmu_alias; + + list_for_each_entry(pmu_alias, &pmu_alias_name_list, list) { + if (!strcmp(name, pmu_alias->alias)) + return pmu_alias->name; + } + + return (char *)name; +} + +char *pmu_find_real_name(const char *name) +{ + if (cached_list) + return __pmu_find_real_name(name); + + setup_pmu_alias_list(); + cached_list = true; + + return __pmu_find_real_name(name); +} + +static char *__pmu_find_alias_name(const char *name) +{ + struct pmu_alias *pmu_alias; + + list_for_each_entry(pmu_alias, &pmu_alias_name_list, list) { + if (!strcmp(name, pmu_alias->name)) + return pmu_alias->alias; + } + return NULL; +} + +char *pmu_find_alias_name(const char *name) +{ + if (cached_list) + return __pmu_find_alias_name(name); + + setup_pmu_alias_list(); + cached_list = true; + + return __pmu_find_alias_name(name); +} diff --git a/tools/perf/arch/x86/util/topdown.c b/tools/perf/arch/x86/util/topdown.c new file mode 100644 index 000000000000..54810f9acd6f --- /dev/null +++ b/tools/perf/arch/x86/util/topdown.c @@ -0,0 +1,126 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <stdio.h> +#include "api/fs/fs.h" +#include "util/pmu.h" +#include "util/topdown.h" +#include "util/evlist.h" +#include "util/debug.h" +#include "util/pmu-hybrid.h" +#include "topdown.h" +#include "evsel.h" + +#define TOPDOWN_L1_EVENTS "{slots,topdown-retiring,topdown-bad-spec,topdown-fe-bound,topdown-be-bound}" +#define TOPDOWN_L1_EVENTS_CORE "{slots,cpu_core/topdown-retiring/,cpu_core/topdown-bad-spec/,cpu_core/topdown-fe-bound/,cpu_core/topdown-be-bound/}" +#define TOPDOWN_L2_EVENTS "{slots,topdown-retiring,topdown-bad-spec,topdown-fe-bound,topdown-be-bound,topdown-heavy-ops,topdown-br-mispredict,topdown-fetch-lat,topdown-mem-bound}" +#define TOPDOWN_L2_EVENTS_CORE "{slots,cpu_core/topdown-retiring/,cpu_core/topdown-bad-spec/,cpu_core/topdown-fe-bound/,cpu_core/topdown-be-bound/,cpu_core/topdown-heavy-ops/,cpu_core/topdown-br-mispredict/,cpu_core/topdown-fetch-lat/,cpu_core/topdown-mem-bound/}" + +/* Check whether there is a PMU which supports the perf metrics. */ +bool topdown_sys_has_perf_metrics(void) +{ + static bool has_perf_metrics; + static bool cached; + struct perf_pmu *pmu; + + if (cached) + return has_perf_metrics; + + /* + * The perf metrics feature is a core PMU feature. + * The PERF_TYPE_RAW type is the type of a core PMU. + * The slots event is only available when the core PMU + * supports the perf metrics feature. + */ + pmu = perf_pmu__find_by_type(PERF_TYPE_RAW); + if (pmu && pmu_have_event(pmu->name, "slots")) + has_perf_metrics = true; + + cached = true; + return has_perf_metrics; +} + +/* + * Check whether we can use a group for top down. + * Without a group may get bad results due to multiplexing. 
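The pmu.c changes above resolve PMU names through per-device sysfs "alias" files and cache the mappings in pmu_alias_name_list. A hypothetical caller sketch (pmu_find_real_name() and pmu_find_alias_name() come from the patch; the demo function and the "my_pmu_alias" string are made up):

#include <stdio.h>

char *pmu_find_real_name(const char *name);     /* from the patch */
char *pmu_find_alias_name(const char *name);    /* from the patch */

void pmu_alias_demo(void)
{
        /*
         * The first lookup scans <sysfs>/bus/event_source/devices/<pmu>/alias
         * and fills pmu_alias_name_list; later lookups hit the cached list.
         */
        char *real = pmu_find_real_name("my_pmu_alias");
        char *alias = pmu_find_alias_name(real);

        printf("real: %s, alias: %s\n", real, alias ? alias : "(none)");
}
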
+ */ +bool arch_topdown_check_group(bool *warn) +{ + int n; + + if (sysctl__read_int("kernel/nmi_watchdog", &n) < 0) + return false; + if (n > 0) { + *warn = true; + return false; + } + return true; +} + +void arch_topdown_group_warn(void) +{ + fprintf(stderr, + "nmi_watchdog enabled with topdown. May give wrong results.\n" + "Disable with echo 0 > /proc/sys/kernel/nmi_watchdog\n"); +} + +#define TOPDOWN_SLOTS 0x0400 + +/* + * Check whether a topdown group supports sample-read. + * + * Only Topdown metric supports sample-read. The slots + * event must be the leader of the topdown group. + */ + +bool arch_topdown_sample_read(struct evsel *leader) +{ + if (!evsel__sys_has_perf_metrics(leader)) + return false; + + if (leader->core.attr.config == TOPDOWN_SLOTS) + return true; + + return false; +} + +const char *arch_get_topdown_pmu_name(struct evlist *evlist, bool warn) +{ + const char *pmu_name; + + if (!perf_pmu__has_hybrid()) + return "cpu"; + + if (!evlist->hybrid_pmu_name) { + if (warn) + pr_warning("WARNING: default to use cpu_core topdown events\n"); + evlist->hybrid_pmu_name = perf_pmu__hybrid_type_to_pmu("core"); + } + + pmu_name = evlist->hybrid_pmu_name; + + return pmu_name; +} + +int topdown_parse_events(struct evlist *evlist) +{ + const char *topdown_events; + const char *pmu_name; + + if (!topdown_sys_has_perf_metrics()) + return 0; + + pmu_name = arch_get_topdown_pmu_name(evlist, false); + + if (pmu_have_event(pmu_name, "topdown-heavy-ops")) { + if (!strcmp(pmu_name, "cpu_core")) + topdown_events = TOPDOWN_L2_EVENTS_CORE; + else + topdown_events = TOPDOWN_L2_EVENTS; + } else { + if (!strcmp(pmu_name, "cpu_core")) + topdown_events = TOPDOWN_L1_EVENTS_CORE; + else + topdown_events = TOPDOWN_L1_EVENTS; + } + + return parse_event(evlist, topdown_events); +} diff --git a/tools/perf/arch/x86/util/topdown.h b/tools/perf/arch/x86/util/topdown.h new file mode 100644 index 000000000000..7eb81f042838 --- /dev/null +++ b/tools/perf/arch/x86/util/topdown.h @@ -0,0 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _TOPDOWN_H +#define _TOPDOWN_H 1 + +bool topdown_sys_has_perf_metrics(void); +int topdown_parse_events(struct evlist *evlist); + +#endif diff --git a/tools/perf/arch/x86/util/tsc.c b/tools/perf/arch/x86/util/tsc.c index 2f55afb14e1f..eb2b5195bd02 100644 --- a/tools/perf/arch/x86/util/tsc.c +++ b/tools/perf/arch/x86/util/tsc.c @@ -1,84 +1,90 @@ // SPDX-License-Identifier: GPL-2.0 -#include <stdbool.h> -#include <errno.h> - -#include <linux/stddef.h> -#include <linux/perf_event.h> - #include <linux/types.h> -#include <asm/barrier.h> +#include <math.h> +#include <string.h> + #include "../../../util/debug.h" -#include "../../../util/event.h" -#include "../../../util/synthetic-events.h" #include "../../../util/tsc.h" +#include "cpuid.h" -int perf_read_tsc_conversion(const struct perf_event_mmap_page *pc, - struct perf_tsc_conversion *tc) +u64 rdtsc(void) { - bool cap_user_time_zero; - u32 seq; - int i = 0; - - while (1) { - seq = pc->lock; - rmb(); - tc->time_mult = pc->time_mult; - tc->time_shift = pc->time_shift; - tc->time_zero = pc->time_zero; - cap_user_time_zero = pc->cap_user_time_zero; - rmb(); - if (pc->lock == seq && !(seq & 1)) - break; - if (++i > 10000) { - pr_debug("failed to get perf_event_mmap_page lock\n"); - return -EINVAL; - } - } + unsigned int low, high; - if (!cap_user_time_zero) - return -EOPNOTSUPP; + asm volatile("rdtsc" : "=a" (low), "=d" (high)); - return 0; + return low | ((u64)high) << 32; } -u64 rdtsc(void) +/* + * Derive the TSC frequency in Hz from 
the /proc/cpuinfo, for example: + * ... + * model name : Intel(R) Xeon(R) Gold 6154 CPU @ 3.00GHz + * ... + * will return 3000000000. + */ +static double cpuinfo_tsc_freq(void) { - unsigned int low, high; + double result = 0; + FILE *cpuinfo; + char *line = NULL; + size_t len = 0; - asm volatile("rdtsc" : "=a" (low), "=d" (high)); + cpuinfo = fopen("/proc/cpuinfo", "r"); + if (!cpuinfo) { + pr_err("Failed to read /proc/cpuinfo for TSC frequency"); + return NAN; + } + while (getline(&line, &len, cpuinfo) > 0) { + if (!strncmp(line, "model name", 10)) { + char *pos = strstr(line + 11, " @ "); - return low | ((u64)high) << 32; + if (pos && sscanf(pos, " @ %lfGHz", &result) == 1) { + result *= 1000000000; + goto out; + } + } + } +out: + if (fpclassify(result) == FP_ZERO) + pr_err("Failed to find TSC frequency in /proc/cpuinfo"); + + free(line); + fclose(cpuinfo); + return result; } -int perf_event__synth_time_conv(const struct perf_event_mmap_page *pc, - struct perf_tool *tool, - perf_event__handler_t process, - struct machine *machine) +double arch_get_tsc_freq(void) { - union perf_event event = { - .time_conv = { - .header = { - .type = PERF_RECORD_TIME_CONV, - .size = sizeof(struct perf_record_time_conv), - }, - }, - }; - struct perf_tsc_conversion tc; - int err; + unsigned int a, b, c, d, lvl; + static bool cached; + static double tsc; + char vendor[16]; - if (!pc) - return 0; - err = perf_read_tsc_conversion(pc, &tc); - if (err == -EOPNOTSUPP) + if (cached) + return tsc; + + cached = true; + get_cpuid_0(vendor, &lvl); + if (!strstr(vendor, "Intel")) return 0; - if (err) - return err; - pr_debug2("Synthesizing TSC conversion information\n"); + /* + * Don't support Time Stamp Counter and + * Nominal Core Crystal Clock Information Leaf. + */ + if (lvl < 0x15) { + tsc = cpuinfo_tsc_freq(); + return tsc; + } - event.time_conv.time_mult = tc.time_mult; - event.time_conv.time_shift = tc.time_shift; - event.time_conv.time_zero = tc.time_zero; + cpuid(0x15, 0, &a, &b, &c, &d); + /* TSC frequency is not enumerated */ + if (!a || !b || !c) { + tsc = cpuinfo_tsc_freq(); + return tsc; + } - return process(tool, &event, NULL, machine); + tsc = (double)c * (double)b / (double)a; + return tsc; } diff --git a/tools/perf/arch/x86/util/unwind-libdw.c b/tools/perf/arch/x86/util/unwind-libdw.c index fda8f4206ee4..eea2bf87232b 100644 --- a/tools/perf/arch/x86/util/unwind-libdw.c +++ b/tools/perf/arch/x86/util/unwind-libdw.c @@ -1,8 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 #include <elfutils/libdwfl.h> -#include "../../util/unwind-libdw.h" -#include "../../util/perf_regs.h" -#include "../../util/event.h" +#include "../../../util/unwind-libdw.h" +#include "../../../util/perf_regs.h" +#include "../../../util/event.h" bool libdw__arch_set_initial_registers(Dwfl_Thread *thread, void *arg) {
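Finally, a worked sketch of the CPUID leaf 0x15 computation used by arch_get_tsc_freq() in the tsc.c hunk above: EBX/EAX is the ratio of the TSC to the core crystal clock and ECX is the crystal clock frequency in Hz, so TSC Hz = ECX * EBX / EAX. The register values below are illustrative, not read from real hardware.

#include <stdio.h>

int main(void)
{
        /* Illustrative leaf 0x15 output: 25 MHz crystal, 176/2 ratio. */
        unsigned int a = 2, b = 176, c = 25000000;
        double tsc = (double)c * (double)b / (double)a;

        printf("TSC frequency: %.0f Hz\n", tsc);        /* 2200000000 Hz */
        return 0;
}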