Diffstat (limited to 'tools/perf/util')
-rw-r--r--  tools/perf/util/Build | 5
-rw-r--r--  tools/perf/util/annotate.c | 46
-rw-r--r--  tools/perf/util/annotate.h | 2
-rw-r--r--  tools/perf/util/bpf-loader.c | 2
-rw-r--r--  tools/perf/util/bpf_counter.c | 544
-rw-r--r--  tools/perf/util/bpf_counter.h | 9
-rw-r--r--  tools/perf/util/bpf_skel/bperf.h | 14
-rw-r--r--  tools/perf/util/bpf_skel/bperf_follower.bpf.c | 69
-rw-r--r--  tools/perf/util/bpf_skel/bperf_leader.bpf.c | 46
-rw-r--r--  tools/perf/util/bpf_skel/bperf_u.h | 14
-rw-r--r--  tools/perf/util/bpf_skel/bpf_prog_profiler.bpf.c | 3
-rw-r--r--  tools/perf/util/call-path.h | 2
-rw-r--r--  tools/perf/util/callchain.c | 2
-rw-r--r--  tools/perf/util/config.c | 9
-rw-r--r--  tools/perf/util/cs-etm-decoder/cs-etm-decoder.c | 40
-rw-r--r--  tools/perf/util/cs-etm.c | 285
-rw-r--r--  tools/perf/util/cs-etm.h | 36
-rw-r--r--  tools/perf/util/data-convert-bt.c | 4
-rw-r--r--  tools/perf/util/data-convert-bt.h | 11
-rw-r--r--  tools/perf/util/data-convert-json.c | 384
-rw-r--r--  tools/perf/util/data-convert.h | 10
-rw-r--r--  tools/perf/util/demangle-java.c | 4
-rw-r--r--  tools/perf/util/demangle-ocaml.c | 12
-rw-r--r--  tools/perf/util/dso.h | 2
-rw-r--r--  tools/perf/util/dwarf-aux.c | 6
-rw-r--r--  tools/perf/util/dwarf-aux.h | 2
-rw-r--r--  tools/perf/util/dwarf-regs.c | 3
-rw-r--r--  tools/perf/util/event.h | 3
-rw-r--r--  tools/perf/util/events_stats.h | 15
-rw-r--r--  tools/perf/util/evlist-hybrid.c | 88
-rw-r--r--  tools/perf/util/evlist-hybrid.h | 14
-rw-r--r--  tools/perf/util/evlist.c | 38
-rw-r--r--  tools/perf/util/evlist.h | 2
-rw-r--r--  tools/perf/util/evsel.c | 38
-rw-r--r--  tools/perf/util/evsel.h | 34
-rw-r--r--  tools/perf/util/expr.h | 2
-rw-r--r--  tools/perf/util/header.c | 18
-rw-r--r--  tools/perf/util/hist.c | 37
-rw-r--r--  tools/perf/util/hist.h | 8
-rw-r--r--  tools/perf/util/intel-pt.c | 2
-rw-r--r--  tools/perf/util/iostat.c | 53
-rw-r--r--  tools/perf/util/iostat.h | 47
-rw-r--r--  tools/perf/util/jitdump.c | 30
-rw-r--r--  tools/perf/util/levenshtein.c | 2
-rw-r--r--  tools/perf/util/libunwind/arm64.c | 2
-rw-r--r--  tools/perf/util/libunwind/x86_32.c | 2
-rw-r--r--  tools/perf/util/llvm-utils.c | 2
-rw-r--r--  tools/perf/util/machine.c | 12
-rw-r--r--  tools/perf/util/map.h | 4
-rw-r--r--  tools/perf/util/mem-events.h | 3
-rw-r--r--  tools/perf/util/metricgroup.c | 14
-rw-r--r--  tools/perf/util/metricgroup.h | 4
-rw-r--r--  tools/perf/util/parse-events-hybrid.c | 178
-rw-r--r--  tools/perf/util/parse-events-hybrid.h | 23
-rw-r--r--  tools/perf/util/parse-events.c | 115
-rw-r--r--  tools/perf/util/parse-events.h | 9
-rw-r--r--  tools/perf/util/parse-events.l | 2
-rw-r--r--  tools/perf/util/parse-events.y | 9
-rw-r--r--  tools/perf/util/pmu-hybrid.c | 89
-rw-r--r--  tools/perf/util/pmu-hybrid.h | 22
-rw-r--r--  tools/perf/util/pmu.c | 73
-rw-r--r--  tools/perf/util/pmu.h | 8
-rw-r--r--  tools/perf/util/probe-event.c | 4
-rw-r--r--  tools/perf/util/probe-finder.c | 6
-rw-r--r--  tools/perf/util/python-ext-sources | 2
-rw-r--r--  tools/perf/util/python.c | 6
-rw-r--r--  tools/perf/util/s390-cpumsf.c | 10
-rw-r--r--  tools/perf/util/s390-sample-raw.c | 4
-rw-r--r--  tools/perf/util/scripting-engines/trace-event-python.c | 2
-rw-r--r--  tools/perf/util/session.c | 41
-rw-r--r--  tools/perf/util/session.h | 3
-rw-r--r--  tools/perf/util/sort.c | 60
-rw-r--r--  tools/perf/util/sort.h | 2
-rw-r--r--  tools/perf/util/stat-display.c | 64
-rw-r--r--  tools/perf/util/stat-shadow.c | 21
-rw-r--r--  tools/perf/util/stat.c | 3
-rw-r--r--  tools/perf/util/stat.h | 8
-rw-r--r--  tools/perf/util/strbuf.h | 2
-rw-r--r--  tools/perf/util/strfilter.h | 4
-rw-r--r--  tools/perf/util/symbol-elf.c | 2
-rw-r--r--  tools/perf/util/symbol_fprintf.c | 2
-rw-r--r--  tools/perf/util/synthetic-events.c | 4
-rw-r--r--  tools/perf/util/syscalltbl.c | 4
-rw-r--r--  tools/perf/util/target.h | 7
-rw-r--r--  tools/perf/util/thread-stack.h | 1
-rw-r--r--  tools/perf/util/tsc.c | 30
-rw-r--r--  tools/perf/util/tsc.h | 4
-rw-r--r--  tools/perf/util/units.c | 21
-rw-r--r--  tools/perf/util/units.h | 1
-rw-r--r--  tools/perf/util/unwind-libunwind-local.c | 2
90 files changed, 2588 insertions(+), 300 deletions(-)
diff --git a/tools/perf/util/Build b/tools/perf/util/Build
index e3e12f9d4733..8c0d9f368ebc 100644
--- a/tools/perf/util/Build
+++ b/tools/perf/util/Build
@@ -10,6 +10,7 @@ perf-y += db-export.o
perf-y += env.o
perf-y += event.o
perf-y += evlist.o
+perf-y += evlist-hybrid.o
perf-y += sideband_evlist.o
perf-y += evsel.o
perf-y += evsel_fprintf.o
@@ -23,6 +24,7 @@ perf-y += llvm-utils.o
perf-y += mmap.o
perf-y += memswap.o
perf-y += parse-events.o
+perf-y += parse-events-hybrid.o
perf-y += perf_regs.o
perf-y += path.o
perf-y += print_binary.o
@@ -69,6 +71,7 @@ perf-y += parse-events-bison.o
perf-y += pmu.o
perf-y += pmu-flex.o
perf-y += pmu-bison.o
+perf-y += pmu-hybrid.o
perf-y += trace-event-read.o
perf-y += trace-event-info.o
perf-y += trace-event-scripting.o
@@ -102,6 +105,7 @@ perf-y += rwsem.o
perf-y += thread-stack.o
perf-y += spark.o
perf-y += topdown.o
+perf-y += iostat.o
perf-y += stream.o
perf-$(CONFIG_AUXTRACE) += auxtrace.o
perf-$(CONFIG_AUXTRACE) += intel-pt-decoder/
@@ -164,6 +168,7 @@ perf-$(CONFIG_LIBUNWIND_X86) += libunwind/x86_32.o
perf-$(CONFIG_LIBUNWIND_AARCH64) += libunwind/arm64.o
perf-$(CONFIG_LIBBABELTRACE) += data-convert-bt.o
+perf-y += data-convert-json.o
perf-y += scripting-engines/
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index e60841b86d27..abe1499a9164 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -1161,6 +1161,7 @@ struct annotate_args {
s64 offset;
char *line;
int line_nr;
+ char *fileloc;
};
static void annotation_line__init(struct annotation_line *al,
@@ -1170,6 +1171,7 @@ static void annotation_line__init(struct annotation_line *al,
al->offset = args->offset;
al->line = strdup(args->line);
al->line_nr = args->line_nr;
+ al->fileloc = args->fileloc;
al->data_nr = nr;
}
@@ -1366,7 +1368,6 @@ annotation_line__print(struct annotation_line *al, struct symbol *sym, u64 start
{
struct disasm_line *dl = container_of(al, struct disasm_line, al);
static const char *prev_line;
- static const char *prev_color;
if (al->offset != -1) {
double max_percent = 0.0;
@@ -1405,20 +1406,6 @@ annotation_line__print(struct annotation_line *al, struct symbol *sym, u64 start
color = get_percent_color(max_percent);
- /*
- * Also color the filename and line if needed, with
- * the same color than the percentage. Don't print it
- * twice for close colored addr with the same filename:line
- */
- if (al->path) {
- if (!prev_line || strcmp(prev_line, al->path)
- || color != prev_color) {
- color_fprintf(stdout, color, " %s", al->path);
- prev_line = al->path;
- prev_color = color;
- }
- }
-
for (i = 0; i < nr_percent; i++) {
struct annotation_data *data = &al->data[i];
double percent;
@@ -1439,6 +1426,19 @@ annotation_line__print(struct annotation_line *al, struct symbol *sym, u64 start
printf(" : ");
disasm_line__print(dl, start, addr_fmt_width);
+
+ /*
+ * Also color the filename and line if needed, with
+ * the same color than the percentage. Don't print it
+ * twice for close colored addr with the same filename:line
+ */
+ if (al->path) {
+ if (!prev_line || strcmp(prev_line, al->path)) {
+ color_fprintf(stdout, color, " // %s", al->path);
+ prev_line = al->path;
+ }
+ }
+
printf("\n");
} else if (max_lines && printed >= max_lines)
return 1;
@@ -1454,7 +1454,7 @@ annotation_line__print(struct annotation_line *al, struct symbol *sym, u64 start
if (!*al->line)
printf(" %*s:\n", width, " ");
else
- printf(" %*s: %*s %s\n", width, " ", addr_fmt_width, " ", al->line);
+ printf(" %*s: %-*d %s\n", width, " ", addr_fmt_width, al->line_nr, al->line);
}
return 0;
@@ -1482,7 +1482,7 @@ annotation_line__print(struct annotation_line *al, struct symbol *sym, u64 start
*/
static int symbol__parse_objdump_line(struct symbol *sym,
struct annotate_args *args,
- char *parsed_line, int *line_nr)
+ char *parsed_line, int *line_nr, char **fileloc)
{
struct map *map = args->ms.map;
struct annotation *notes = symbol__annotation(sym);
@@ -1494,6 +1494,7 @@ static int symbol__parse_objdump_line(struct symbol *sym,
/* /filename:linenr ? Save line number and ignore. */
if (regexec(&file_lineno, parsed_line, 2, match, 0) == 0) {
*line_nr = atoi(parsed_line + match[1].rm_so);
+ *fileloc = strdup(parsed_line);
return 0;
}
@@ -1513,6 +1514,7 @@ static int symbol__parse_objdump_line(struct symbol *sym,
args->offset = offset;
args->line = parsed_line;
args->line_nr = *line_nr;
+ args->fileloc = *fileloc;
args->ms.sym = sym;
dl = disasm_line__new(args);
@@ -1807,6 +1809,7 @@ static int symbol__disassemble_bpf(struct symbol *sym,
args->offset = -1;
args->line = strdup(srcline);
args->line_nr = 0;
+ args->fileloc = NULL;
args->ms.sym = sym;
dl = disasm_line__new(args);
if (dl) {
@@ -1818,6 +1821,7 @@ static int symbol__disassemble_bpf(struct symbol *sym,
args->offset = pc;
args->line = buf + prev_buf_size;
args->line_nr = 0;
+ args->fileloc = NULL;
args->ms.sym = sym;
dl = disasm_line__new(args);
if (dl)
@@ -1852,6 +1856,7 @@ symbol__disassemble_bpf_image(struct symbol *sym,
args->offset = -1;
args->line = strdup("to be implemented");
args->line_nr = 0;
+ args->fileloc = NULL;
dl = disasm_line__new(args);
if (dl)
annotation_line__add(&dl->al, &notes->src->source);
@@ -1933,6 +1938,7 @@ static int symbol__disassemble(struct symbol *sym, struct annotate_args *args)
bool delete_extract = false;
bool decomp = false;
int lineno = 0;
+ char *fileloc = NULL;
int nline;
char *line;
size_t line_len;
@@ -2060,7 +2066,7 @@ static int symbol__disassemble(struct symbol *sym, struct annotate_args *args)
* See disasm_line__new() and struct disasm_line::line_nr.
*/
if (symbol__parse_objdump_line(sym, args, expanded_line,
- &lineno) < 0)
+ &lineno, &fileloc) < 0)
break;
nline++;
}
@@ -3144,6 +3150,10 @@ static int annotation__config(const char *var, const char *value, void *data)
opt->use_offset = perf_config_bool("use_offset", value);
} else if (!strcmp(var, "annotate.disassembler_style")) {
opt->disassembler_style = value;
+ } else if (!strcmp(var, "annotate.demangle")) {
+ symbol_conf.demangle = perf_config_bool("demangle", value);
+ } else if (!strcmp(var, "annotate.demangle_kernel")) {
+ symbol_conf.demangle_kernel = perf_config_bool("demangle_kernel", value);
} else {
pr_debug("%s variable unknown, ignoring...", var);
}
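
[The two keys handled above are read through perf_config_bool() like any other
perfconfig entry. A minimal sketch of a user config enabling them, where the
key names come straight from the hunk and the [annotate] section plus the
~/.perfconfig location follow perf's usual config conventions:

	[annotate]
		demangle = true
		demangle_kernel = true
]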
diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h
index 096cdaf21b01..3757416bcf46 100644
--- a/tools/perf/util/annotate.h
+++ b/tools/perf/util/annotate.h
@@ -84,6 +84,7 @@ struct annotation_options {
print_lines,
full_path,
show_linenr,
+ show_fileloc,
show_nr_jumps,
show_minmax_cycle,
show_asm_raw,
@@ -136,6 +137,7 @@ struct annotation_line {
s64 offset;
char *line;
int line_nr;
+ char *fileloc;
int jump_sources;
float ipc;
u64 cycles;
diff --git a/tools/perf/util/bpf-loader.c b/tools/perf/util/bpf-loader.c
index 9087f1bffd3d..fbb3c4057c30 100644
--- a/tools/perf/util/bpf-loader.c
+++ b/tools/perf/util/bpf-loader.c
@@ -671,7 +671,7 @@ int bpf__probe(struct bpf_object *obj)
* After probing, let's consider prologue, which
* adds program fetcher to BPF programs.
*
- * hook_load_preprocessorr() hooks pre-processor
+ * hook_load_preprocessor() hooks pre-processor
* to bpf_program, let it generate prologue
* dynamically during loading.
*/
diff --git a/tools/perf/util/bpf_counter.c b/tools/perf/util/bpf_counter.c
index 04f89120b323..ddb52f748c8e 100644
--- a/tools/perf/util/bpf_counter.c
+++ b/tools/perf/util/bpf_counter.c
@@ -5,6 +5,7 @@
#include <assert.h>
#include <limits.h>
#include <unistd.h>
+#include <sys/file.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <linux/err.h>
@@ -12,14 +13,24 @@
#include <bpf/bpf.h>
#include <bpf/btf.h>
#include <bpf/libbpf.h>
+#include <api/fs/fs.h>
+#include <perf/bpf_perf.h>
#include "bpf_counter.h"
#include "counts.h"
#include "debug.h"
#include "evsel.h"
+#include "evlist.h"
#include "target.h"
+#include "cpumap.h"
+#include "thread_map.h"
#include "bpf_skel/bpf_prog_profiler.skel.h"
+#include "bpf_skel/bperf_u.h"
+#include "bpf_skel/bperf_leader.skel.h"
+#include "bpf_skel/bperf_follower.skel.h"
+
+#define ATTR_MAP_SIZE 16
static inline void *u64_to_ptr(__u64 ptr)
{
@@ -204,6 +215,17 @@ static int bpf_program_profiler__enable(struct evsel *evsel)
return 0;
}
+static int bpf_program_profiler__disable(struct evsel *evsel)
+{
+ struct bpf_counter *counter;
+
+ list_for_each_entry(counter, &evsel->bpf_counter_list, list) {
+ assert(counter->skel != NULL);
+ bpf_prog_profiler_bpf__detach(counter->skel);
+ }
+ return 0;
+}
+
static int bpf_program_profiler__read(struct evsel *evsel)
{
// perf_cpu_map uses /sys/devices/system/cpu/online
@@ -269,22 +291,527 @@ static int bpf_program_profiler__install_pe(struct evsel *evsel, int cpu,
struct bpf_counter_ops bpf_program_profiler_ops = {
.load = bpf_program_profiler__load,
.enable = bpf_program_profiler__enable,
+ .disable = bpf_program_profiler__disable,
.read = bpf_program_profiler__read,
.destroy = bpf_program_profiler__destroy,
.install_pe = bpf_program_profiler__install_pe,
};
+static __u32 bpf_link_get_id(int fd)
+{
+ struct bpf_link_info link_info = {0};
+ __u32 link_info_len = sizeof(link_info);
+
+ bpf_obj_get_info_by_fd(fd, &link_info, &link_info_len);
+ return link_info.id;
+}
+
+static __u32 bpf_link_get_prog_id(int fd)
+{
+ struct bpf_link_info link_info = {0};
+ __u32 link_info_len = sizeof(link_info);
+
+ bpf_obj_get_info_by_fd(fd, &link_info, &link_info_len);
+ return link_info.prog_id;
+}
+
+static __u32 bpf_map_get_id(int fd)
+{
+ struct bpf_map_info map_info = {0};
+ __u32 map_info_len = sizeof(map_info);
+
+ bpf_obj_get_info_by_fd(fd, &map_info, &map_info_len);
+ return map_info.id;
+}
+
+static bool bperf_attr_map_compatible(int attr_map_fd)
+{
+ struct bpf_map_info map_info = {0};
+ __u32 map_info_len = sizeof(map_info);
+ int err;
+
+ err = bpf_obj_get_info_by_fd(attr_map_fd, &map_info, &map_info_len);
+
+ if (err)
+ return false;
+ return (map_info.key_size == sizeof(struct perf_event_attr)) &&
+ (map_info.value_size == sizeof(struct perf_event_attr_map_entry));
+}
+
+static int bperf_lock_attr_map(struct target *target)
+{
+ char path[PATH_MAX];
+ int map_fd, err;
+
+ if (target->attr_map) {
+ scnprintf(path, PATH_MAX, "%s", target->attr_map);
+ } else {
+ scnprintf(path, PATH_MAX, "%s/fs/bpf/%s", sysfs__mountpoint(),
+ BPF_PERF_DEFAULT_ATTR_MAP_PATH);
+ }
+
+ if (access(path, F_OK)) {
+ map_fd = bpf_create_map(BPF_MAP_TYPE_HASH,
+ sizeof(struct perf_event_attr),
+ sizeof(struct perf_event_attr_map_entry),
+ ATTR_MAP_SIZE, 0);
+ if (map_fd < 0)
+ return -1;
+
+ err = bpf_obj_pin(map_fd, path);
+ if (err) {
+ /* someone pinned the map in parallel? */
+ close(map_fd);
+ map_fd = bpf_obj_get(path);
+ if (map_fd < 0)
+ return -1;
+ }
+ } else {
+ map_fd = bpf_obj_get(path);
+ if (map_fd < 0)
+ return -1;
+ }
+
+ if (!bperf_attr_map_compatible(map_fd)) {
+ close(map_fd);
+ return -1;
+
+ }
+ err = flock(map_fd, LOCK_EX);
+ if (err) {
+ close(map_fd);
+ return -1;
+ }
+ return map_fd;
+}
+
+/* trigger the leader program on a cpu */
+static int bperf_trigger_reading(int prog_fd, int cpu)
+{
+ DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
+ .ctx_in = NULL,
+ .ctx_size_in = 0,
+ .flags = BPF_F_TEST_RUN_ON_CPU,
+ .cpu = cpu,
+ .retval = 0,
+ );
+
+ return bpf_prog_test_run_opts(prog_fd, &opts);
+}
+
+static int bperf_check_target(struct evsel *evsel,
+ struct target *target,
+ enum bperf_filter_type *filter_type,
+ __u32 *filter_entry_cnt)
+{
+ if (evsel->leader->core.nr_members > 1) {
+ pr_err("bpf managed perf events do not yet support groups.\n");
+ return -1;
+ }
+
+ /* determine filter type based on target */
+ if (target->system_wide) {
+ *filter_type = BPERF_FILTER_GLOBAL;
+ *filter_entry_cnt = 1;
+ } else if (target->cpu_list) {
+ *filter_type = BPERF_FILTER_CPU;
+ *filter_entry_cnt = perf_cpu_map__nr(evsel__cpus(evsel));
+ } else if (target->tid) {
+ *filter_type = BPERF_FILTER_PID;
+ *filter_entry_cnt = perf_thread_map__nr(evsel->core.threads);
+ } else if (target->pid || evsel->evlist->workload.pid != -1) {
+ *filter_type = BPERF_FILTER_TGID;
+ *filter_entry_cnt = perf_thread_map__nr(evsel->core.threads);
+ } else {
+ pr_err("bpf managed perf events do not yet support these targets.\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static struct perf_cpu_map *all_cpu_map;
+
+static int bperf_reload_leader_program(struct evsel *evsel, int attr_map_fd,
+ struct perf_event_attr_map_entry *entry)
+{
+ struct bperf_leader_bpf *skel = bperf_leader_bpf__open();
+ int link_fd, diff_map_fd, err;
+ struct bpf_link *link = NULL;
+
+ if (!skel) {
+ pr_err("Failed to open leader skeleton\n");
+ return -1;
+ }
+
+ bpf_map__resize(skel->maps.events, libbpf_num_possible_cpus());
+ err = bperf_leader_bpf__load(skel);
+ if (err) {
+ pr_err("Failed to load leader skeleton\n");
+ goto out;
+ }
+
+ err = -1;
+ link = bpf_program__attach(skel->progs.on_switch);
+ if (!link) {
+ pr_err("Failed to attach leader program\n");
+ goto out;
+ }
+
+ link_fd = bpf_link__fd(link);
+ diff_map_fd = bpf_map__fd(skel->maps.diff_readings);
+ entry->link_id = bpf_link_get_id(link_fd);
+ entry->diff_map_id = bpf_map_get_id(diff_map_fd);
+ err = bpf_map_update_elem(attr_map_fd, &evsel->core.attr, entry, BPF_ANY);
+ assert(err == 0);
+
+ evsel->bperf_leader_link_fd = bpf_link_get_fd_by_id(entry->link_id);
+ assert(evsel->bperf_leader_link_fd >= 0);
+
+ /*
+ * save leader_skel for install_pe, which is called within
+ * following evsel__open_per_cpu call
+ */
+ evsel->leader_skel = skel;
+ evsel__open_per_cpu(evsel, all_cpu_map, -1);
+
+out:
+ bperf_leader_bpf__destroy(skel);
+ bpf_link__destroy(link);
+ return err;
+}
+
+static int bperf__load(struct evsel *evsel, struct target *target)
+{
+ struct perf_event_attr_map_entry entry = {0xffffffff, 0xffffffff};
+ int attr_map_fd, diff_map_fd = -1, err;
+ enum bperf_filter_type filter_type;
+ __u32 filter_entry_cnt, i;
+
+ if (bperf_check_target(evsel, target, &filter_type, &filter_entry_cnt))
+ return -1;
+
+ if (!all_cpu_map) {
+ all_cpu_map = perf_cpu_map__new(NULL);
+ if (!all_cpu_map)
+ return -1;
+ }
+
+ evsel->bperf_leader_prog_fd = -1;
+ evsel->bperf_leader_link_fd = -1;
+
+ /*
+ * Step 1: hold a fd on the leader program and the bpf_link, if
+ * the program is not already gone, reload the program.
+ * Use flock() to ensure exclusive access to the perf_event_attr
+ * map.
+ */
+ attr_map_fd = bperf_lock_attr_map(target);
+ if (attr_map_fd < 0) {
+ pr_err("Failed to lock perf_event_attr map\n");
+ return -1;
+ }
+
+ err = bpf_map_lookup_elem(attr_map_fd, &evsel->core.attr, &entry);
+ if (err) {
+ err = bpf_map_update_elem(attr_map_fd, &evsel->core.attr, &entry, BPF_ANY);
+ if (err)
+ goto out;
+ }
+
+ evsel->bperf_leader_link_fd = bpf_link_get_fd_by_id(entry.link_id);
+ if (evsel->bperf_leader_link_fd < 0 &&
+ bperf_reload_leader_program(evsel, attr_map_fd, &entry))
+ goto out;
+
+ /*
+ * The bpf_link holds reference to the leader program, and the
+ * leader program holds reference to the maps. Therefore, if
+ * link_id is valid, diff_map_id should also be valid.
+ */
+ evsel->bperf_leader_prog_fd = bpf_prog_get_fd_by_id(
+ bpf_link_get_prog_id(evsel->bperf_leader_link_fd));
+ assert(evsel->bperf_leader_prog_fd >= 0);
+
+ diff_map_fd = bpf_map_get_fd_by_id(entry.diff_map_id);
+ assert(diff_map_fd >= 0);
+
+ /*
+ * bperf uses BPF_PROG_TEST_RUN to get accurate reading. Check
+ * whether the kernel support it
+ */
+ err = bperf_trigger_reading(evsel->bperf_leader_prog_fd, 0);
+ if (err) {
+ pr_err("The kernel does not support test_run for raw_tp BPF programs.\n"
+ "Therefore, --use-bpf might show inaccurate readings\n");
+ goto out;
+ }
+
+ /* Step 2: load the follower skeleton */
+ evsel->follower_skel = bperf_follower_bpf__open();
+ if (!evsel->follower_skel) {
+ pr_err("Failed to open follower skeleton\n");
+ goto out;
+ }
+
+ /* attach fexit program to the leader program */
+ bpf_program__set_attach_target(evsel->follower_skel->progs.fexit_XXX,
+ evsel->bperf_leader_prog_fd, "on_switch");
+
+ /* connect to leader diff_reading map */
+ bpf_map__reuse_fd(evsel->follower_skel->maps.diff_readings, diff_map_fd);
+
+ /* set up reading map */
+ bpf_map__set_max_entries(evsel->follower_skel->maps.accum_readings,
+ filter_entry_cnt);
+ /* set up follower filter based on target */
+ bpf_map__set_max_entries(evsel->follower_skel->maps.filter,
+ filter_entry_cnt);
+ err = bperf_follower_bpf__load(evsel->follower_skel);
+ if (err) {
+ pr_err("Failed to load follower skeleton\n");
+ bperf_follower_bpf__destroy(evsel->follower_skel);
+ evsel->follower_skel = NULL;
+ goto out;
+ }
+
+ for (i = 0; i < filter_entry_cnt; i++) {
+ int filter_map_fd;
+ __u32 key;
+
+ if (filter_type == BPERF_FILTER_PID ||
+ filter_type == BPERF_FILTER_TGID)
+ key = evsel->core.threads->map[i].pid;
+ else if (filter_type == BPERF_FILTER_CPU)
+ key = evsel->core.cpus->map[i];
+ else
+ break;
+
+ filter_map_fd = bpf_map__fd(evsel->follower_skel->maps.filter);
+ bpf_map_update_elem(filter_map_fd, &key, &i, BPF_ANY);
+ }
+
+ evsel->follower_skel->bss->type = filter_type;
+
+ err = bperf_follower_bpf__attach(evsel->follower_skel);
+
+out:
+ if (err && evsel->bperf_leader_link_fd >= 0)
+ close(evsel->bperf_leader_link_fd);
+ if (err && evsel->bperf_leader_prog_fd >= 0)
+ close(evsel->bperf_leader_prog_fd);
+ if (diff_map_fd >= 0)
+ close(diff_map_fd);
+
+ flock(attr_map_fd, LOCK_UN);
+ close(attr_map_fd);
+
+ return err;
+}
+
+static int bperf__install_pe(struct evsel *evsel, int cpu, int fd)
+{
+ struct bperf_leader_bpf *skel = evsel->leader_skel;
+
+ return bpf_map_update_elem(bpf_map__fd(skel->maps.events),
+ &cpu, &fd, BPF_ANY);
+}
+
+/*
+ * trigger the leader prog on each cpu, so the accum_reading map could get
+ * the latest readings.
+ */
+static int bperf_sync_counters(struct evsel *evsel)
+{
+ int num_cpu, i, cpu;
+
+ num_cpu = all_cpu_map->nr;
+ for (i = 0; i < num_cpu; i++) {
+ cpu = all_cpu_map->map[i];
+ bperf_trigger_reading(evsel->bperf_leader_prog_fd, cpu);
+ }
+ return 0;
+}
+
+static int bperf__enable(struct evsel *evsel)
+{
+ evsel->follower_skel->bss->enabled = 1;
+ return 0;
+}
+
+static int bperf__disable(struct evsel *evsel)
+{
+ evsel->follower_skel->bss->enabled = 0;
+ return 0;
+}
+
+static int bperf__read(struct evsel *evsel)
+{
+ struct bperf_follower_bpf *skel = evsel->follower_skel;
+ __u32 num_cpu_bpf = cpu__max_cpu();
+ struct bpf_perf_event_value values[num_cpu_bpf];
+ int reading_map_fd, err = 0;
+ __u32 i, j, num_cpu;
+
+ bperf_sync_counters(evsel);
+ reading_map_fd = bpf_map__fd(skel->maps.accum_readings);
+
+ for (i = 0; i < bpf_map__max_entries(skel->maps.accum_readings); i++) {
+ __u32 cpu;
+
+ err = bpf_map_lookup_elem(reading_map_fd, &i, values);
+ if (err)
+ goto out;
+ switch (evsel->follower_skel->bss->type) {
+ case BPERF_FILTER_GLOBAL:
+ assert(i == 0);
+
+ num_cpu = all_cpu_map->nr;
+ for (j = 0; j < num_cpu; j++) {
+ cpu = all_cpu_map->map[j];
+ perf_counts(evsel->counts, cpu, 0)->val = values[cpu].counter;
+ perf_counts(evsel->counts, cpu, 0)->ena = values[cpu].enabled;
+ perf_counts(evsel->counts, cpu, 0)->run = values[cpu].running;
+ }
+ break;
+ case BPERF_FILTER_CPU:
+ cpu = evsel->core.cpus->map[i];
+ perf_counts(evsel->counts, i, 0)->val = values[cpu].counter;
+ perf_counts(evsel->counts, i, 0)->ena = values[cpu].enabled;
+ perf_counts(evsel->counts, i, 0)->run = values[cpu].running;
+ break;
+ case BPERF_FILTER_PID:
+ case BPERF_FILTER_TGID:
+ perf_counts(evsel->counts, 0, i)->val = 0;
+ perf_counts(evsel->counts, 0, i)->ena = 0;
+ perf_counts(evsel->counts, 0, i)->run = 0;
+
+ for (cpu = 0; cpu < num_cpu_bpf; cpu++) {
+ perf_counts(evsel->counts, 0, i)->val += values[cpu].counter;
+ perf_counts(evsel->counts, 0, i)->ena += values[cpu].enabled;
+ perf_counts(evsel->counts, 0, i)->run += values[cpu].running;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+out:
+ return err;
+}
+
+static int bperf__destroy(struct evsel *evsel)
+{
+ bperf_follower_bpf__destroy(evsel->follower_skel);
+ close(evsel->bperf_leader_prog_fd);
+ close(evsel->bperf_leader_link_fd);
+ return 0;
+}
+
+/*
+ * bperf: share hardware PMCs with BPF
+ *
+ * perf uses performance monitoring counters (PMC) to monitor system
+ * performance. The PMCs are limited hardware resources. For example,
+ * Intel CPUs have 3x fixed PMCs and 4x programmable PMCs per cpu.
+ *
+ * Modern data center systems use these PMCs in many different ways:
+ * system level monitoring, (maybe nested) container level monitoring, per
+ * process monitoring, profiling (in sample mode), etc. In some cases,
+ * there are more active perf_events than available hardware PMCs. To allow
+ * all perf_events to have a chance to run, it is necessary to do expensive
+ * time multiplexing of events.
+ *
+ * On the other hand, many monitoring tools count the common metrics
+ * (cycles, instructions). It is a waste to have multiple tools create
+ * multiple perf_events of "cycles" and occupy multiple PMCs.
+ *
+ * bperf tries to reduce such wastes by allowing multiple perf_events of
+ * "cycles" or "instructions" (at different scopes) to share PMUs. Instead
+ * of having each perf-stat session to read its own perf_events, bperf uses
+ * BPF programs to read the perf_events and aggregate readings to BPF maps.
+ * Then, the perf-stat session(s) reads the values from these BPF maps.
+ *
+ * ||
+ * shared progs and maps <- || -> per session progs and maps
+ * ||
+ * --------------- ||
+ * | perf_events | ||
+ * --------------- fexit || -----------------
+ * | --------||----> | follower prog |
+ * --------------- / || --- -----------------
+ * cs -> | leader prog |/ ||/ | |
+ * --> --------------- /|| -------------- ------------------
+ * / | | / || | filter map | | accum_readings |
+ * / ------------ ------------ || -------------- ------------------
+ * | | prev map | | diff map | || |
+ * | ------------ ------------ || |
+ * \ || |
+ * = \ ==================================================== | ============
+ * \ / user space
+ * \ /
+ * \ /
+ * BPF_PROG_TEST_RUN BPF_MAP_LOOKUP_ELEM
+ * \ /
+ * \ /
+ * \------ perf-stat ----------------------/
+ *
+ * The figure above shows the architecture of bperf. Note that the figure
+ * is divided into 3 regions: shared progs and maps (top left), per session
+ * progs and maps (top right), and user space (bottom).
+ *
+ * The leader prog is triggered on each context switch (cs). The leader
+ * prog reads perf_events and stores the difference (current_reading -
+ * previous_reading) to the diff map. For the same metric, e.g. "cycles",
+ * multiple perf-stat sessions share the same leader prog.
+ *
+ * Each perf-stat session creates a follower prog as fexit program to the
+ * leader prog. It is possible to attach up to BPF_MAX_TRAMP_PROGS (38)
+ * follower progs to the same leader prog. The follower prog checks current
+ * task and processor ID to decide whether to add the value from the diff
+ * map to its accumulated reading map (accum_readings).
+ *
+ * Finally, perf-stat user space reads the value from accum_reading map.
+ *
+ * Besides context switch, it is also necessary to trigger the leader prog
+ * before perf-stat reads the value. Otherwise, the accum_reading map may
+ * not have the latest reading from the perf_events. This is achieved by
+ * triggering the event via sys_bpf(BPF_PROG_TEST_RUN) to each CPU.
+ *
+ * Comment before the definition of struct perf_event_attr_map_entry
+ * describes how different sessions of perf-stat share information about
+ * the leader prog.
+ */
+
+struct bpf_counter_ops bperf_ops = {
+ .load = bperf__load,
+ .enable = bperf__enable,
+ .disable = bperf__disable,
+ .read = bperf__read,
+ .install_pe = bperf__install_pe,
+ .destroy = bperf__destroy,
+};
+
+static inline bool bpf_counter_skip(struct evsel *evsel)
+{
+ return list_empty(&evsel->bpf_counter_list) &&
+ evsel->follower_skel == NULL;
+}
+
int bpf_counter__install_pe(struct evsel *evsel, int cpu, int fd)
{
- if (list_empty(&evsel->bpf_counter_list))
+ if (bpf_counter_skip(evsel))
return 0;
return evsel->bpf_counter_ops->install_pe(evsel, cpu, fd);
}
int bpf_counter__load(struct evsel *evsel, struct target *target)
{
- if (target__has_bpf(target))
+ if (target->bpf_str)
evsel->bpf_counter_ops = &bpf_program_profiler_ops;
+ else if (target->use_bpf || evsel->bpf_counter ||
+ evsel__match_bpf_counter_events(evsel->name))
+ evsel->bpf_counter_ops = &bperf_ops;
if (evsel->bpf_counter_ops)
return evsel->bpf_counter_ops->load(evsel, target);
@@ -293,21 +820,28 @@ int bpf_counter__load(struct evsel *evsel, struct target *target)
int bpf_counter__enable(struct evsel *evsel)
{
- if (list_empty(&evsel->bpf_counter_list))
+ if (bpf_counter_skip(evsel))
return 0;
return evsel->bpf_counter_ops->enable(evsel);
}
+int bpf_counter__disable(struct evsel *evsel)
+{
+ if (bpf_counter_skip(evsel))
+ return 0;
+ return evsel->bpf_counter_ops->disable(evsel);
+}
+
int bpf_counter__read(struct evsel *evsel)
{
- if (list_empty(&evsel->bpf_counter_list))
+ if (bpf_counter_skip(evsel))
return -EAGAIN;
return evsel->bpf_counter_ops->read(evsel);
}
void bpf_counter__destroy(struct evsel *evsel)
{
- if (list_empty(&evsel->bpf_counter_list))
+ if (bpf_counter_skip(evsel))
return;
evsel->bpf_counter_ops->destroy(evsel);
evsel->bpf_counter_ops = NULL;
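
[With the dispatch in bpf_counter__load() above, bperf is selected when the
target asks for BPF-managed counters rather than for profiling a BPF program.
A usage sketch, assuming the perf-stat side of this series, where
--bpf-counters opts an event into bperf and --bpf-attr-map points at a
non-default pinned attr map:

	# session 1: system-wide cycles, counters owned by the shared leader prog
	perf stat --bpf-counters -e cycles -a -- sleep 10

	# session 2, run concurrently: the same event for one task reuses the
	# leader program and its PMC instead of allocating another perf_event
	perf stat --bpf-counters -e cycles -p 1234 -- sleep 10

Both sessions find each other through the pinned perf_event_attr map that
bperf_lock_attr_map() creates and flock()s.]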
diff --git a/tools/perf/util/bpf_counter.h b/tools/perf/util/bpf_counter.h
index 2eca210e5dc1..d6d907c3dcf9 100644
--- a/tools/perf/util/bpf_counter.h
+++ b/tools/perf/util/bpf_counter.h
@@ -18,6 +18,7 @@ typedef int (*bpf_counter_evsel_install_pe_op)(struct evsel *evsel,
struct bpf_counter_ops {
bpf_counter_evsel_target_op load;
bpf_counter_evsel_op enable;
+ bpf_counter_evsel_op disable;
bpf_counter_evsel_op read;
bpf_counter_evsel_op destroy;
bpf_counter_evsel_install_pe_op install_pe;
@@ -32,13 +33,14 @@ struct bpf_counter {
int bpf_counter__load(struct evsel *evsel, struct target *target);
int bpf_counter__enable(struct evsel *evsel);
+int bpf_counter__disable(struct evsel *evsel);
int bpf_counter__read(struct evsel *evsel);
void bpf_counter__destroy(struct evsel *evsel);
int bpf_counter__install_pe(struct evsel *evsel, int cpu, int fd);
#else /* HAVE_BPF_SKEL */
-#include<linux/err.h>
+#include <linux/err.h>
static inline int bpf_counter__load(struct evsel *evsel __maybe_unused,
struct target *target __maybe_unused)
@@ -51,6 +53,11 @@ static inline int bpf_counter__enable(struct evsel *evsel __maybe_unused)
return 0;
}
+static inline int bpf_counter__disable(struct evsel *evsel __maybe_unused)
+{
+ return 0;
+}
+
static inline int bpf_counter__read(struct evsel *evsel __maybe_unused)
{
return -EAGAIN;
diff --git a/tools/perf/util/bpf_skel/bperf.h b/tools/perf/util/bpf_skel/bperf.h
new file mode 100644
index 000000000000..186a5551ddb9
--- /dev/null
+++ b/tools/perf/util/bpf_skel/bperf.h
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+// Copyright (c) 2021 Facebook
+
+#ifndef __BPERF_STAT_H
+#define __BPERF_STAT_H
+
+typedef struct {
+ __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(struct bpf_perf_event_value));
+ __uint(max_entries, 1);
+} reading_map;
+
+#endif /* __BPERF_STAT_H */
diff --git a/tools/perf/util/bpf_skel/bperf_follower.bpf.c b/tools/perf/util/bpf_skel/bperf_follower.bpf.c
new file mode 100644
index 000000000000..b8fa3cb2da23
--- /dev/null
+++ b/tools/perf/util/bpf_skel/bperf_follower.bpf.c
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+// Copyright (c) 2021 Facebook
+#include <linux/bpf.h>
+#include <linux/perf_event.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bperf.h"
+#include "bperf_u.h"
+
+reading_map diff_readings SEC(".maps");
+reading_map accum_readings SEC(".maps");
+
+struct {
+ __uint(type, BPF_MAP_TYPE_HASH);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(__u32));
+} filter SEC(".maps");
+
+enum bperf_filter_type type = 0;
+int enabled = 0;
+
+SEC("fexit/XXX")
+int BPF_PROG(fexit_XXX)
+{
+ struct bpf_perf_event_value *diff_val, *accum_val;
+ __u32 filter_key, zero = 0;
+ __u32 *accum_key;
+
+ if (!enabled)
+ return 0;
+
+ switch (type) {
+ case BPERF_FILTER_GLOBAL:
+ accum_key = &zero;
+ goto do_add;
+ case BPERF_FILTER_CPU:
+ filter_key = bpf_get_smp_processor_id();
+ break;
+ case BPERF_FILTER_PID:
+ filter_key = bpf_get_current_pid_tgid() & 0xffffffff;
+ break;
+ case BPERF_FILTER_TGID:
+ filter_key = bpf_get_current_pid_tgid() >> 32;
+ break;
+ default:
+ return 0;
+ }
+
+ accum_key = bpf_map_lookup_elem(&filter, &filter_key);
+ if (!accum_key)
+ return 0;
+
+do_add:
+ diff_val = bpf_map_lookup_elem(&diff_readings, &zero);
+ if (!diff_val)
+ return 0;
+
+ accum_val = bpf_map_lookup_elem(&accum_readings, accum_key);
+ if (!accum_val)
+ return 0;
+
+ accum_val->counter += diff_val->counter;
+ accum_val->enabled += diff_val->enabled;
+ accum_val->running += diff_val->running;
+
+ return 0;
+}
+
+char LICENSE[] SEC("license") = "Dual BSD/GPL";
diff --git a/tools/perf/util/bpf_skel/bperf_leader.bpf.c b/tools/perf/util/bpf_skel/bperf_leader.bpf.c
new file mode 100644
index 000000000000..4f70d1459e86
--- /dev/null
+++ b/tools/perf/util/bpf_skel/bperf_leader.bpf.c
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+// Copyright (c) 2021 Facebook
+#include <linux/bpf.h>
+#include <linux/perf_event.h>
+#include <bpf/bpf_helpers.h>
+#include <bpf/bpf_tracing.h>
+#include "bperf.h"
+
+struct {
+ __uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
+ __uint(key_size, sizeof(__u32));
+ __uint(value_size, sizeof(int));
+ __uint(map_flags, BPF_F_PRESERVE_ELEMS);
+} events SEC(".maps");
+
+reading_map prev_readings SEC(".maps");
+reading_map diff_readings SEC(".maps");
+
+SEC("raw_tp/sched_switch")
+int BPF_PROG(on_switch)
+{
+ struct bpf_perf_event_value val, *prev_val, *diff_val;
+ __u32 key = bpf_get_smp_processor_id();
+ __u32 zero = 0;
+ long err;
+
+ prev_val = bpf_map_lookup_elem(&prev_readings, &zero);
+ if (!prev_val)
+ return 0;
+
+ diff_val = bpf_map_lookup_elem(&diff_readings, &zero);
+ if (!diff_val)
+ return 0;
+
+ err = bpf_perf_event_read_value(&events, key, &val, sizeof(val));
+ if (err)
+ return 0;
+
+ diff_val->counter = val.counter - prev_val->counter;
+ diff_val->enabled = val.enabled - prev_val->enabled;
+ diff_val->running = val.running - prev_val->running;
+ *prev_val = val;
+ return 0;
+}
+
+char LICENSE[] SEC("license") = "Dual BSD/GPL";
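
[To make the delta bookkeeping concrete, with illustrative numbers only: if
prev_readings holds {counter = 1000, enabled = 2000, running = 2000} for this
CPU and bpf_perf_event_read_value() now returns {counter = 1500, enabled =
2600, running = 2600}, on_switch() stores {500, 600, 600} in diff_readings and
saves the new reading as prev. Each follower's fexit program then folds that
per-switch delta into its own accum_readings slot, so a follower attached
mid-run accumulates only from the point it attached.]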
diff --git a/tools/perf/util/bpf_skel/bperf_u.h b/tools/perf/util/bpf_skel/bperf_u.h
new file mode 100644
index 000000000000..1ce0c2c905c1
--- /dev/null
+++ b/tools/perf/util/bpf_skel/bperf_u.h
@@ -0,0 +1,14 @@
+// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+// Copyright (c) 2021 Facebook
+
+#ifndef __BPERF_STAT_U_H
+#define __BPERF_STAT_U_H
+
+enum bperf_filter_type {
+ BPERF_FILTER_GLOBAL = 1,
+ BPERF_FILTER_CPU,
+ BPERF_FILTER_PID,
+ BPERF_FILTER_TGID,
+};
+
+#endif /* __BPERF_STAT_U_H */
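
[For reference, bperf_check_target() in bpf_counter.c maps perf-stat targets
onto these filter types: a system-wide session uses BPERF_FILTER_GLOBAL with a
single accumulator slot; a CPU list uses BPERF_FILTER_CPU keyed by processor
id; thread targets use BPERF_FILTER_PID, matching the low 32 bits of
bpf_get_current_pid_tgid(); and process targets, including a forked workload,
use BPERF_FILTER_TGID, matching the high 32 bits.]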
diff --git a/tools/perf/util/bpf_skel/bpf_prog_profiler.bpf.c b/tools/perf/util/bpf_skel/bpf_prog_profiler.bpf.c
index c7cec92d0236..ab12b4c4ece2 100644
--- a/tools/perf/util/bpf_skel/bpf_prog_profiler.bpf.c
+++ b/tools/perf/util/bpf_skel/bpf_prog_profiler.bpf.c
@@ -52,7 +52,7 @@ int BPF_PROG(fentry_XXX)
static inline void
fexit_update_maps(struct bpf_perf_event_value *after)
{
- struct bpf_perf_event_value *before, diff, *accum;
+ struct bpf_perf_event_value *before, diff;
__u32 zero = 0;
before = bpf_map_lookup_elem(&fentry_readings, &zero);
@@ -78,7 +78,6 @@ int BPF_PROG(fexit_XXX)
{
struct bpf_perf_event_value reading;
__u32 cpu = bpf_get_smp_processor_id();
- __u32 one = 1, zero = 0;
int err;
/* read all events before updating the maps, to reduce error */
diff --git a/tools/perf/util/call-path.h b/tools/perf/util/call-path.h
index 6b3229106f16..5875cfc8106e 100644
--- a/tools/perf/util/call-path.h
+++ b/tools/perf/util/call-path.h
@@ -23,7 +23,7 @@
* @children: tree of call paths of functions called
*
* In combination with the call_return structure, the call_path structure
- * defines a context-sensitve call-graph.
+ * defines a context-sensitive call-graph.
*/
struct call_path {
struct call_path *parent;
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
index 1b60985690bb..8e2777133bd9 100644
--- a/tools/perf/util/callchain.c
+++ b/tools/perf/util/callchain.c
@@ -877,7 +877,7 @@ append_chain_children(struct callchain_node *root,
if (!node)
return -1;
- /* lookup in childrens */
+ /* lookup in children */
while (*p) {
enum match_result ret;
diff --git a/tools/perf/util/config.c b/tools/perf/util/config.c
index 6984c77068a3..63d472b336de 100644
--- a/tools/perf/util/config.c
+++ b/tools/perf/util/config.c
@@ -18,6 +18,7 @@
#include "util/hist.h" /* perf_hist_config */
#include "util/llvm-utils.h" /* perf_llvm_config */
#include "util/stat.h" /* perf_stat__set_big_num */
+#include "util/evsel.h" /* evsel__hw_names, evsel__use_bpf_counters */
#include "build-id.h"
#include "debug.h"
#include "config.h"
@@ -457,6 +458,12 @@ static int perf_stat_config(const char *var, const char *value)
if (!strcmp(var, "stat.big-num"))
perf_stat__set_big_num(perf_config_bool(var, value));
+ if (!strcmp(var, "stat.no-csv-summary"))
+ perf_stat__set_no_csv_summary(perf_config_bool(var, value));
+
+ if (!strcmp(var, "stat.bpf-counter-events"))
+ evsel__bpf_counter_events = strdup(value);
+
/* Add other config variables here. */
return 0;
}
@@ -699,7 +706,7 @@ static int collect_config(const char *var, const char *value,
/* perf_config_set can contain both user and system config items.
* So we should know where each value is from.
* The classification would be needed when a particular config file
- * is overwrited by setting feature i.e. set_config().
+ * is overwritten by setting feature i.e. set_config().
*/
if (strcmp(config_file_name, perf_etc_perfconfig()) == 0) {
section->from_system_config = true;
diff --git a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
index 3f4bc4050477..059bcec3f651 100644
--- a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
+++ b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
@@ -6,6 +6,7 @@
* Author: Mathieu Poirier <mathieu.poirier@linaro.org>
*/
+#include <linux/coresight-pmu.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/zalloc.h>
@@ -316,7 +317,7 @@ cs_etm_decoder__do_hard_timestamp(struct cs_etm_queue *etmq,
* This is the first timestamp we've seen since the beginning of traces
* or a discontinuity. Since timestamps packets are generated *after*
* range packets have been generated, we need to estimate the time at
- * which instructions started by substracting the number of instructions
+ * which instructions started by subtracting the number of instructions
* executed to the timestamp.
*/
packet_queue->timestamp = elem->timestamp - packet_queue->instr_count;
@@ -491,13 +492,42 @@ cs_etm_decoder__set_tid(struct cs_etm_queue *etmq,
const ocsd_generic_trace_elem *elem,
const uint8_t trace_chan_id)
{
- pid_t tid;
+ pid_t tid = -1;
+ static u64 pid_fmt;
+ int ret;
- /* Ignore PE_CONTEXT packets that don't have a valid contextID */
- if (!elem->context.ctxt_id_valid)
+ /*
+ * As all the ETMs run at the same exception level, the system should
+ * have the same PID format crossing CPUs. So cache the PID format
+ * and reuse it for sequential decoding.
+ */
+ if (!pid_fmt) {
+ ret = cs_etm__get_pid_fmt(trace_chan_id, &pid_fmt);
+ if (ret)
+ return OCSD_RESP_FATAL_SYS_ERR;
+ }
+
+ /*
+ * Process the PE_CONTEXT packets if we have a valid contextID or VMID.
+ * If the kernel is running at EL2, the PID is traced in CONTEXTIDR_EL2
+ * as VMID, Bit ETM_OPT_CTXTID2 is set in this case.
+ */
+ switch (pid_fmt) {
+ case BIT(ETM_OPT_CTXTID):
+ if (elem->context.ctxt_id_valid)
+ tid = elem->context.context_id;
+ break;
+ case BIT(ETM_OPT_CTXTID2):
+ if (elem->context.vmid_valid)
+ tid = elem->context.vmid;
+ break;
+ default:
+ break;
+ }
+
+ if (tid == -1)
return OCSD_RESP_CONT;
- tid = elem->context.context_id;
if (cs_etm__etmq_set_tid(etmq, tid, trace_chan_id))
return OCSD_RESP_FATAL_SYS_ERR;
diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c
index a2a369e2fbb6..7e63e7dedc33 100644
--- a/tools/perf/util/cs-etm.c
+++ b/tools/perf/util/cs-etm.c
@@ -7,6 +7,7 @@
*/
#include <linux/bitops.h>
+#include <linux/coresight-pmu.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/log2.h>
@@ -156,11 +157,52 @@ int cs_etm__get_cpu(u8 trace_chan_id, int *cpu)
return 0;
}
+/*
+ * The returned PID format is presented by two bits:
+ *
+ * Bit ETM_OPT_CTXTID: CONTEXTIDR or CONTEXTIDR_EL1 is traced;
+ * Bit ETM_OPT_CTXTID2: CONTEXTIDR_EL2 is traced.
+ *
+ * It's possible that the two bits ETM_OPT_CTXTID and ETM_OPT_CTXTID2
+ * are enabled at the same time when the session runs on an EL2 kernel.
+ * This means the CONTEXTIDR_EL1 and CONTEXTIDR_EL2 both will be
+ * recorded in the trace data, the tool will selectively use
+ * CONTEXTIDR_EL2 as PID.
+ */
+int cs_etm__get_pid_fmt(u8 trace_chan_id, u64 *pid_fmt)
+{
+ struct int_node *inode;
+ u64 *metadata, val;
+
+ inode = intlist__find(traceid_list, trace_chan_id);
+ if (!inode)
+ return -EINVAL;
+
+ metadata = inode->priv;
+
+ if (metadata[CS_ETM_MAGIC] == __perf_cs_etmv3_magic) {
+ val = metadata[CS_ETM_ETMCR];
+ /* CONTEXTIDR is traced */
+ if (val & BIT(ETM_OPT_CTXTID))
+ *pid_fmt = BIT(ETM_OPT_CTXTID);
+ } else {
+ val = metadata[CS_ETMV4_TRCCONFIGR];
+ /* CONTEXTIDR_EL2 is traced */
+ if (val & (BIT(ETM4_CFG_BIT_VMID) | BIT(ETM4_CFG_BIT_VMID_OPT)))
+ *pid_fmt = BIT(ETM_OPT_CTXTID2);
+ /* CONTEXTIDR_EL1 is traced */
+ else if (val & BIT(ETM4_CFG_BIT_CTXTID))
+ *pid_fmt = BIT(ETM_OPT_CTXTID);
+ }
+
+ return 0;
+}
+
void cs_etm__etmq_set_traceid_queue_timestamp(struct cs_etm_queue *etmq,
u8 trace_chan_id)
{
/*
- * Wnen a timestamp packet is encountered the backend code
+ * When a timestamp packet is encountered the backend code
* is stopped so that the front end has time to process packets
* that were accumulated in the traceID queue. Since there can
* be more than one channel per cs_etm_queue, we need to specify
@@ -1655,7 +1697,7 @@ static bool cs_etm__is_svc_instr(struct cs_etm_queue *etmq, u8 trace_chan_id,
* | 1 1 0 1 1 1 1 1 | imm8 |
* +-----------------+--------+
*
- * According to the specifiction, it only defines SVC for T32
+ * According to the specification, it only defines SVC for T32
* with 16 bits instruction and has no definition for 32bits;
* so below only read 2 bytes as instruction size for T32.
*/
@@ -1887,7 +1929,7 @@ static int cs_etm__set_sample_flags(struct cs_etm_queue *etmq,
/*
* If the previous packet is an exception return packet
- * and the return address just follows SVC instuction,
+ * and the return address just follows SVC instruction,
* it needs to calibrate the previous packet sample flags
* as PERF_IP_FLAG_SYSCALLRET.
*/
@@ -1961,7 +2003,7 @@ static int cs_etm__set_sample_flags(struct cs_etm_queue *etmq,
* contain exception type related info so we cannot decide
* the exception type purely based on exception return packet.
* If we record the exception number from exception packet and
- * reuse it for excpetion return packet, this is not reliable
+ * reuse it for exception return packet, this is not reliable
* due the trace can be discontinuity or the interrupt can
* be nested, thus the recorded exception number cannot be
* used for exception return packet for these two cases.
@@ -2435,7 +2477,7 @@ static bool cs_etm__is_timeless_decoding(struct cs_etm_auxtrace *etm)
}
static const char * const cs_etm_global_header_fmts[] = {
- [CS_HEADER_VERSION_0] = " Header version %llx\n",
+ [CS_HEADER_VERSION] = " Header version %llx\n",
[CS_PMU_TYPE_CPUS] = " PMU type/num cpus %llx\n",
[CS_ETM_SNAPSHOT] = " Snapshot %llx\n",
};
@@ -2443,6 +2485,7 @@ static const char * const cs_etm_global_header_fmts[] = {
static const char * const cs_etm_priv_fmts[] = {
[CS_ETM_MAGIC] = " Magic number %llx\n",
[CS_ETM_CPU] = " CPU %lld\n",
+ [CS_ETM_NR_TRC_PARAMS] = " NR_TRC_PARAMS %llx\n",
[CS_ETM_ETMCR] = " ETMCR %llx\n",
[CS_ETM_ETMTRACEIDR] = " ETMTRACEIDR %llx\n",
[CS_ETM_ETMCCER] = " ETMCCER %llx\n",
@@ -2452,6 +2495,7 @@ static const char * const cs_etm_priv_fmts[] = {
static const char * const cs_etmv4_priv_fmts[] = {
[CS_ETM_MAGIC] = " Magic number %llx\n",
[CS_ETM_CPU] = " CPU %lld\n",
+ [CS_ETM_NR_TRC_PARAMS] = " NR_TRC_PARAMS %llx\n",
[CS_ETMV4_TRCCONFIGR] = " TRCCONFIGR %llx\n",
[CS_ETMV4_TRCTRACEIDR] = " TRCTRACEIDR %llx\n",
[CS_ETMV4_TRCIDR0] = " TRCIDR0 %llx\n",
@@ -2461,26 +2505,167 @@ static const char * const cs_etmv4_priv_fmts[] = {
[CS_ETMV4_TRCAUTHSTATUS] = " TRCAUTHSTATUS %llx\n",
};
-static void cs_etm__print_auxtrace_info(__u64 *val, int num)
+static const char * const param_unk_fmt =
+ " Unknown parameter [%d] %llx\n";
+static const char * const magic_unk_fmt =
+ " Magic number Unknown %llx\n";
+
+static int cs_etm__print_cpu_metadata_v0(__u64 *val, int *offset)
{
- int i, j, cpu = 0;
+ int i = *offset, j, nr_params = 0, fmt_offset;
+ __u64 magic;
- for (i = 0; i < CS_HEADER_VERSION_0_MAX; i++)
- fprintf(stdout, cs_etm_global_header_fmts[i], val[i]);
+ /* check magic value */
+ magic = val[i + CS_ETM_MAGIC];
+ if ((magic != __perf_cs_etmv3_magic) &&
+ (magic != __perf_cs_etmv4_magic)) {
+ /* failure - note bad magic value */
+ fprintf(stdout, magic_unk_fmt, magic);
+ return -EINVAL;
+ }
+
+ /* print common header block */
+ fprintf(stdout, cs_etm_priv_fmts[CS_ETM_MAGIC], val[i++]);
+ fprintf(stdout, cs_etm_priv_fmts[CS_ETM_CPU], val[i++]);
+
+ if (magic == __perf_cs_etmv3_magic) {
+ nr_params = CS_ETM_NR_TRC_PARAMS_V0;
+ fmt_offset = CS_ETM_ETMCR;
+ /* after common block, offset format index past NR_PARAMS */
+ for (j = fmt_offset; j < nr_params + fmt_offset; j++, i++)
+ fprintf(stdout, cs_etm_priv_fmts[j], val[i]);
+ } else if (magic == __perf_cs_etmv4_magic) {
+ nr_params = CS_ETMV4_NR_TRC_PARAMS_V0;
+ fmt_offset = CS_ETMV4_TRCCONFIGR;
+ /* after common block, offset format index past NR_PARAMS */
+ for (j = fmt_offset; j < nr_params + fmt_offset; j++, i++)
+ fprintf(stdout, cs_etmv4_priv_fmts[j], val[i]);
+ }
+ *offset = i;
+ return 0;
+}
+
+static int cs_etm__print_cpu_metadata_v1(__u64 *val, int *offset)
+{
+ int i = *offset, j, total_params = 0;
+ __u64 magic;
+
+ magic = val[i + CS_ETM_MAGIC];
+ /* total params to print is NR_PARAMS + common block size for v1 */
+ total_params = val[i + CS_ETM_NR_TRC_PARAMS] + CS_ETM_COMMON_BLK_MAX_V1;
- for (i = CS_HEADER_VERSION_0_MAX; cpu < num; cpu++) {
- if (val[i] == __perf_cs_etmv3_magic)
- for (j = 0; j < CS_ETM_PRIV_MAX; j++, i++)
+ if (magic == __perf_cs_etmv3_magic) {
+ for (j = 0; j < total_params; j++, i++) {
+ /* if newer record - could be excess params */
+ if (j >= CS_ETM_PRIV_MAX)
+ fprintf(stdout, param_unk_fmt, j, val[i]);
+ else
fprintf(stdout, cs_etm_priv_fmts[j], val[i]);
- else if (val[i] == __perf_cs_etmv4_magic)
- for (j = 0; j < CS_ETMV4_PRIV_MAX; j++, i++)
+ }
+ } else if (magic == __perf_cs_etmv4_magic) {
+ for (j = 0; j < total_params; j++, i++) {
+ /* if newer record - could be excess params */
+ if (j >= CS_ETMV4_PRIV_MAX)
+ fprintf(stdout, param_unk_fmt, j, val[i]);
+ else
fprintf(stdout, cs_etmv4_priv_fmts[j], val[i]);
- else
- /* failure.. return */
+ }
+ } else {
+ /* failure - note bad magic value and error out */
+ fprintf(stdout, magic_unk_fmt, magic);
+ return -EINVAL;
+ }
+ *offset = i;
+ return 0;
+}
+
+static void cs_etm__print_auxtrace_info(__u64 *val, int num)
+{
+ int i, cpu = 0, version, err;
+
+ /* bail out early on bad header version */
+ version = val[0];
+ if (version > CS_HEADER_CURRENT_VERSION) {
+ /* failure.. return */
+ fprintf(stdout, " Unknown Header Version = %x, ", version);
+ fprintf(stdout, "Version supported <= %x\n", CS_HEADER_CURRENT_VERSION);
+ return;
+ }
+
+ for (i = 0; i < CS_HEADER_VERSION_MAX; i++)
+ fprintf(stdout, cs_etm_global_header_fmts[i], val[i]);
+
+ for (i = CS_HEADER_VERSION_MAX; cpu < num; cpu++) {
+ if (version == 0)
+ err = cs_etm__print_cpu_metadata_v0(val, &i);
+ else if (version == 1)
+ err = cs_etm__print_cpu_metadata_v1(val, &i);
+ if (err)
return;
}
}
+/*
+ * Read a single cpu parameter block from the auxtrace_info priv block.
+ *
+ * For version 1 there is a per cpu nr_params entry. If we are handling
+ * version 1 file, then there may be less, the same, or more params
+ * indicated by this value than the compile time number we understand.
+ *
+ * For a version 0 info block, there are a fixed number, and we need to
+ * fill out the nr_param value in the metadata we create.
+ */
+static u64 *cs_etm__create_meta_blk(u64 *buff_in, int *buff_in_offset,
+ int out_blk_size, int nr_params_v0)
+{
+ u64 *metadata = NULL;
+ int hdr_version;
+ int nr_in_params, nr_out_params, nr_cmn_params;
+ int i, k;
+
+ metadata = zalloc(sizeof(*metadata) * out_blk_size);
+ if (!metadata)
+ return NULL;
+
+ /* read block current index & version */
+ i = *buff_in_offset;
+ hdr_version = buff_in[CS_HEADER_VERSION];
+
+ if (!hdr_version) {
+ /* read version 0 info block into a version 1 metadata block */
+ nr_in_params = nr_params_v0;
+ metadata[CS_ETM_MAGIC] = buff_in[i + CS_ETM_MAGIC];
+ metadata[CS_ETM_CPU] = buff_in[i + CS_ETM_CPU];
+ metadata[CS_ETM_NR_TRC_PARAMS] = nr_in_params;
+ /* remaining block params at offset +1 from source */
+ for (k = CS_ETM_COMMON_BLK_MAX_V1 - 1; k < nr_in_params; k++)
+ metadata[k + 1] = buff_in[i + k];
+ /* version 0 has 2 common params */
+ nr_cmn_params = 2;
+ } else {
+ /* read version 1 info block - input and output nr_params may differ */
+ /* version 1 has 3 common params */
+ nr_cmn_params = 3;
+ nr_in_params = buff_in[i + CS_ETM_NR_TRC_PARAMS];
+
+ /* if input has more params than output - skip excess */
+ nr_out_params = nr_in_params + nr_cmn_params;
+ if (nr_out_params > out_blk_size)
+ nr_out_params = out_blk_size;
+
+ for (k = CS_ETM_MAGIC; k < nr_out_params; k++)
+ metadata[k] = buff_in[i + k];
+
+ /* record the actual nr params we copied */
+ metadata[CS_ETM_NR_TRC_PARAMS] = nr_out_params - nr_cmn_params;
+ }
+
+ /* adjust in offset by number of in params used */
+ i += nr_in_params + nr_cmn_params;
+ *buff_in_offset = i;
+ return metadata;
+}
+
int cs_etm__process_auxtrace_info(union perf_event *event,
struct perf_session *session)
{
@@ -2492,11 +2677,12 @@ int cs_etm__process_auxtrace_info(union perf_event *event,
int info_header_size;
int total_size = auxtrace_info->header.size;
int priv_size = 0;
- int num_cpu;
- int err = 0, idx = -1;
- int i, j, k;
+ int num_cpu, trcidr_idx;
+ int err = 0;
+ int i, j;
u64 *ptr, *hdr = NULL;
u64 **metadata = NULL;
+ u64 hdr_version;
/*
* sizeof(auxtrace_info_event::type) +
@@ -2512,16 +2698,21 @@ int cs_etm__process_auxtrace_info(union perf_event *event,
/* First the global part */
ptr = (u64 *) auxtrace_info->priv;
- /* Look for version '0' of the header */
- if (ptr[0] != 0)
+ /* Look for version of the header */
+ hdr_version = ptr[0];
+ if (hdr_version > CS_HEADER_CURRENT_VERSION) {
+ /* print routine will print an error on bad version */
+ if (dump_trace)
+ cs_etm__print_auxtrace_info(auxtrace_info->priv, 0);
return -EINVAL;
+ }
- hdr = zalloc(sizeof(*hdr) * CS_HEADER_VERSION_0_MAX);
+ hdr = zalloc(sizeof(*hdr) * CS_HEADER_VERSION_MAX);
if (!hdr)
return -ENOMEM;
/* Extract header information - see cs-etm.h for format */
- for (i = 0; i < CS_HEADER_VERSION_0_MAX; i++)
+ for (i = 0; i < CS_HEADER_VERSION_MAX; i++)
hdr[i] = ptr[i];
num_cpu = hdr[CS_PMU_TYPE_CPUS] & 0xffffffff;
pmu_type = (unsigned int) ((hdr[CS_PMU_TYPE_CPUS] >> 32) &
@@ -2552,35 +2743,31 @@ int cs_etm__process_auxtrace_info(union perf_event *event,
*/
for (j = 0; j < num_cpu; j++) {
if (ptr[i] == __perf_cs_etmv3_magic) {
- metadata[j] = zalloc(sizeof(*metadata[j]) *
- CS_ETM_PRIV_MAX);
- if (!metadata[j]) {
- err = -ENOMEM;
- goto err_free_metadata;
- }
- for (k = 0; k < CS_ETM_PRIV_MAX; k++)
- metadata[j][k] = ptr[i + k];
+ metadata[j] =
+ cs_etm__create_meta_blk(ptr, &i,
+ CS_ETM_PRIV_MAX,
+ CS_ETM_NR_TRC_PARAMS_V0);
/* The traceID is our handle */
- idx = metadata[j][CS_ETM_ETMTRACEIDR];
- i += CS_ETM_PRIV_MAX;
+ trcidr_idx = CS_ETM_ETMTRACEIDR;
+
} else if (ptr[i] == __perf_cs_etmv4_magic) {
- metadata[j] = zalloc(sizeof(*metadata[j]) *
- CS_ETMV4_PRIV_MAX);
- if (!metadata[j]) {
- err = -ENOMEM;
- goto err_free_metadata;
- }
- for (k = 0; k < CS_ETMV4_PRIV_MAX; k++)
- metadata[j][k] = ptr[i + k];
+ metadata[j] =
+ cs_etm__create_meta_blk(ptr, &i,
+ CS_ETMV4_PRIV_MAX,
+ CS_ETMV4_NR_TRC_PARAMS_V0);
/* The traceID is our handle */
- idx = metadata[j][CS_ETMV4_TRCTRACEIDR];
- i += CS_ETMV4_PRIV_MAX;
+ trcidr_idx = CS_ETMV4_TRCTRACEIDR;
+ }
+
+ if (!metadata[j]) {
+ err = -ENOMEM;
+ goto err_free_metadata;
}
/* Get an RB node for this CPU */
- inode = intlist__findnew(traceid_list, idx);
+ inode = intlist__findnew(traceid_list, metadata[j][trcidr_idx]);
/* Something went wrong, no need to continue */
if (!inode) {
@@ -2601,7 +2788,7 @@ int cs_etm__process_auxtrace_info(union perf_event *event,
}
/*
- * Each of CS_HEADER_VERSION_0_MAX, CS_ETM_PRIV_MAX and
+ * Each of CS_HEADER_VERSION_MAX, CS_ETM_PRIV_MAX and
* CS_ETMV4_PRIV_MAX mark how many double words are in the
* global metadata, and each cpu's metadata respectively.
* The following tests if the correct number of double words was
@@ -2703,6 +2890,12 @@ err_free_traceid_list:
intlist__delete(traceid_list);
err_free_hdr:
zfree(&hdr);
-
+ /*
+ * At this point, as a minimum we have valid header. Dump the rest of
+ * the info section - the print routines will error out on structural
+ * issues.
+ */
+ if (dump_trace)
+ cs_etm__print_auxtrace_info(auxtrace_info->priv, num_cpu);
return err;
}
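
[A worked reading of cs_etm__create_meta_blk() for the common case, assuming
the full ETMv4 enum from cs-etm.h: a version 0 ETMv4 record arrives as the
nine words [MAGIC, CPU, TRCCONFIGR, TRCTRACEIDR, TRCIDR0, TRCIDR1, TRCIDR2,
TRCIDR8, TRCAUTHSTATUS], and the function emits the version 1 layout [MAGIC,
CPU, NR_TRC_PARAMS = 7, TRCCONFIGR, ..., TRCAUTHSTATUS], i.e. the same
parameters shifted one slot to make room for the synthesized count, 7 being
CS_ETMV4_NR_TRC_PARAMS_V0. For a version 1 input the count comes from the
block itself, and any params beyond the compiled-in CS_ETMV4_PRIV_MAX are
skipped so a newer file still parses.]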
diff --git a/tools/perf/util/cs-etm.h b/tools/perf/util/cs-etm.h
index 4ad925d6d799..36428918411e 100644
--- a/tools/perf/util/cs-etm.h
+++ b/tools/perf/util/cs-etm.h
@@ -12,28 +12,43 @@
struct perf_session;
-/* Versionning header in case things need tro change in the future. That way
+/*
+ * Versioning header in case things need to change in the future. That way
* decoding of old snapshot is still possible.
*/
enum {
/* Starting with 0x0 */
- CS_HEADER_VERSION_0,
+ CS_HEADER_VERSION,
/* PMU->type (32 bit), total # of CPUs (32 bit) */
CS_PMU_TYPE_CPUS,
CS_ETM_SNAPSHOT,
- CS_HEADER_VERSION_0_MAX,
+ CS_HEADER_VERSION_MAX,
};
+/*
+ * Update the version for new format.
+ *
+ * New version 1 format adds a param count to the per cpu metadata.
+ * This allows easy adding of new metadata parameters.
+ * Requires that new params always added after current ones.
+ * Also allows client reader to handle file versions that are different by
+ * checking the number of params in the file vs the number expected.
+ */
+#define CS_HEADER_CURRENT_VERSION 1
+
/* Beginning of header common to both ETMv3 and V4 */
enum {
CS_ETM_MAGIC,
CS_ETM_CPU,
+ /* Number of trace config params in following ETM specific block */
+ CS_ETM_NR_TRC_PARAMS,
+ CS_ETM_COMMON_BLK_MAX_V1,
};
/* ETMv3/PTM metadata */
enum {
/* Dynamic, configurable parameters */
- CS_ETM_ETMCR = CS_ETM_CPU + 1,
+ CS_ETM_ETMCR = CS_ETM_COMMON_BLK_MAX_V1,
CS_ETM_ETMTRACEIDR,
/* RO, taken from sysFS */
CS_ETM_ETMCCER,
@@ -41,10 +56,13 @@ enum {
CS_ETM_PRIV_MAX,
};
+/* define fixed version 0 length - allow new format reader to read old files. */
+#define CS_ETM_NR_TRC_PARAMS_V0 (CS_ETM_ETMIDR - CS_ETM_ETMCR + 1)
+
/* ETMv4 metadata */
enum {
/* Dynamic, configurable parameters */
- CS_ETMV4_TRCCONFIGR = CS_ETM_CPU + 1,
+ CS_ETMV4_TRCCONFIGR = CS_ETM_COMMON_BLK_MAX_V1,
CS_ETMV4_TRCTRACEIDR,
/* RO, taken from sysFS */
CS_ETMV4_TRCIDR0,
@@ -55,9 +73,12 @@ enum {
CS_ETMV4_PRIV_MAX,
};
+/* define fixed version 0 length - allow new format reader to read old files. */
+#define CS_ETMV4_NR_TRC_PARAMS_V0 (CS_ETMV4_TRCAUTHSTATUS - CS_ETMV4_TRCCONFIGR + 1)
+
/*
* ETMv3 exception encoding number:
- * See Embedded Trace Macrocell spcification (ARM IHI 0014Q)
+ * See Embedded Trace Macrocell specification (ARM IHI 0014Q)
* table 7-12 Encoding of Exception[3:0] for non-ARMv7-M processors.
*/
enum {
@@ -162,7 +183,7 @@ struct cs_etm_packet_queue {
#define BMVAL(val, lsb, msb) ((val & GENMASK(msb, lsb)) >> lsb)
-#define CS_ETM_HEADER_SIZE (CS_HEADER_VERSION_0_MAX * sizeof(u64))
+#define CS_ETM_HEADER_SIZE (CS_HEADER_VERSION_MAX * sizeof(u64))
#define __perf_cs_etmv3_magic 0x3030303030303030ULL
#define __perf_cs_etmv4_magic 0x4040404040404040ULL
@@ -173,6 +194,7 @@ struct cs_etm_packet_queue {
int cs_etm__process_auxtrace_info(union perf_event *event,
struct perf_session *session);
int cs_etm__get_cpu(u8 trace_chan_id, int *cpu);
+int cs_etm__get_pid_fmt(u8 trace_chan_id, u64 *pid_fmt);
int cs_etm__etmq_set_tid(struct cs_etm_queue *etmq,
pid_t tid, u8 trace_chan_id);
bool cs_etm__etmq_is_timeless(struct cs_etm_queue *etmq);
diff --git a/tools/perf/util/data-convert-bt.c b/tools/perf/util/data-convert-bt.c
index 8b67bd97d122..cace349fb700 100644
--- a/tools/perf/util/data-convert-bt.c
+++ b/tools/perf/util/data-convert-bt.c
@@ -21,7 +21,7 @@
#include <babeltrace/ctf/events.h>
#include <traceevent/event-parse.h>
#include "asm/bug.h"
-#include "data-convert-bt.h"
+#include "data-convert.h"
#include "session.h"
#include "debug.h"
#include "tool.h"
@@ -949,7 +949,7 @@ static char *change_name(char *name, char *orig_name, int dup)
/*
* Add '_' prefix to potential keywork. According to
* Mathieu Desnoyers (https://lore.kernel.org/lkml/1074266107.40857.1422045946295.JavaMail.zimbra@efficios.com),
- * futher CTF spec updating may require us to use '$'.
+ * further CTF spec updating may require us to use '$'.
*/
if (dup < 0)
len = strlen(name) + sizeof("_");
diff --git a/tools/perf/util/data-convert-bt.h b/tools/perf/util/data-convert-bt.h
deleted file mode 100644
index 821674d63c4e..000000000000
--- a/tools/perf/util/data-convert-bt.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __DATA_CONVERT_BT_H
-#define __DATA_CONVERT_BT_H
-#include "data-convert.h"
-#ifdef HAVE_LIBBABELTRACE_SUPPORT
-
-int bt_convert__perf2ctf(const char *input_name, const char *to_ctf,
- struct perf_data_convert_opts *opts);
-
-#endif /* HAVE_LIBBABELTRACE_SUPPORT */
-#endif /* __DATA_CONVERT_BT_H */
diff --git a/tools/perf/util/data-convert-json.c b/tools/perf/util/data-convert-json.c
new file mode 100644
index 000000000000..355cd1948bdf
--- /dev/null
+++ b/tools/perf/util/data-convert-json.c
@@ -0,0 +1,384 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * JSON export.
+ *
+ * Copyright (C) 2021, CodeWeavers Inc. <nfraser@codeweavers.com>
+ */
+
+#include "data-convert.h"
+
+#include <fcntl.h>
+#include <inttypes.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+#include "linux/compiler.h"
+#include "linux/err.h"
+#include "util/auxtrace.h"
+#include "util/debug.h"
+#include "util/dso.h"
+#include "util/event.h"
+#include "util/evsel.h"
+#include "util/evlist.h"
+#include "util/header.h"
+#include "util/map.h"
+#include "util/session.h"
+#include "util/symbol.h"
+#include "util/thread.h"
+#include "util/tool.h"
+
+struct convert_json {
+ struct perf_tool tool;
+ FILE *out;
+ bool first;
+ u64 events_count;
+};
+
+// Outputs a JSON-encoded string surrounded by quotes with characters escaped.
+static void output_json_string(FILE *out, const char *s)
+{
+ fputc('"', out);
+ while (*s) {
+ switch (*s) {
+
+ // required escapes with special forms as per RFC 8259
+ case '"': fputs("\\\"", out); break;
+ case '\\': fputs("\\\\", out); break;
+ case '\b': fputs("\\b", out); break;
+ case '\f': fputs("\\f", out); break;
+ case '\n': fputs("\\n", out); break;
+ case '\r': fputs("\\r", out); break;
+ case '\t': fputs("\\t", out); break;
+
+		default:
+			// all other control characters must be escaped by hex
+			// code; cast to unsigned char so UTF-8 bytes >= 0x80
+			// (negative when char is signed) pass through unescaped
+			if ((unsigned char)*s <= 0x1f)
+				fprintf(out, "\\u%04x", *s);
+			else
+				fputc(*s, out);
+			break;
+ }
+
+ ++s;
+ }
+ fputc('"', out);
+}
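A quick illustration of the escaping rules above; the expected output for each hypothetical call is shown in the trailing comment:

	output_json_string(stdout, "say \"hi\"");	/* "say \"hi\"" */
	output_json_string(stdout, "tab\there");	/* "tab\there"  */
	output_json_string(stdout, "\x01");		/* "\u0001"     */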
+
+// Outputs an optional comma, newline and indentation to delimit a new value
+// from the previous one in a JSON object or array.
+static void output_json_delimiters(FILE *out, bool comma, int depth)
+{
+ int i;
+
+ if (comma)
+ fputc(',', out);
+ fputc('\n', out);
+ for (i = 0; i < depth; ++i)
+ fputc('\t', out);
+}
+
+// Outputs a printf format string (with delimiter) as a JSON value.
+__printf(4, 5)
+static void output_json_format(FILE *out, bool comma, int depth, const char *format, ...)
+{
+ va_list args;
+
+ output_json_delimiters(out, comma, depth);
+ va_start(args, format);
+ vfprintf(out, format, args);
+ va_end(args);
+}
+
+// Outputs a JSON key-value pair where the value is a string.
+static void output_json_key_string(FILE *out, bool comma, int depth,
+ const char *key, const char *value)
+{
+ output_json_delimiters(out, comma, depth);
+ output_json_string(out, key);
+ fputs(": ", out);
+ output_json_string(out, value);
+}
+
+// Outputs a JSON key-value pair where the value is a printf format string.
+__printf(5, 6)
+static void output_json_key_format(FILE *out, bool comma, int depth,
+ const char *key, const char *format, ...)
+{
+ va_list args;
+
+ output_json_delimiters(out, comma, depth);
+ output_json_string(out, key);
+ fputs(": ", out);
+ va_start(args, format);
+ vfprintf(out, format, args);
+ va_end(args);
+}
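Together these helpers keep all comma/newline/indent bookkeeping out of the callers; a minimal sketch of how they compose:

	/* Emits:  {\n\t"pid": 42,\n\t"comm": "bash"  (brace closed elsewhere) */
	fputc('{', out);
	output_json_key_format(out, false, 1, "pid", "%d", 42);	/* first value: no comma */
	output_json_key_string(out, true, 1, "comm", "bash");	/* comma = true from here on */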
+
+static void output_sample_callchain_entry(struct perf_tool *tool,
+ u64 ip, struct addr_location *al)
+{
+ struct convert_json *c = container_of(tool, struct convert_json, tool);
+ FILE *out = c->out;
+
+ output_json_format(out, false, 4, "{");
+ output_json_key_format(out, false, 5, "ip", "\"0x%" PRIx64 "\"", ip);
+
+ if (al && al->sym && al->sym->namelen) {
+ fputc(',', out);
+ output_json_key_string(out, false, 5, "symbol", al->sym->name);
+
+ if (al->map && al->map->dso) {
+ const char *dso = al->map->dso->short_name;
+
+ if (dso && strlen(dso) > 0) {
+ fputc(',', out);
+ output_json_key_string(out, false, 5, "dso", dso);
+ }
+ }
+ }
+
+ output_json_format(out, false, 4, "}");
+}
+
+static int process_sample_event(struct perf_tool *tool,
+ union perf_event *event __maybe_unused,
+ struct perf_sample *sample,
+ struct evsel *evsel __maybe_unused,
+ struct machine *machine)
+{
+ struct convert_json *c = container_of(tool, struct convert_json, tool);
+ FILE *out = c->out;
+ struct addr_location al, tal;
+ u8 cpumode = PERF_RECORD_MISC_USER;
+
+ if (machine__resolve(machine, &al, sample) < 0) {
+ pr_err("Sample resolution failed!\n");
+ return -1;
+ }
+
+ ++c->events_count;
+
+ if (c->first)
+ c->first = false;
+ else
+ fputc(',', out);
+ output_json_format(out, false, 2, "{");
+
+ output_json_key_format(out, false, 3, "timestamp", "%" PRIi64, sample->time);
+ output_json_key_format(out, true, 3, "pid", "%i", al.thread->pid_);
+ output_json_key_format(out, true, 3, "tid", "%i", al.thread->tid);
+
+ if (al.thread->cpu >= 0)
+ output_json_key_format(out, true, 3, "cpu", "%i", al.thread->cpu);
+
+ output_json_key_string(out, true, 3, "comm", thread__comm_str(al.thread));
+
+ output_json_key_format(out, true, 3, "callchain", "[");
+ if (sample->callchain) {
+ unsigned int i;
+ bool ok;
+ bool first_callchain = true;
+
+ for (i = 0; i < sample->callchain->nr; ++i) {
+ u64 ip = sample->callchain->ips[i];
+
+ if (ip >= PERF_CONTEXT_MAX) {
+ switch (ip) {
+ case PERF_CONTEXT_HV:
+ cpumode = PERF_RECORD_MISC_HYPERVISOR;
+ break;
+ case PERF_CONTEXT_KERNEL:
+ cpumode = PERF_RECORD_MISC_KERNEL;
+ break;
+ case PERF_CONTEXT_USER:
+ cpumode = PERF_RECORD_MISC_USER;
+ break;
+ default:
+ pr_debug("invalid callchain context: %"
+ PRId64 "\n", (s64) ip);
+ break;
+ }
+ continue;
+ }
+
+ if (first_callchain)
+ first_callchain = false;
+ else
+ fputc(',', out);
+
+ ok = thread__find_symbol(al.thread, cpumode, ip, &tal);
+ output_sample_callchain_entry(tool, ip, ok ? &tal : NULL);
+ }
+ } else {
+ output_sample_callchain_entry(tool, sample->ip, &al);
+ }
+ output_json_format(out, false, 3, "]");
+
+ output_json_format(out, false, 2, "}");
+ return 0;
+}
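For reference, a single emitted sample then has roughly the following shape (all values here are invented):

	{
		"timestamp": 120259212144,
		"pid": 1234,
		"tid": 1234,
		"comm": "bash",
		"callchain": [
			{"ip": "0xffffffff9c001234", "symbol": "do_syscall_64", "dso": "[kernel.kallsyms]"},
			{"ip": "0x7f0123456789"}
		]
	}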
+
+static void output_headers(struct perf_session *session, struct convert_json *c)
+{
+ struct stat st;
+ struct perf_header *header = &session->header;
+ int ret;
+ int fd = perf_data__fd(session->data);
+ int i;
+ FILE *out = c->out;
+
+ output_json_key_format(out, false, 2, "header-version", "%u", header->version);
+
+ ret = fstat(fd, &st);
+ if (ret >= 0) {
+ time_t stctime = st.st_mtime;
+ char buf[256];
+
+ strftime(buf, sizeof(buf), "%FT%TZ", gmtime(&stctime));
+ output_json_key_string(out, true, 2, "captured-on", buf);
+ } else {
+		pr_debug("Failed to get mtime of source file, not writing captured-on\n");
+ }
+
+ output_json_key_format(out, true, 2, "data-offset", "%" PRIu64, header->data_offset);
+ output_json_key_format(out, true, 2, "data-size", "%" PRIu64, header->data_size);
+ output_json_key_format(out, true, 2, "feat-offset", "%" PRIu64, header->feat_offset);
+
+ output_json_key_string(out, true, 2, "hostname", header->env.hostname);
+ output_json_key_string(out, true, 2, "os-release", header->env.os_release);
+ output_json_key_string(out, true, 2, "arch", header->env.arch);
+
+ output_json_key_string(out, true, 2, "cpu-desc", header->env.cpu_desc);
+ output_json_key_string(out, true, 2, "cpuid", header->env.cpuid);
+ output_json_key_format(out, true, 2, "nrcpus-online", "%u", header->env.nr_cpus_online);
+ output_json_key_format(out, true, 2, "nrcpus-avail", "%u", header->env.nr_cpus_avail);
+
+ if (header->env.clock.enabled) {
+ output_json_key_format(out, true, 2, "clockid",
+ "%u", header->env.clock.clockid);
+ output_json_key_format(out, true, 2, "clock-time",
+ "%" PRIu64, header->env.clock.clockid_ns);
+ output_json_key_format(out, true, 2, "real-time",
+ "%" PRIu64, header->env.clock.tod_ns);
+ }
+
+ output_json_key_string(out, true, 2, "perf-version", header->env.version);
+
+ output_json_key_format(out, true, 2, "cmdline", "[");
+ for (i = 0; i < header->env.nr_cmdline; i++) {
+ output_json_delimiters(out, i != 0, 3);
+ output_json_string(c->out, header->env.cmdline_argv[i]);
+ }
+ output_json_format(out, false, 2, "]");
+}
+
+int bt_convert__perf2json(const char *input_name, const char *output_name,
+ struct perf_data_convert_opts *opts __maybe_unused)
+{
+ struct perf_session *session;
+ int fd;
+ int ret = -1;
+
+ struct convert_json c = {
+ .tool = {
+ .sample = process_sample_event,
+ .mmap = perf_event__process_mmap,
+ .mmap2 = perf_event__process_mmap2,
+ .comm = perf_event__process_comm,
+ .namespaces = perf_event__process_namespaces,
+ .cgroup = perf_event__process_cgroup,
+ .exit = perf_event__process_exit,
+ .fork = perf_event__process_fork,
+ .lost = perf_event__process_lost,
+ .tracing_data = perf_event__process_tracing_data,
+ .build_id = perf_event__process_build_id,
+ .id_index = perf_event__process_id_index,
+ .auxtrace_info = perf_event__process_auxtrace_info,
+ .auxtrace = perf_event__process_auxtrace,
+ .event_update = perf_event__process_event_update,
+ .ordered_events = true,
+ .ordering_requires_timestamps = true,
+ },
+ .first = true,
+ .events_count = 0,
+ };
+
+ struct perf_data data = {
+ .mode = PERF_DATA_MODE_READ,
+ .path = input_name,
+ .force = opts->force,
+ };
+
+ if (opts->all) {
+ pr_err("--all is currently unsupported for JSON output.\n");
+ goto err;
+ }
+ if (opts->tod) {
+ pr_err("--tod is currently unsupported for JSON output.\n");
+ goto err;
+ }
+
+ fd = open(output_name, O_CREAT | O_WRONLY | (opts->force ? O_TRUNC : O_EXCL), 0666);
+ if (fd == -1) {
+ if (errno == EEXIST)
+ pr_err("Output file exists. Use --force to overwrite it.\n");
+ else
+ pr_err("Error opening output file!\n");
+ goto err;
+ }
+
+ c.out = fdopen(fd, "w");
+ if (!c.out) {
+ fprintf(stderr, "Error opening output file!\n");
+ close(fd);
+ goto err;
+ }
+
+ session = perf_session__new(&data, false, &c.tool);
+ if (IS_ERR(session)) {
+ fprintf(stderr, "Error creating perf session!\n");
+ goto err_fclose;
+ }
+
+ if (symbol__init(&session->header.env) < 0) {
+ fprintf(stderr, "Symbol init error!\n");
+ goto err_session_delete;
+ }
+
+ // The opening brace is printed manually because it isn't delimited from a
+ // previous value (i.e. we don't want a leading newline)
+ fputc('{', c.out);
+
+ // Version number for future-proofing. Most additions should be able to be
+ // done in a backwards-compatible way so this should only need to be bumped
+ // if some major breaking change must be made.
+ output_json_format(c.out, false, 1, "\"linux-perf-json-version\": 1");
+
+ // Output headers
+ output_json_format(c.out, true, 1, "\"headers\": {");
+ output_headers(session, &c);
+ output_json_format(c.out, false, 1, "}");
+
+ // Output samples
+ output_json_format(c.out, true, 1, "\"samples\": [");
+ perf_session__process_events(session);
+ output_json_format(c.out, false, 1, "]");
+ output_json_format(c.out, false, 0, "}");
+ fputc('\n', c.out);
+
+ fprintf(stderr,
+ "[ perf data convert: Converted '%s' into JSON data '%s' ]\n",
+ data.path, output_name);
+
+ fprintf(stderr,
+ "[ perf data convert: Converted and wrote %.3f MB (%" PRIu64 " samples) ]\n",
+ (ftell(c.out)) / 1024.0 / 1024.0, c.events_count);
+
+ ret = 0;
+err_session_delete:
+ perf_session__delete(session);
+err_fclose:
+ fclose(c.out);
+err:
+ return ret;
+}
diff --git a/tools/perf/util/data-convert.h b/tools/perf/util/data-convert.h
index feab5f114e37..1b4c5f598415 100644
--- a/tools/perf/util/data-convert.h
+++ b/tools/perf/util/data-convert.h
@@ -2,10 +2,20 @@
#ifndef __DATA_CONVERT_H
#define __DATA_CONVERT_H
+#include <stdbool.h>
+
struct perf_data_convert_opts {
bool force;
bool all;
bool tod;
};
+#ifdef HAVE_LIBBABELTRACE_SUPPORT
+int bt_convert__perf2ctf(const char *input_name, const char *to_ctf,
+ struct perf_data_convert_opts *opts);
+#endif /* HAVE_LIBBABELTRACE_SUPPORT */
+
+int bt_convert__perf2json(const char *input_name, const char *output_name,
+ struct perf_data_convert_opts *opts);
+
#endif /* __DATA_CONVERT_H */
diff --git a/tools/perf/util/demangle-java.c b/tools/perf/util/demangle-java.c
index 39c05200ed65..ddf33d58bcd3 100644
--- a/tools/perf/util/demangle-java.c
+++ b/tools/perf/util/demangle-java.c
@@ -147,7 +147,7 @@ error:
* Demangle Java function signature (openJDK, not GCJ)
* input:
* str: string to parse. String is not modified
- * flags: comobination of JAVA_DEMANGLE_* flags to modify demangling
+ * flags: combination of JAVA_DEMANGLE_* flags to modify demangling
* return:
* if input can be demangled, then a newly allocated string is returned.
* if input cannot be demangled, then NULL is returned
@@ -164,7 +164,7 @@ java_demangle_sym(const char *str, int flags)
if (!str)
return NULL;
- /* find start of retunr type */
+ /* find start of return type */
p = strrchr(str, ')');
if (!p)
return NULL;
diff --git a/tools/perf/util/demangle-ocaml.c b/tools/perf/util/demangle-ocaml.c
index 3df14e67c622..9d707bb60b4b 100644
--- a/tools/perf/util/demangle-ocaml.c
+++ b/tools/perf/util/demangle-ocaml.c
@@ -64,17 +64,5 @@ ocaml_demangle_sym(const char *sym)
}
result[j] = '\0';
- /* scan backwards to remove an "_" followed by decimal digits */
- if (j != 0 && isdigit(result[j - 1])) {
- while (--j) {
- if (!isdigit(result[j])) {
- break;
- }
- }
- if (result[j] == '_') {
- result[j] = '\0';
- }
- }
-
return result;
}
diff --git a/tools/perf/util/dso.h b/tools/perf/util/dso.h
index cd2fe64a3c5d..52e7101c5609 100644
--- a/tools/perf/util/dso.h
+++ b/tools/perf/util/dso.h
@@ -216,7 +216,7 @@ struct dso {
/* dso__for_each_symbol - iterate over the symbols of given type
*
- * @dso: the 'struct dso *' in which symbols itereated
+ * @dso: the 'struct dso *' in which symbols are iterated
* @pos: the 'struct symbol *' to use as a loop cursor
* @n: the 'struct rb_node *' to use as a temporary storage
*/
diff --git a/tools/perf/util/dwarf-aux.c b/tools/perf/util/dwarf-aux.c
index 7b2d471a6419..b2f4920e19a6 100644
--- a/tools/perf/util/dwarf-aux.c
+++ b/tools/perf/util/dwarf-aux.c
@@ -91,7 +91,7 @@ static Dwarf_Line *cu_getsrc_die(Dwarf_Die *cu_die, Dwarf_Addr addr)
return NULL;
} while (laddr == addr);
l++;
- /* Going foward to find the statement line */
+ /* Going forward to find the statement line */
do {
line = dwarf_onesrcline(lines, l++);
if (!line || dwarf_lineaddr(line, &laddr) != 0 ||
@@ -177,7 +177,7 @@ int cu_walk_functions_at(Dwarf_Die *cu_die, Dwarf_Addr addr,
* die_get_linkage_name - Get the linkage name of the object
* @dw_die: A DIE of the object
*
- * Get the linkage name attiribute of given @dw_die.
+ * Get the linkage name attribute of given @dw_die.
* For C++ binary, the linkage name will be the mangled symbol.
*/
const char *die_get_linkage_name(Dwarf_Die *dw_die)
@@ -739,7 +739,7 @@ static int __die_walk_instances_cb(Dwarf_Die *inst, void *data)
* @data: user data
*
* Walk on the instances of give @in_die. @in_die must be an inlined function
- * declartion. This returns the return value of @callback if it returns
+ * declaration. This returns the return value of @callback if it returns
* non-zero value, or -ENOENT if there is no instance.
*/
int die_walk_instances(Dwarf_Die *or_die, int (*callback)(Dwarf_Die *, void *),
diff --git a/tools/perf/util/dwarf-aux.h b/tools/perf/util/dwarf-aux.h
index 506006e0cf66..cb99646843a9 100644
--- a/tools/perf/util/dwarf-aux.h
+++ b/tools/perf/util/dwarf-aux.h
@@ -22,7 +22,7 @@ const char *cu_get_comp_dir(Dwarf_Die *cu_die);
int cu_find_lineinfo(Dwarf_Die *cudie, unsigned long addr,
const char **fname, int *lineno);
-/* Walk on funcitons at given address */
+/* Walk on functions at given address */
int cu_walk_functions_at(Dwarf_Die *cu_die, Dwarf_Addr addr,
int (*callback)(Dwarf_Die *, void *), void *data);
diff --git a/tools/perf/util/dwarf-regs.c b/tools/perf/util/dwarf-regs.c
index 1b49ecee5aff..3fa4486742cd 100644
--- a/tools/perf/util/dwarf-regs.c
+++ b/tools/perf/util/dwarf-regs.c
@@ -24,6 +24,7 @@
#include "../arch/s390/include/dwarf-regs-table.h"
#include "../arch/sparc/include/dwarf-regs-table.h"
#include "../arch/xtensa/include/dwarf-regs-table.h"
+#include "../arch/mips/include/dwarf-regs-table.h"
#define __get_dwarf_regstr(tbl, n) (((n) < ARRAY_SIZE(tbl)) ? (tbl)[(n)] : NULL)
@@ -53,6 +54,8 @@ const char *get_dwarf_regstr(unsigned int n, unsigned int machine)
return __get_dwarf_regstr(sparc_regstr_tbl, n);
case EM_XTENSA:
return __get_dwarf_regstr(xtensa_regstr_tbl, n);
+ case EM_MIPS:
+ return __get_dwarf_regstr(mips_regstr_tbl, n);
default:
pr_err("ELF MACHINE %x is not supported.\n", machine);
}
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index f603edbbbc6f..8a62fb39e365 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -147,6 +147,7 @@ struct perf_sample {
u8 cpumode;
u16 misc;
u16 ins_lat;
+ u16 p_stage_cyc;
bool no_hw_idx; /* No hw_idx collected in branch_stack */
char insn[MAX_INSN];
void *raw_data;
@@ -427,5 +428,7 @@ char *get_page_size_name(u64 size, char *str);
void arch_perf_parse_sample_weight(struct perf_sample *data, const __u64 *array, u64 type);
void arch_perf_synthesize_sample_weight(const struct perf_sample *data, __u64 *array, u64 type);
+const char *arch_perf_header_entry(const char *se_header);
+int arch_support_sort_key(const char *sort_key);
#endif /* __PERF_RECORD_H */
diff --git a/tools/perf/util/events_stats.h b/tools/perf/util/events_stats.h
index 859cb34fcff2..3480bafd414b 100644
--- a/tools/perf/util/events_stats.h
+++ b/tools/perf/util/events_stats.h
@@ -21,20 +21,17 @@
* all struct perf_record_lost_samples.lost fields reported.
*
* The total_period is needed because by default auto-freq is used, so
- * multipling nr_events[PERF_EVENT_SAMPLE] by a frequency isn't possible to get
+ * multiplying nr_events[PERF_EVENT_SAMPLE] by a frequency isn't possible to get
* the total number of low level events, it is necessary to to sum all struct
* perf_record_sample.period and stash the result in total_period.
*/
struct events_stats {
- u64 total_period;
- u64 total_non_filtered_period;
u64 total_lost;
u64 total_lost_samples;
u64 total_aux_lost;
u64 total_aux_partial;
u64 total_invalid_chains;
u32 nr_events[PERF_RECORD_HEADER_MAX];
- u32 nr_non_filtered_samples;
u32 nr_lost_warned;
u32 nr_unknown_events;
u32 nr_invalid_chains;
@@ -44,8 +41,16 @@ struct events_stats {
u32 nr_proc_map_timeout;
};
+struct hists_stats {
+ u64 total_period;
+ u64 total_non_filtered_period;
+ u32 nr_samples;
+ u32 nr_non_filtered_samples;
+};
+
void events_stats__inc(struct events_stats *stats, u32 type);
-size_t events_stats__fprintf(struct events_stats *stats, FILE *fp);
+size_t events_stats__fprintf(struct events_stats *stats, FILE *fp,
+ bool skip_empty);
#endif /* __PERF_EVENTS_STATS_ */
diff --git a/tools/perf/util/evlist-hybrid.c b/tools/perf/util/evlist-hybrid.c
new file mode 100644
index 000000000000..db3f5fbdebe1
--- /dev/null
+++ b/tools/perf/util/evlist-hybrid.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <errno.h>
+#include <inttypes.h>
+#include "cpumap.h"
+#include "evlist.h"
+#include "evsel.h"
+#include "../perf.h"
+#include "util/pmu-hybrid.h"
+#include "util/evlist-hybrid.h"
+#include "debug.h"
+#include <unistd.h>
+#include <stdlib.h>
+#include <linux/err.h>
+#include <linux/string.h>
+#include <perf/evlist.h>
+#include <perf/evsel.h>
+#include <perf/cpumap.h>
+
+int evlist__add_default_hybrid(struct evlist *evlist, bool precise)
+{
+ struct evsel *evsel;
+ struct perf_pmu *pmu;
+ __u64 config;
+ struct perf_cpu_map *cpus;
+
+ perf_pmu__for_each_hybrid_pmu(pmu) {
+ config = PERF_COUNT_HW_CPU_CYCLES |
+ ((__u64)pmu->type << PERF_PMU_TYPE_SHIFT);
+ evsel = evsel__new_cycles(precise, PERF_TYPE_HARDWARE,
+ config);
+ if (!evsel)
+ return -ENOMEM;
+
+ cpus = perf_cpu_map__get(pmu->cpus);
+ evsel->core.cpus = cpus;
+ evsel->core.own_cpus = perf_cpu_map__get(cpus);
+ evsel->pmu_name = strdup(pmu->name);
+ evlist__add(evlist, evsel);
+ }
+
+ return 0;
+}
+
+static bool group_hybrid_conflict(struct evsel *leader)
+{
+ struct evsel *pos, *prev = NULL;
+
+ for_each_group_evsel(pos, leader) {
+ if (!evsel__is_hybrid(pos))
+ continue;
+
+ if (prev && strcmp(prev->pmu_name, pos->pmu_name))
+ return true;
+
+ prev = pos;
+ }
+
+ return false;
+}
+
+void evlist__warn_hybrid_group(struct evlist *evlist)
+{
+ struct evsel *evsel;
+
+ evlist__for_each_entry(evlist, evsel) {
+ if (evsel__is_group_leader(evsel) &&
+ evsel->core.nr_members > 1 &&
+ group_hybrid_conflict(evsel)) {
+ pr_warning("WARNING: events in group from "
+ "different hybrid PMUs!\n");
+ return;
+ }
+ }
+}
+
+bool evlist__has_hybrid(struct evlist *evlist)
+{
+ struct evsel *evsel;
+
+ evlist__for_each_entry(evlist, evsel) {
+ if (evsel->pmu_name &&
+ perf_pmu__is_hybrid(evsel->pmu_name)) {
+ return true;
+ }
+ }
+
+ return false;
+}
diff --git a/tools/perf/util/evlist-hybrid.h b/tools/perf/util/evlist-hybrid.h
new file mode 100644
index 000000000000..19f74b4c340a
--- /dev/null
+++ b/tools/perf/util/evlist-hybrid.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __PERF_EVLIST_HYBRID_H
+#define __PERF_EVLIST_HYBRID_H
+
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include "evlist.h"
+#include <unistd.h>
+
+int evlist__add_default_hybrid(struct evlist *evlist, bool precise);
+void evlist__warn_hybrid_group(struct evlist *evlist);
+bool evlist__has_hybrid(struct evlist *evlist);
+
+#endif /* __PERF_EVLIST_HYBRID_H */
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 882cd1f721d9..6e5c41528c7d 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -17,6 +17,7 @@
#include "evsel.h"
#include "debug.h"
#include "units.h"
+#include "bpf_counter.h"
#include <internal/lib.h> // page_size
#include "affinity.h"
#include "../perf.h"
@@ -25,6 +26,7 @@
#include "util/string2.h"
#include "util/perf_api_probe.h"
#include "util/evsel_fprintf.h"
+#include "util/evlist-hybrid.h"
#include <signal.h>
#include <unistd.h>
#include <sched.h>
@@ -36,6 +38,7 @@
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
+#include <sys/prctl.h>
#include <linux/bitops.h>
#include <linux/hash.h>
@@ -246,8 +249,10 @@ void evlist__set_leader(struct evlist *evlist)
int __evlist__add_default(struct evlist *evlist, bool precise)
{
- struct evsel *evsel = evsel__new_cycles(precise);
+ struct evsel *evsel;
+ evsel = evsel__new_cycles(precise, PERF_TYPE_HARDWARE,
+ PERF_COUNT_HW_CPU_CYCLES);
if (evsel == NULL)
return -ENOMEM;
@@ -420,6 +425,9 @@ static void __evlist__disable(struct evlist *evlist, char *evsel_name)
if (affinity__setup(&affinity) < 0)
return;
+ evlist__for_each_entry(evlist, pos)
+ bpf_counter__disable(pos);
+
/* Disable 'immediate' events last */
for (imm = 0; imm <= 1; imm++) {
evlist__for_each_cpu(evlist, i, cpu) {
@@ -1209,7 +1217,7 @@ bool evlist__valid_read_format(struct evlist *evlist)
}
}
- /* PERF_SAMPLE_READ imples PERF_FORMAT_ID. */
+ /* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */
if ((sample_type & PERF_SAMPLE_READ) &&
!(read_format & PERF_FORMAT_ID)) {
return false;
@@ -1406,6 +1414,13 @@ int evlist__prepare_workload(struct evlist *evlist, struct target *target, const
fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);
/*
+ * Change the name of this process not to confuse --exclude-perf users
+ * that sees 'perf' in the window up to the execvp() and thinks that
+ * perf samples are not being excluded.
+ */
+ prctl(PR_SET_NAME, "perf-exec");
+
+ /*
* Tell the parent we're ready to go
*/
close(child_ready_pipe[1]);
@@ -2130,3 +2145,22 @@ struct evsel *evlist__find_evsel(struct evlist *evlist, int idx)
}
return NULL;
}
+
+int evlist__scnprintf_evsels(struct evlist *evlist, size_t size, char *bf)
+{
+ struct evsel *evsel;
+ int printed = 0;
+
+ evlist__for_each_entry(evlist, evsel) {
+ if (evsel__is_dummy_event(evsel))
+ continue;
+ if (size > (strlen(evsel__name(evsel)) + (printed ? 2 : 1))) {
+			printed += scnprintf(bf + printed, size - printed, "%s%s",
+					     printed ? "," : "", evsel__name(evsel));
+ } else {
+ printed += scnprintf(bf + printed, size - printed, "%s...", printed ? "," : "");
+ break;
+ }
+ }
+
+ return printed;
+}
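A hedged usage sketch of the new helper (buffer size and event names invented):

	char buf[64];

	evlist__scnprintf_evsels(evlist, sizeof(buf), buf);
	/* buf is now e.g. "cycles,instructions", or "cycles,..." if truncated */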
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index b695ffaae519..a8b97b50cceb 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -365,4 +365,6 @@ int evlist__ctlfd_ack(struct evlist *evlist);
#define EVLIST_DISABLED_MSG "Events disabled\n"
struct evsel *evlist__find_evsel(struct evlist *evlist, int idx);
+
+int evlist__scnprintf_evsels(struct evlist *evlist, size_t size, char *bf);
#endif /* __PERF_EVLIST_H */
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 7ecbc8e2fbfa..4a3cd1b5bb33 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -47,6 +47,7 @@
#include "memswap.h"
#include "util.h"
#include "hashmap.h"
+#include "pmu-hybrid.h"
#include "../perf-sys.h"
#include "util/parse-branch-options.h"
#include <internal/xyarray.h>
@@ -295,11 +296,11 @@ static bool perf_event_can_profile_kernel(void)
return perf_event_paranoid_check(1);
}
-struct evsel *evsel__new_cycles(bool precise)
+struct evsel *evsel__new_cycles(bool precise, __u32 type, __u64 config)
{
struct perf_event_attr attr = {
- .type = PERF_TYPE_HARDWARE,
- .config = PERF_COUNT_HW_CPU_CYCLES,
+ .type = type,
+ .config = config,
.exclude_kernel = !perf_event_can_profile_kernel(),
};
struct evsel *evsel;
@@ -492,6 +493,28 @@ const char *evsel__hw_names[PERF_COUNT_HW_MAX] = {
"ref-cycles",
};
+char *evsel__bpf_counter_events;
+
+bool evsel__match_bpf_counter_events(const char *name)
+{
+ int name_len;
+ bool match;
+ char *ptr;
+
+ if (!evsel__bpf_counter_events)
+ return false;
+
+ ptr = strstr(evsel__bpf_counter_events, name);
+ name_len = strlen(name);
+
+ /* check name matches a full token in evsel__bpf_counter_events */
+ match = (ptr != NULL) &&
+ ((ptr == evsel__bpf_counter_events) || (*(ptr - 1) == ',')) &&
+ ((*(ptr + name_len) == ',') || (*(ptr + name_len) == '\0'));
+
+ return match;
+}
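The pointer checks above implement full-token matching against the comma-separated list, so prefixes and suffixes of a token do not match. A sketch, assuming the list has been populated (e.g. from the stat.bpf-counter-events setting):

	evsel__bpf_counter_events = strdup("cycles,instructions");

	evsel__match_bpf_counter_events("cycles");		/* true                     */
	evsel__match_bpf_counter_events("instructions");	/* true                     */
	evsel__match_bpf_counter_events("cycle");		/* false: prefix of a token */
	evsel__match_bpf_counter_events("structions");		/* false: suffix of a token */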
+
static const char *__evsel__hw_name(u64 config)
{
if (config < PERF_COUNT_HW_MAX && evsel__hw_names[config])
@@ -621,7 +644,7 @@ const char *evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX][EVSEL__MAX_AL
#define COP(x) (1 << x)
/*
- * cache operartion stat
+ * cache operation stat
* L1I : Read and prefetch only
* ITLB and BPU : Read-only
*/
@@ -2275,7 +2298,7 @@ int evsel__parse_sample(struct evsel *evsel, union perf_event *event,
/*
* Undo swap of u64, then swap on individual u32s,
* get the size of the raw area and undo all of the
- * swap. The pevent interface handles endianity by
+ * swap. The pevent interface handles endianness by
* itself.
*/
if (swapped) {
@@ -2797,3 +2820,8 @@ void evsel__zero_per_pkg(struct evsel *evsel)
hashmap__clear(evsel->per_pkg_mask);
}
}
+
+bool evsel__is_hybrid(struct evsel *evsel)
+{
+ return evsel->pmu_name && perf_pmu__is_hybrid(evsel->pmu_name);
+}
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index 6026487353dd..75cf5dbfe208 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -20,6 +20,8 @@ union perf_event;
struct bpf_counter_ops;
struct target;
struct hashmap;
+struct bperf_leader_bpf;
+struct bperf_follower_bpf;
typedef int (evsel__sb_cb_t)(union perf_event *event, void *data);
@@ -80,6 +82,7 @@ struct evsel {
bool auto_merge_stats;
bool collect_stat;
bool weak_group;
+ bool bpf_counter;
int bpf_fd;
struct bpf_object *bpf_obj;
};
@@ -113,6 +116,7 @@ struct evsel {
bool merged_stat;
bool reset_group;
bool errored;
+ bool use_config_name;
struct hashmap *per_pkg_mask;
struct evsel *leader;
struct list_head config_terms;
@@ -130,8 +134,24 @@ struct evsel {
* See also evsel__has_callchain().
*/
__u64 synth_sample_type;
- struct list_head bpf_counter_list;
+
+ /*
+ * bpf_counter_ops serves two use cases:
+	 * 1. perf-stat -b          counting events used by BPF programs
+ * 2. perf-stat --use-bpf use BPF programs to aggregate counts
+ */
struct bpf_counter_ops *bpf_counter_ops;
+
+ /* for perf-stat -b */
+ struct list_head bpf_counter_list;
+
+ /* for perf-stat --use-bpf */
+ int bperf_leader_prog_fd;
+ int bperf_leader_link_fd;
+ union {
+ struct bperf_leader_bpf *leader_skel;
+ struct bperf_follower_bpf *follower_skel;
+ };
};
struct perf_missing_features {
@@ -157,7 +177,6 @@ struct perf_missing_features {
extern struct perf_missing_features perf_missing_features;
struct perf_cpu_map;
-struct target;
struct thread_map;
struct record_opts;
@@ -202,7 +221,7 @@ static inline struct evsel *evsel__newtp(const char *sys, const char *name)
return evsel__newtp_idx(sys, name, 0);
}
-struct evsel *evsel__new_cycles(bool precise);
+struct evsel *evsel__new_cycles(bool precise, __u32 type, __u64 config);
struct tep_event *event_format__new(const char *sys, const char *name);
@@ -222,6 +241,11 @@ void evsel__calc_id_pos(struct evsel *evsel);
bool evsel__is_cache_op_valid(u8 type, u8 op);
+static inline bool evsel__is_bpf(struct evsel *evsel)
+{
+ return evsel->bpf_counter_ops != NULL;
+}
+
#define EVSEL__MAX_ALIASES 8
extern const char *evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX][EVSEL__MAX_ALIASES];
@@ -229,6 +253,9 @@ extern const char *evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX][EVSEL__MAX_ALI
extern const char *evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX][EVSEL__MAX_ALIASES];
extern const char *evsel__hw_names[PERF_COUNT_HW_MAX];
extern const char *evsel__sw_names[PERF_COUNT_SW_MAX];
+extern char *evsel__bpf_counter_events;
+bool evsel__match_bpf_counter_events(const char *name);
+
int __evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result, char *bf, size_t size);
const char *evsel__name(struct evsel *evsel);
@@ -435,4 +462,5 @@ struct perf_env *evsel__env(struct evsel *evsel);
int evsel__store_ids(struct evsel *evsel, struct evlist *evlist);
void evsel__zero_per_pkg(struct evsel *evsel);
+bool evsel__is_hybrid(struct evsel *evsel);
#endif /* __PERF_EVSEL_H */
diff --git a/tools/perf/util/expr.h b/tools/perf/util/expr.h
index dcf8d19b83c8..85df3e4771e4 100644
--- a/tools/perf/util/expr.h
+++ b/tools/perf/util/expr.h
@@ -3,7 +3,7 @@
#define PARSE_CTX_H 1
// There are fixes that need to land upstream before we can use libbpf's headers,
-// for now use our copy uncoditionally, since the data structures at this point
+// for now use our copy unconditionally, since the data structures at this point
// are exactly the same, no problem.
//#ifdef HAVE_LIBBPF_SUPPORT
//#include <bpf/hashmap.h>
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 20effdff76ce..aa1e42518d37 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -127,7 +127,7 @@ static int __do_write_buf(struct feat_fd *ff, const void *buf, size_t size)
return 0;
}
-/* Return: 0 if succeded, -ERR if failed. */
+/* Return: 0 if succeeded, -ERR if failed. */
int do_write(struct feat_fd *ff, const void *buf, size_t size)
{
if (!ff->buf)
@@ -135,7 +135,7 @@ int do_write(struct feat_fd *ff, const void *buf, size_t size)
return __do_write_buf(ff, buf, size);
}
-/* Return: 0 if succeded, -ERR if failed. */
+/* Return: 0 if succeeded, -ERR if failed. */
static int do_write_bitmap(struct feat_fd *ff, unsigned long *set, u64 size)
{
u64 *p = (u64 *) set;
@@ -154,7 +154,7 @@ static int do_write_bitmap(struct feat_fd *ff, unsigned long *set, u64 size)
return 0;
}
-/* Return: 0 if succeded, -ERR if failed. */
+/* Return: 0 if succeeded, -ERR if failed. */
int write_padded(struct feat_fd *ff, const void *bf,
size_t count, size_t count_aligned)
{
@@ -170,7 +170,7 @@ int write_padded(struct feat_fd *ff, const void *bf,
#define string_size(str) \
(PERF_ALIGN((strlen(str) + 1), NAME_ALIGN) + sizeof(u32))
-/* Return: 0 if succeded, -ERR if failed. */
+/* Return: 0 if succeeded, -ERR if failed. */
static int do_write_string(struct feat_fd *ff, const char *str)
{
u32 len, olen;
@@ -266,7 +266,7 @@ static char *do_read_string(struct feat_fd *ff)
return NULL;
}
-/* Return: 0 if succeded, -ERR if failed. */
+/* Return: 0 if succeeded, -ERR if failed. */
static int do_read_bitmap(struct feat_fd *ff, unsigned long **pset, u64 *psize)
{
unsigned long *set;
@@ -2874,7 +2874,7 @@ static int process_bpf_prog_info(struct feat_fd *ff, void *data __maybe_unused)
int err = -1;
if (ff->ph->needs_swap) {
- pr_warning("interpreting bpf_prog_info from systems with endianity is not yet supported\n");
+ pr_warning("interpreting bpf_prog_info from systems with endianness is not yet supported\n");
return 0;
}
@@ -2942,7 +2942,7 @@ static int process_bpf_btf(struct feat_fd *ff, void *data __maybe_unused)
int err = -1;
if (ff->ph->needs_swap) {
- pr_warning("interpreting btf from systems with endianity is not yet supported\n");
+ pr_warning("interpreting btf from systems with endianness is not yet supported\n");
return 0;
}
@@ -3481,11 +3481,11 @@ static const size_t attr_pipe_abi_sizes[] = {
};
/*
- * In the legacy pipe format, there is an implicit assumption that endiannesss
+ * In the legacy pipe format, there is an implicit assumption that endianness
* between host recording the samples, and host parsing the samples is the
* same. This is not always the case given that the pipe output may always be
* redirected into a file and analyzed on a different machine with possibly a
- * different endianness and perf_event ABI revsions in the perf tool itself.
+ * different endianness and perf_event ABI revisions in the perf tool itself.
*/
static int try_all_pipe_abis(uint64_t hdr_sz, struct perf_header *ph)
{
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index c82f5fc26af8..65fe65ba03c2 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -211,6 +211,7 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
hists__new_col_len(hists, HISTC_MEM_BLOCKED, 10);
hists__new_col_len(hists, HISTC_LOCAL_INS_LAT, 13);
hists__new_col_len(hists, HISTC_GLOBAL_INS_LAT, 13);
+ hists__new_col_len(hists, HISTC_P_STAGE_CYC, 13);
if (symbol_conf.nanosecs)
hists__new_col_len(hists, HISTC_TIME, 16);
else
@@ -289,13 +290,14 @@ static long hist_time(unsigned long htime)
}
static void he_stat__add_period(struct he_stat *he_stat, u64 period,
- u64 weight, u64 ins_lat)
+ u64 weight, u64 ins_lat, u64 p_stage_cyc)
{
he_stat->period += period;
he_stat->weight += weight;
he_stat->nr_events += 1;
he_stat->ins_lat += ins_lat;
+ he_stat->p_stage_cyc += p_stage_cyc;
}
static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
@@ -308,6 +310,7 @@ static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
dest->nr_events += src->nr_events;
dest->weight += src->weight;
dest->ins_lat += src->ins_lat;
+ dest->p_stage_cyc += src->p_stage_cyc;
}
static void he_stat__decay(struct he_stat *he_stat)
@@ -597,6 +600,7 @@ static struct hist_entry *hists__findnew_entry(struct hists *hists,
u64 period = entry->stat.period;
u64 weight = entry->stat.weight;
u64 ins_lat = entry->stat.ins_lat;
+ u64 p_stage_cyc = entry->stat.p_stage_cyc;
bool leftmost = true;
p = &hists->entries_in->rb_root.rb_node;
@@ -615,11 +619,11 @@ static struct hist_entry *hists__findnew_entry(struct hists *hists,
if (!cmp) {
if (sample_self) {
- he_stat__add_period(&he->stat, period, weight, ins_lat);
+ he_stat__add_period(&he->stat, period, weight, ins_lat, p_stage_cyc);
hist_entry__add_callchain_period(he, period);
}
if (symbol_conf.cumulate_callchain)
- he_stat__add_period(he->stat_acc, period, weight, ins_lat);
+ he_stat__add_period(he->stat_acc, period, weight, ins_lat, p_stage_cyc);
/*
* This mem info was allocated from sample__resolve_mem
@@ -731,6 +735,7 @@ __hists__add_entry(struct hists *hists,
.period = sample->period,
.weight = sample->weight,
.ins_lat = sample->ins_lat,
+ .p_stage_cyc = sample->p_stage_cyc,
},
.parent = sym_parent,
.filtered = symbol__parent_filter(sym_parent) | al->filtered,
@@ -2320,14 +2325,19 @@ void events_stats__inc(struct events_stats *stats, u32 type)
++stats->nr_events[type];
}
-void hists__inc_nr_events(struct hists *hists, u32 type)
+static void hists_stats__inc(struct hists_stats *stats)
{
- events_stats__inc(&hists->stats, type);
+ ++stats->nr_samples;
+}
+
+void hists__inc_nr_events(struct hists *hists)
+{
+ hists_stats__inc(&hists->stats);
}
void hists__inc_nr_samples(struct hists *hists, bool filtered)
{
- events_stats__inc(&hists->stats, PERF_RECORD_SAMPLE);
+ hists_stats__inc(&hists->stats);
if (!filtered)
hists->stats.nr_non_filtered_samples++;
}
@@ -2666,14 +2676,21 @@ void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
}
}
-size_t evlist__fprintf_nr_events(struct evlist *evlist, FILE *fp)
+size_t evlist__fprintf_nr_events(struct evlist *evlist, FILE *fp,
+ bool skip_empty)
{
struct evsel *pos;
size_t ret = 0;
evlist__for_each_entry(evlist, pos) {
+ struct hists *hists = evsel__hists(pos);
+
+ if (skip_empty && !hists->stats.nr_samples)
+ continue;
+
ret += fprintf(fp, "%s stats:\n", evsel__name(pos));
- ret += events_stats__fprintf(&evsel__hists(pos)->stats, fp);
+ ret += fprintf(fp, "%16s events: %10d\n",
+ "SAMPLE", hists->stats.nr_samples);
}
return ret;
@@ -2693,7 +2710,7 @@ int __hists__scnprintf_title(struct hists *hists, char *bf, size_t size, bool sh
const struct dso *dso = hists->dso_filter;
struct thread *thread = hists->thread_filter;
int socket_id = hists->socket_filter;
- unsigned long nr_samples = hists->stats.nr_events[PERF_RECORD_SAMPLE];
+ unsigned long nr_samples = hists->stats.nr_samples;
u64 nr_events = hists->stats.total_period;
struct evsel *evsel = hists_to_evsel(hists);
const char *ev_name = evsel__name(evsel);
@@ -2720,7 +2737,7 @@ int __hists__scnprintf_title(struct hists *hists, char *bf, size_t size, bool sh
nr_samples += pos_hists->stats.nr_non_filtered_samples;
nr_events += pos_hists->stats.total_non_filtered_period;
} else {
- nr_samples += pos_hists->stats.nr_events[PERF_RECORD_SAMPLE];
+ nr_samples += pos_hists->stats.nr_samples;
nr_events += pos_hists->stats.total_period;
}
}
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index 3c537232294b..5343b62476e6 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -75,6 +75,7 @@ enum hist_column {
HISTC_MEM_BLOCKED,
HISTC_LOCAL_INS_LAT,
HISTC_GLOBAL_INS_LAT,
+ HISTC_P_STAGE_CYC,
HISTC_NR_COLS, /* Last entry */
};
@@ -95,7 +96,7 @@ struct hists {
const char *uid_filter_str;
const char *symbol_filter_str;
pthread_mutex_t lock;
- struct events_stats stats;
+ struct hists_stats stats;
u64 event_stream;
u16 col_len[HISTC_NR_COLS];
bool has_callchains;
@@ -195,13 +196,14 @@ struct hist_entry *hists__get_entry(struct hists *hists, int idx);
u64 hists__total_period(struct hists *hists);
void hists__reset_stats(struct hists *hists);
void hists__inc_stats(struct hists *hists, struct hist_entry *h);
-void hists__inc_nr_events(struct hists *hists, u32 type);
+void hists__inc_nr_events(struct hists *hists);
void hists__inc_nr_samples(struct hists *hists, bool filtered);
size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
int max_cols, float min_pcnt, FILE *fp,
bool ignore_callchains);
-size_t evlist__fprintf_nr_events(struct evlist *evlist, FILE *fp);
+size_t evlist__fprintf_nr_events(struct evlist *evlist, FILE *fp,
+ bool skip_empty);
void hists__filter_by_dso(struct hists *hists);
void hists__filter_by_thread(struct hists *hists);
diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
index f6e28ac231b7..8658d42ce57a 100644
--- a/tools/perf/util/intel-pt.c
+++ b/tools/perf/util/intel-pt.c
@@ -3569,7 +3569,7 @@ int intel_pt_process_auxtrace_info(union perf_event *event,
/*
* Since this thread will not be kept in any rbtree not in a
* list, initialize its list node so that at thread__put() the
- * current thread lifetime assuption is kept and we don't segfault
+ * current thread lifetime assumption is kept and we don't segfault
* at list_del_init().
*/
INIT_LIST_HEAD(&pt->unknown_thread->node);
diff --git a/tools/perf/util/iostat.c b/tools/perf/util/iostat.c
new file mode 100644
index 000000000000..57dd49da28fe
--- /dev/null
+++ b/tools/perf/util/iostat.c
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "util/iostat.h"
+#include "util/debug.h"
+
+enum iostat_mode_t iostat_mode = IOSTAT_NONE;
+
+__weak int iostat_prepare(struct evlist *evlist __maybe_unused,
+ struct perf_stat_config *config __maybe_unused)
+{
+ return -1;
+}
+
+__weak int iostat_parse(const struct option *opt __maybe_unused,
+ const char *str __maybe_unused,
+ int unset __maybe_unused)
+{
+ pr_err("iostat mode is not supported on current platform\n");
+ return -1;
+}
+
+__weak void iostat_list(struct evlist *evlist __maybe_unused,
+ struct perf_stat_config *config __maybe_unused)
+{
+}
+
+__weak void iostat_release(struct evlist *evlist __maybe_unused)
+{
+}
+
+__weak void iostat_print_header_prefix(struct perf_stat_config *config __maybe_unused)
+{
+}
+
+__weak void iostat_print_metric(struct perf_stat_config *config __maybe_unused,
+ struct evsel *evsel __maybe_unused,
+ struct perf_stat_output_ctx *out __maybe_unused)
+{
+}
+
+__weak void iostat_prefix(struct evlist *evlist __maybe_unused,
+ struct perf_stat_config *config __maybe_unused,
+ char *prefix __maybe_unused,
+ struct timespec *ts __maybe_unused)
+{
+}
+
+__weak void iostat_print_counters(struct evlist *evlist __maybe_unused,
+ struct perf_stat_config *config __maybe_unused,
+ struct timespec *ts __maybe_unused,
+ char *prefix __maybe_unused,
+ iostat_print_counter_t print_cnt_cb __maybe_unused)
+{
+}
diff --git a/tools/perf/util/iostat.h b/tools/perf/util/iostat.h
new file mode 100644
index 000000000000..23c1c46a331a
--- /dev/null
+++ b/tools/perf/util/iostat.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * perf iostat
+ *
+ * Copyright (C) 2020, Intel Corporation
+ *
+ * Authors: Alexander Antonov <alexander.antonov@linux.intel.com>
+ */
+
+#ifndef _IOSTAT_H
+#define _IOSTAT_H
+
+#include <subcmd/parse-options.h>
+#include "util/stat.h"
+#include "util/parse-events.h"
+#include "util/evlist.h"
+
+struct option;
+struct perf_stat_config;
+struct evlist;
+struct timespec;
+
+enum iostat_mode_t {
+ IOSTAT_NONE = -1,
+ IOSTAT_RUN = 0,
+ IOSTAT_LIST = 1
+};
+
+extern enum iostat_mode_t iostat_mode;
+
+typedef void (*iostat_print_counter_t)(struct perf_stat_config *, struct evsel *, char *);
+
+int iostat_prepare(struct evlist *evlist, struct perf_stat_config *config);
+int iostat_parse(const struct option *opt, const char *str,
+ int unset __maybe_unused);
+void iostat_list(struct evlist *evlist, struct perf_stat_config *config);
+void iostat_release(struct evlist *evlist);
+void iostat_prefix(struct evlist *evlist, struct perf_stat_config *config,
+ char *prefix, struct timespec *ts);
+void iostat_print_header_prefix(struct perf_stat_config *config);
+void iostat_print_metric(struct perf_stat_config *config, struct evsel *evsel,
+ struct perf_stat_output_ctx *out);
+void iostat_print_counters(struct evlist *evlist,
+ struct perf_stat_config *config, struct timespec *ts,
+ char *prefix, iostat_print_counter_t print_cnt_cb);
+
+#endif /* _IOSTAT_H */
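The __weak stubs above make iostat a no-op unless an architecture supplies strong definitions; a minimal sketch of such an override (body illustrative, not the actual x86 implementation):

	/* In an arch-specific file: the strong definition replaces the __weak stub. */
	int iostat_parse(const struct option *opt __maybe_unused,
			 const char *str __maybe_unused, int unset __maybe_unused)
	{
		/* real support would parse 'str' and pick IOSTAT_RUN or IOSTAT_LIST */
		iostat_mode = IOSTAT_RUN;
		return 0;
	}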
diff --git a/tools/perf/util/jitdump.c b/tools/perf/util/jitdump.c
index 9760d8e7b386..917a9c707371 100644
--- a/tools/perf/util/jitdump.c
+++ b/tools/perf/util/jitdump.c
@@ -396,21 +396,31 @@ static pid_t jr_entry_tid(struct jit_buf_desc *jd, union jr_entry *jr)
static uint64_t convert_timestamp(struct jit_buf_desc *jd, uint64_t timestamp)
{
- struct perf_tsc_conversion tc;
+ struct perf_tsc_conversion tc = { .time_shift = 0, };
+ struct perf_record_time_conv *time_conv = &jd->session->time_conv;
if (!jd->use_arch_timestamp)
return timestamp;
- tc.time_shift = jd->session->time_conv.time_shift;
- tc.time_mult = jd->session->time_conv.time_mult;
- tc.time_zero = jd->session->time_conv.time_zero;
- tc.time_cycles = jd->session->time_conv.time_cycles;
- tc.time_mask = jd->session->time_conv.time_mask;
- tc.cap_user_time_zero = jd->session->time_conv.cap_user_time_zero;
- tc.cap_user_time_short = jd->session->time_conv.cap_user_time_short;
+ tc.time_shift = time_conv->time_shift;
+ tc.time_mult = time_conv->time_mult;
+ tc.time_zero = time_conv->time_zero;
- if (!tc.cap_user_time_zero)
- return 0;
+ /*
+	 * The TIME_CONV event was extended with the fields from "time_cycles"
+	 * onward when cap_user_time_short support was added. For backward
+	 * compatibility, check the event size and assign the extended fields
+	 * only if they are contained in the event.
+ */
+ if (event_contains(*time_conv, time_cycles)) {
+ tc.time_cycles = time_conv->time_cycles;
+ tc.time_mask = time_conv->time_mask;
+ tc.cap_user_time_zero = time_conv->cap_user_time_zero;
+ tc.cap_user_time_short = time_conv->cap_user_time_short;
+
+ if (!tc.cap_user_time_zero)
+ return 0;
+ }
return tsc_to_perf_time(timestamp, &tc);
}
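The event_contains() check works because perf_record_time_conv grew in place; a sketch of the assumed macro shape (see the perf sources for the real definition):

	#define event_contains(obj, mem) \
		((obj).header.size > (offsetof(typeof(obj), mem)))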
diff --git a/tools/perf/util/levenshtein.c b/tools/perf/util/levenshtein.c
index a217ecf0359d..6a6712635aa4 100644
--- a/tools/perf/util/levenshtein.c
+++ b/tools/perf/util/levenshtein.c
@@ -30,7 +30,7 @@
*
* It does so by calculating the costs of the path ending in characters
* i (in string1) and j (in string2), respectively, given that the last
- * operation is a substition, a swap, a deletion, or an insertion.
+ * operation is a substitution, a swap, a deletion, or an insertion.
*
* This implementation allows the costs to be weighted:
*
diff --git a/tools/perf/util/libunwind/arm64.c b/tools/perf/util/libunwind/arm64.c
index 6b4e5a0892f8..c397be0c2e32 100644
--- a/tools/perf/util/libunwind/arm64.c
+++ b/tools/perf/util/libunwind/arm64.c
@@ -4,7 +4,7 @@
* generic one.
*
* The function 'LIBUNWIND__ARCH_REG_ID' name is set according to arch
- * name and the defination of this function is included directly from
+ * name and the definition of this function is included directly from
* 'arch/arm64/util/unwind-libunwind.c', to make sure that this function
* is defined no matter what arch the host is.
*
diff --git a/tools/perf/util/libunwind/x86_32.c b/tools/perf/util/libunwind/x86_32.c
index 21c216c40a3b..b2b92d030aef 100644
--- a/tools/perf/util/libunwind/x86_32.c
+++ b/tools/perf/util/libunwind/x86_32.c
@@ -4,7 +4,7 @@
* generic one.
*
* The function 'LIBUNWIND__ARCH_REG_ID' name is set according to arch
- * name and the defination of this function is included directly from
+ * name and the definition of this function is included directly from
* 'arch/x86/util/unwind-libunwind.c', to make sure that this function
* is defined no matter what arch the host is.
*
diff --git a/tools/perf/util/llvm-utils.c b/tools/perf/util/llvm-utils.c
index dbdffb6673fe..3ceaf7ef3301 100644
--- a/tools/perf/util/llvm-utils.c
+++ b/tools/perf/util/llvm-utils.c
@@ -471,7 +471,7 @@ int llvm__compile_bpf(const char *path, void **p_obj_buf,
/*
* This is an optional work. Even it fail we can continue our
- * work. Needn't to check error return.
+ * work. Needn't check error return.
*/
llvm__get_kbuild_opts(&kbuild_dir, &kbuild_include_opts);
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index b5c2d8be4144..3ff4936a15a4 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -905,7 +905,7 @@ static struct map *machine__addnew_module_map(struct machine *machine, u64 start
maps__insert(&machine->kmaps, map);
- /* Put the map here because maps__insert alread got it */
+ /* Put the map here because maps__insert already got it */
map__put(map);
out:
/* put the dso here, corresponding to machine__findnew_module_dso */
@@ -1952,7 +1952,7 @@ int machine__process_fork_event(struct machine *machine, union perf_event *event
* maps because that is what the kernel just did.
*
* But when synthesizing, this should not be done. If we do, we end up
- * with overlapping maps as we process the sythesized MMAP2 events that
+ * with overlapping maps as we process the synthesized MMAP2 events that
* get delivered shortly thereafter.
*
* Use the FORK event misc flags in an internal way to signal this
@@ -2038,8 +2038,8 @@ int machine__process_event(struct machine *machine, union perf_event *event,
static bool symbol__match_regex(struct symbol *sym, regex_t *regex)
{
if (!regexec(regex, sym->name, 0, NULL, 0))
- return 1;
- return 0;
+ return true;
+ return false;
}
static void ip__resolve_ams(struct thread *thread,
@@ -2518,7 +2518,7 @@ static bool has_stitched_lbr(struct thread *thread,
/*
* Check if there are identical LBRs between two samples.
- * Identicall LBRs must have same from, to and flags values. Also,
+ * Identical LBRs must have same from, to and flags values. Also,
* they have to be saved in the same LBR registers (same physical
* index).
*
@@ -2588,7 +2588,7 @@ err:
}
/*
- * Recolve LBR callstack chain sample
+ * Resolve LBR callstack chain sample
* Return:
* 1 on success get LBR callchain information
* 0 no available LBR callchain information, should try fp
diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h
index 9f32825c98d8..d32f5b28c1fb 100644
--- a/tools/perf/util/map.h
+++ b/tools/perf/util/map.h
@@ -75,7 +75,7 @@ struct thread;
/* map__for_each_symbol - iterate over the symbols in the given map
*
- * @map: the 'struct map *' in which symbols itereated
+ * @map: the 'struct map *' in which symbols are iterated
* @pos: the 'struct symbol *' to use as a loop cursor
* @n: the 'struct rb_node *' to use as a temporary storage
* Note: caller must ensure map->dso is not NULL (map is loaded).
@@ -86,7 +86,7 @@ struct thread;
/* map__for_each_symbol_with_name - iterate over the symbols in the given map
* that have the given name
*
- * @map: the 'struct map *' in which symbols itereated
+ * @map: the 'struct map *' in which symbols are iterated
* @sym_name: the symbol name
* @pos: the 'struct symbol *' to use as a loop cursor
*/
diff --git a/tools/perf/util/mem-events.h b/tools/perf/util/mem-events.h
index 755cef7e0625..cacdebd65b8a 100644
--- a/tools/perf/util/mem-events.h
+++ b/tools/perf/util/mem-events.h
@@ -44,7 +44,6 @@ bool is_mem_loads_aux_event(struct evsel *leader);
void perf_mem_events__list(void);
-struct mem_info;
int perf_mem__tlb_scnprintf(char *out, size_t sz, struct mem_info *mem_info);
int perf_mem__lvl_scnprintf(char *out, size_t sz, struct mem_info *mem_info);
int perf_mem__snp_scnprintf(char *out, size_t sz, struct mem_info *mem_info);
@@ -81,7 +80,7 @@ struct c2c_stats {
u32 rmt_dram; /* count of loads miss to remote DRAM */
u32 blk_data; /* count of loads blocked by data */
u32 blk_addr; /* count of loads blocked by address conflict */
- u32 nomap; /* count of load/stores with no phys adrs */
+ u32 nomap; /* count of load/stores with no phys addrs */
u32 noparse; /* count of unparsable data sources */
};
diff --git a/tools/perf/util/metricgroup.c b/tools/perf/util/metricgroup.c
index 26c990e32378..8336dd8e8098 100644
--- a/tools/perf/util/metricgroup.c
+++ b/tools/perf/util/metricgroup.c
@@ -181,7 +181,7 @@ static bool evsel_same_pmu(struct evsel *ev1, struct evsel *ev2)
* @pctx: the parse context for the metric expression.
* @metric_no_merge: don't attempt to share events for the metric with other
* metrics.
- * @has_constraint: is there a contraint on the group of events? In which case
+ * @has_constraint: is there a constraint on the group of events? In which case
* the events won't be grouped.
* @metric_events: out argument, null terminated array of evsel's associated
* with the metric.
@@ -618,7 +618,7 @@ static int metricgroup__print_sys_event_iter(struct pmu_event *pe, void *data)
void metricgroup__print(bool metrics, bool metricgroups, char *filter,
bool raw, bool details)
{
- struct pmu_events_map *map = perf_pmu__find_map(NULL);
+ struct pmu_events_map *map = pmu_events_map__find();
struct pmu_event *pe;
int i;
struct rblist groups;
@@ -900,7 +900,8 @@ static int __add_metric(struct list_head *metric_list,
(match_metric(__pe->metric_group, __metric) || \
match_metric(__pe->metric_name, __metric)))
-static struct pmu_event *find_metric(const char *metric, struct pmu_events_map *map)
+struct pmu_event *metricgroup__find_metric(const char *metric,
+ struct pmu_events_map *map)
{
struct pmu_event *pe;
int i;
@@ -985,7 +986,7 @@ static int __resolve_metric(struct metric *m,
struct expr_id *parent;
struct pmu_event *pe;
- pe = find_metric(cur->key, map);
+ pe = metricgroup__find_metric(cur->key, map);
if (!pe)
continue;
@@ -1253,8 +1254,7 @@ int metricgroup__parse_groups(const struct option *opt,
struct rblist *metric_events)
{
struct evlist *perf_evlist = *(struct evlist **)opt->value;
- struct pmu_events_map *map = perf_pmu__find_map(NULL);
-
+ struct pmu_events_map *map = pmu_events_map__find();
return parse_groups(perf_evlist, str, metric_no_group,
metric_no_merge, NULL, metric_events, map);
@@ -1273,7 +1273,7 @@ int metricgroup__parse_groups_test(struct evlist *evlist,
bool metricgroup__has_metric(const char *metric)
{
- struct pmu_events_map *map = perf_pmu__find_map(NULL);
+ struct pmu_events_map *map = pmu_events_map__find();
struct pmu_event *pe;
int i;
diff --git a/tools/perf/util/metricgroup.h b/tools/perf/util/metricgroup.h
index ed1b9392e624..cc4a92492a61 100644
--- a/tools/perf/util/metricgroup.h
+++ b/tools/perf/util/metricgroup.h
@@ -9,7 +9,6 @@
struct evlist;
struct evsel;
-struct evlist;
struct option;
struct rblist;
struct pmu_events_map;
@@ -44,7 +43,8 @@ int metricgroup__parse_groups(const struct option *opt,
bool metric_no_group,
bool metric_no_merge,
struct rblist *metric_events);
-
+struct pmu_event *metricgroup__find_metric(const char *metric,
+ struct pmu_events_map *map);
int metricgroup__parse_groups_test(struct evlist *evlist,
struct pmu_events_map *map,
const char *str,
diff --git a/tools/perf/util/parse-events-hybrid.c b/tools/perf/util/parse-events-hybrid.c
new file mode 100644
index 000000000000..10160ab126f9
--- /dev/null
+++ b/tools/perf/util/parse-events-hybrid.c
@@ -0,0 +1,178 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/err.h>
+#include <linux/zalloc.h>
+#include <errno.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <sys/param.h>
+#include "evlist.h"
+#include "evsel.h"
+#include "parse-events.h"
+#include "parse-events-hybrid.h"
+#include "debug.h"
+#include "pmu.h"
+#include "pmu-hybrid.h"
+#include "perf.h"
+
+static void config_hybrid_attr(struct perf_event_attr *attr,
+ int type, int pmu_type)
+{
+ /*
+ * attr.config layout for type PERF_TYPE_HARDWARE and
+ * PERF_TYPE_HW_CACHE
+ *
+ * PERF_TYPE_HARDWARE: 0xEEEEEEEE000000AA
+ * AA: hardware event ID
+ * EEEEEEEE: PMU type ID
+ * PERF_TYPE_HW_CACHE: 0xEEEEEEEE00DDCCBB
+ * BB: hardware cache ID
+ * CC: hardware cache op ID
+ * DD: hardware cache op result ID
+ * EEEEEEEE: PMU type ID
+ * If the PMU type ID is 0, the PERF_TYPE_RAW will be applied.
+ */
+ attr->type = type;
+ attr->config = attr->config | ((__u64)pmu_type << PERF_PMU_TYPE_SHIFT);
+}
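A worked example of the encoding, assuming a hybrid PMU whose type id is 8 (value invented) and a PERF_PMU_TYPE_SHIFT of 32:

	/*
	 * cycles on a hybrid PMU with pmu->type == 8:
	 *   attr.type   = PERF_TYPE_HARDWARE
	 *   attr.config = PERF_COUNT_HW_CPU_CYCLES | (8ULL << 32)
	 *               = 0x0000000800000000
	 */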
+
+static int create_event_hybrid(__u32 config_type, int *idx,
+ struct list_head *list,
+ struct perf_event_attr *attr, char *name,
+ struct list_head *config_terms,
+ struct perf_pmu *pmu)
+{
+ struct evsel *evsel;
+ __u32 type = attr->type;
+ __u64 config = attr->config;
+
+ config_hybrid_attr(attr, config_type, pmu->type);
+ evsel = parse_events__add_event_hybrid(list, idx, attr, name,
+ pmu, config_terms);
+ if (evsel)
+ evsel->pmu_name = strdup(pmu->name);
+ else
+ return -ENOMEM;
+
+ attr->type = type;
+ attr->config = config;
+ return 0;
+}
+
+static int pmu_cmp(struct parse_events_state *parse_state,
+ struct perf_pmu *pmu)
+{
+ if (!parse_state->hybrid_pmu_name)
+ return 0;
+
+ return strcmp(parse_state->hybrid_pmu_name, pmu->name);
+}
+
+static int add_hw_hybrid(struct parse_events_state *parse_state,
+ struct list_head *list, struct perf_event_attr *attr,
+ char *name, struct list_head *config_terms)
+{
+ struct perf_pmu *pmu;
+ int ret;
+
+ perf_pmu__for_each_hybrid_pmu(pmu) {
+ if (pmu_cmp(parse_state, pmu))
+ continue;
+
+ ret = create_event_hybrid(PERF_TYPE_HARDWARE,
+ &parse_state->idx, list, attr, name,
+ config_terms, pmu);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int create_raw_event_hybrid(int *idx, struct list_head *list,
+ struct perf_event_attr *attr, char *name,
+ struct list_head *config_terms,
+ struct perf_pmu *pmu)
+{
+ struct evsel *evsel;
+
+ attr->type = pmu->type;
+ evsel = parse_events__add_event_hybrid(list, idx, attr, name,
+ pmu, config_terms);
+ if (evsel)
+ evsel->pmu_name = strdup(pmu->name);
+ else
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int add_raw_hybrid(struct parse_events_state *parse_state,
+ struct list_head *list, struct perf_event_attr *attr,
+ char *name, struct list_head *config_terms)
+{
+ struct perf_pmu *pmu;
+ int ret;
+
+ perf_pmu__for_each_hybrid_pmu(pmu) {
+ if (pmu_cmp(parse_state, pmu))
+ continue;
+
+ ret = create_raw_event_hybrid(&parse_state->idx, list, attr,
+ name, config_terms, pmu);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+int parse_events__add_numeric_hybrid(struct parse_events_state *parse_state,
+ struct list_head *list,
+ struct perf_event_attr *attr,
+ char *name, struct list_head *config_terms,
+ bool *hybrid)
+{
+ *hybrid = false;
+ if (attr->type == PERF_TYPE_SOFTWARE)
+ return 0;
+
+ if (!perf_pmu__has_hybrid())
+ return 0;
+
+ *hybrid = true;
+ if (attr->type != PERF_TYPE_RAW) {
+ return add_hw_hybrid(parse_state, list, attr, name,
+ config_terms);
+ }
+
+ return add_raw_hybrid(parse_state, list, attr, name,
+ config_terms);
+}
+
+int parse_events__add_cache_hybrid(struct list_head *list, int *idx,
+ struct perf_event_attr *attr, char *name,
+ struct list_head *config_terms,
+ bool *hybrid,
+ struct parse_events_state *parse_state)
+{
+ struct perf_pmu *pmu;
+ int ret;
+
+ *hybrid = false;
+ if (!perf_pmu__has_hybrid())
+ return 0;
+
+ *hybrid = true;
+ perf_pmu__for_each_hybrid_pmu(pmu) {
+ if (pmu_cmp(parse_state, pmu))
+ continue;
+
+ ret = create_event_hybrid(PERF_TYPE_HW_CACHE, idx, list,
+ attr, name, config_terms, pmu);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
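
Taken together, the two entry points above make plain legacy events fan out: add_hw_hybrid() and the PERF_TYPE_HW_CACHE loop each create one evsel per entry on perf_pmu__hybrid_pmus, unless parse_state->hybrid_pmu_name pins the parse to a single PMU via pmu_cmp(). On a machine exposing, say, cpu_core and cpu_atom (names typical of Intel hybrid systems), 'perf stat -e cycles' therefore counts cycles once per hybrid PMU, while a scoped spec such as cpu_core/cycles/ yields a single evsel.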
diff --git a/tools/perf/util/parse-events-hybrid.h b/tools/perf/util/parse-events-hybrid.h
new file mode 100644
index 000000000000..f33bd67aa851
--- /dev/null
+++ b/tools/perf/util/parse-events-hybrid.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __PERF_PARSE_EVENTS_HYBRID_H
+#define __PERF_PARSE_EVENTS_HYBRID_H
+
+#include <linux/list.h>
+#include <stdbool.h>
+#include <linux/types.h>
+#include <linux/perf_event.h>
+#include <string.h>
+
+int parse_events__add_numeric_hybrid(struct parse_events_state *parse_state,
+ struct list_head *list,
+ struct perf_event_attr *attr,
+ char *name, struct list_head *config_terms,
+ bool *hybrid);
+
+int parse_events__add_cache_hybrid(struct list_head *list, int *idx,
+ struct perf_event_attr *attr, char *name,
+ struct list_head *config_terms,
+ bool *hybrid,
+ struct parse_events_state *parse_state);
+
+#endif /* __PERF_PARSE_EVENTS_HYBRID_H */
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index c0c0fab22cb8..4dad14265b81 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -37,6 +37,8 @@
#include "util/evsel_config.h"
#include "util/event.h"
#include "util/pfm.h"
+#include "util/parse-events-hybrid.h"
+#include "util/pmu-hybrid.h"
#include "perf.h"
#define MAX_NAME_LEN 100
@@ -47,6 +49,9 @@ extern int parse_events_debug;
int parse_events_parse(void *parse_state, void *scanner);
static int get_config_terms(struct list_head *head_config,
struct list_head *head_terms __maybe_unused);
+static int parse_events__with_hybrid_pmu(struct parse_events_state *parse_state,
+ const char *str, char *pmu_name,
+ struct list_head *list);
static struct perf_pmu_event_symbol *perf_pmu_events_list;
/*
@@ -452,14 +457,16 @@ static int config_attr(struct perf_event_attr *attr,
int parse_events_add_cache(struct list_head *list, int *idx,
char *type, char *op_result1, char *op_result2,
struct parse_events_error *err,
- struct list_head *head_config)
+ struct list_head *head_config,
+ struct parse_events_state *parse_state)
{
struct perf_event_attr attr;
LIST_HEAD(config_terms);
char name[MAX_NAME_LEN], *config_name;
int cache_type = -1, cache_op = -1, cache_result = -1;
char *op_result[2] = { op_result1, op_result2 };
- int i, n;
+ int i, n, ret;
+ bool hybrid;
/*
* No fallback - if we cannot get a clear cache type
@@ -519,6 +526,13 @@ int parse_events_add_cache(struct list_head *list, int *idx,
if (get_config_terms(head_config, &config_terms))
return -ENOMEM;
}
+
+ ret = parse_events__add_cache_hybrid(list, idx, &attr,
+ config_name ? : name, &config_terms,
+ &hybrid, parse_state);
+ if (hybrid)
+ return ret;
+
return add_event(list, idx, &attr, config_name ? : name, &config_terms);
}
@@ -846,9 +860,9 @@ split_bpf_config_terms(struct list_head *evt_head_config,
struct parse_events_term *term, *temp;
/*
- * Currectly, all possible user config term
+ * Currently, all possible user config terms
* belong to bpf object. parse_events__is_hardcoded_term()
- * happends to be a good flag.
+ * happens to be a good flag.
*
* See parse_events_config_bpf() and
* config_term_tracepoint().
@@ -898,7 +912,7 @@ int parse_events_load_bpf(struct parse_events_state *parse_state,
/*
* Caller doesn't know anything about obj_head_config,
- * so combine them together again before returnning.
+ * so combine them together again before returning.
*/
if (head_config)
list_splice_tail(&obj_head_config, head_config);
@@ -1185,10 +1199,10 @@ do { \
}
/*
- * Check term availbility after basic checking so
+ * Check term availability after basic checking so
* PARSE_EVENTS__TERM_TYPE_USER can be found and filtered.
*
- * If check availbility at the entry of this function,
+ * If check availability at the entry of this function,
* user will see "'<sysfs term>' is not usable in 'perf stat'"
* if an invalid config term is provided for legacy events
* (for example, instructions/badterm/...), which is confusing.
@@ -1419,6 +1433,8 @@ int parse_events_add_numeric(struct parse_events_state *parse_state,
{
struct perf_event_attr attr;
LIST_HEAD(config_terms);
+ bool hybrid;
+ int ret;
memset(&attr, 0, sizeof(attr));
attr.type = type;
@@ -1433,6 +1449,12 @@ int parse_events_add_numeric(struct parse_events_state *parse_state,
return -ENOMEM;
}
+ ret = parse_events__add_numeric_hybrid(parse_state, list, &attr,
+ get_config_name(head_config),
+ &config_terms, &hybrid);
+ if (hybrid)
+ return ret;
+
return add_event(list, &parse_state->idx, &attr,
get_config_name(head_config), &config_terms);
}
@@ -1456,6 +1478,33 @@ static bool config_term_percore(struct list_head *config_terms)
return false;
}
+static int parse_events__inside_hybrid_pmu(struct parse_events_state *parse_state,
+ struct list_head *list, char *name,
+ struct list_head *head_config)
+{
+ struct parse_events_term *term;
+ int ret = -1;
+
+ if (parse_state->fake_pmu || !head_config || list_empty(head_config) ||
+ !perf_pmu__is_hybrid(name)) {
+ return -1;
+ }
+
+ /*
+ * Bail out if there is more than one term in the list.
+ */
+ if (head_config->next && head_config->next->next != head_config)
+ return -1;
+
+ term = list_first_entry(head_config, struct parse_events_term, list);
+ if (term && term->config && strcmp(term->config, "event")) {
+ ret = parse_events__with_hybrid_pmu(parse_state, term->config,
+ name, list);
+ }
+
+ return ret;
+}
+
int parse_events_add_pmu(struct parse_events_state *parse_state,
struct list_head *list, char *name,
struct list_head *head_config,
@@ -1549,6 +1598,11 @@ int parse_events_add_pmu(struct parse_events_state *parse_state,
if (pmu->default_config && get_config_chgs(pmu, head_config, &config_terms))
return -ENOMEM;
+ if (!parse_events__inside_hybrid_pmu(parse_state, list, name,
+ head_config)) {
+ return 0;
+ }
+
if (!parse_state->fake_pmu && perf_pmu__config(pmu, &attr, head_config, parse_state->error)) {
struct evsel_config_term *pos, *tmp;
@@ -1567,6 +1621,9 @@ int parse_events_add_pmu(struct parse_events_state *parse_state,
if (!evsel)
return -ENOMEM;
+ if (evsel->name)
+ evsel->use_config_name = true;
+
evsel->pmu_name = name ? strdup(name) : NULL;
evsel->use_uncore_alias = use_uncore_alias;
evsel->percore = config_term_percore(&evsel->config_terms);
@@ -1804,6 +1861,7 @@ struct event_modifier {
int pinned;
int weak;
int exclusive;
+ int bpf_counter;
};
static int get_event_modifier(struct event_modifier *mod, char *str,
@@ -1824,6 +1882,7 @@ static int get_event_modifier(struct event_modifier *mod, char *str,
int exclude = eu | ek | eh;
int exclude_GH = evsel ? evsel->exclude_GH : 0;
int weak = 0;
+ int bpf_counter = 0;
memset(mod, 0, sizeof(*mod));
@@ -1867,6 +1926,8 @@ static int get_event_modifier(struct event_modifier *mod, char *str,
exclusive = 1;
} else if (*str == 'W') {
weak = 1;
+ } else if (*str == 'b') {
+ bpf_counter = 1;
} else
break;
@@ -1898,6 +1959,7 @@ static int get_event_modifier(struct event_modifier *mod, char *str,
mod->sample_read = sample_read;
mod->pinned = pinned;
mod->weak = weak;
+ mod->bpf_counter = bpf_counter;
mod->exclusive = exclusive;
return 0;
@@ -1912,7 +1974,7 @@ static int check_modifier(char *str)
char *p = str;
/* The sizeof includes 0 byte as well. */
- if (strlen(str) > (sizeof("ukhGHpppPSDIWe") - 1))
+ if (strlen(str) > (sizeof("ukhGHpppPSDIWeb") - 1))
return -1;
while (*p) {
@@ -1953,6 +2015,7 @@ int parse_events__modifier_event(struct list_head *list, char *str, bool add)
evsel->sample_read = mod.sample_read;
evsel->precise_max = mod.precise_max;
evsel->weak_group = mod.weak;
+ evsel->bpf_counter = mod.bpf_counter;
if (evsel__is_group_leader(evsel)) {
evsel->core.attr.pinned = mod.pinned;
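
For context, the new 'b' modifier lands in evsel->bpf_counter, which the bpf_counter code touched elsewhere in this series presumably consults to count the event through a BPF program rather than a plain counter read. A usage sketch, assuming a perf binary built with BPF skeleton support:

$ perf stat -e cycles:b -a -- sleep 1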
@@ -2162,6 +2225,33 @@ int parse_events_terms(struct list_head *terms, const char *str)
return ret;
}
+static int parse_events__with_hybrid_pmu(struct parse_events_state *parse_state,
+ const char *str, char *pmu_name,
+ struct list_head *list)
+{
+ struct parse_events_state ps = {
+ .list = LIST_HEAD_INIT(ps.list),
+ .stoken = PE_START_EVENTS,
+ .hybrid_pmu_name = pmu_name,
+ .idx = parse_state->idx,
+ };
+ int ret;
+
+ ret = parse_events__scanner(str, &ps);
+ perf_pmu__parse_cleanup();
+
+ if (!ret) {
+ if (!list_empty(&ps.list)) {
+ list_splice(&ps.list, list);
+ parse_state->idx = ps.idx;
+ return 0;
+ } else
+ return -1;
+ }
+
+ return ret;
+}
+
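Read together with parse_events__inside_hybrid_pmu() above, the flow for a spec such as cpu_core/cycles/ is: parse_events_add_pmu() sees a hybrid PMU with a single term whose config is not "event" (here "cycles"), hands that term text to parse_events__with_hybrid_pmu(), which re-runs the scanner with hybrid_pmu_name = "cpu_core"; pmu_cmp() in the hybrid fan-out loops then skips every other hybrid PMU, and the resulting single evsel is spliced back onto the caller's list.
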
int __parse_events(struct evlist *evlist, const char *str,
struct parse_events_error *err, struct perf_pmu *fake_pmu)
{
@@ -3185,3 +3275,12 @@ char *parse_events_formats_error_string(char *additional_terms)
fail:
return NULL;
}
+
+struct evsel *parse_events__add_event_hybrid(struct list_head *list, int *idx,
+ struct perf_event_attr *attr,
+ char *name, struct perf_pmu *pmu,
+ struct list_head *config_terms)
+{
+ return __add_event(list, idx, attr, true, name, pmu,
+ config_terms, false, NULL);
+}
diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h
index e80c9b74f2f2..bf6e41aa9b6a 100644
--- a/tools/perf/util/parse-events.h
+++ b/tools/perf/util/parse-events.h
@@ -138,6 +138,7 @@ struct parse_events_state {
struct list_head *terms;
int stoken;
struct perf_pmu *fake_pmu;
+ char *hybrid_pmu_name;
};
void parse_events__handle_error(struct parse_events_error *err, int idx,
@@ -188,7 +189,8 @@ int parse_events_add_tool(struct parse_events_state *parse_state,
int parse_events_add_cache(struct list_head *list, int *idx,
char *type, char *op_result1, char *op_result2,
struct parse_events_error *error,
- struct list_head *head_config);
+ struct list_head *head_config,
+ struct parse_events_state *parse_state);
int parse_events_add_breakpoint(struct list_head *list, int *idx,
u64 addr, char *type, u64 len);
int parse_events_add_pmu(struct parse_events_state *parse_state,
@@ -263,4 +265,9 @@ static inline bool is_sdt_event(char *str __maybe_unused)
int perf_pmu__test_parse_init(void);
+struct evsel *parse_events__add_event_hybrid(struct list_head *list, int *idx,
+ struct perf_event_attr *attr,
+ char *name, struct perf_pmu *pmu,
+ struct list_head *config_terms);
+
#endif /* __PERF_PARSE_EVENTS_H */
diff --git a/tools/perf/util/parse-events.l b/tools/perf/util/parse-events.l
index 0b36285a9435..fb8646cc3e83 100644
--- a/tools/perf/util/parse-events.l
+++ b/tools/perf/util/parse-events.l
@@ -210,7 +210,7 @@ name_tag [\'][a-zA-Z_*?\[\]][a-zA-Z0-9_*?\-,\.\[\]:=]*[\']
name_minus [a-zA-Z_*?][a-zA-Z0-9\-_*?.:]*
drv_cfg_term [a-zA-Z0-9_\.]+(=[a-zA-Z0-9_*?\.:]+)?
/* If you add a modifier you need to update check_modifier() */
-modifier_event [ukhpPGHSDIWe]+
+modifier_event [ukhpPGHSDIWeb]+
modifier_bp [rwx]{1,3}
%%
diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y
index d57ac86ce7ca..aba12a4d488e 100644
--- a/tools/perf/util/parse-events.y
+++ b/tools/perf/util/parse-events.y
@@ -454,7 +454,8 @@ PE_NAME_CACHE_TYPE '-' PE_NAME_CACHE_OP_RESULT '-' PE_NAME_CACHE_OP_RESULT opt_e
list = alloc_list();
ABORT_ON(!list);
- err = parse_events_add_cache(list, &parse_state->idx, $1, $3, $5, error, $6);
+ err = parse_events_add_cache(list, &parse_state->idx, $1, $3, $5, error, $6,
+ parse_state);
parse_events_terms__delete($6);
free($1);
free($3);
@@ -475,7 +476,8 @@ PE_NAME_CACHE_TYPE '-' PE_NAME_CACHE_OP_RESULT opt_event_config
list = alloc_list();
ABORT_ON(!list);
- err = parse_events_add_cache(list, &parse_state->idx, $1, $3, NULL, error, $4);
+ err = parse_events_add_cache(list, &parse_state->idx, $1, $3, NULL, error, $4,
+ parse_state);
parse_events_terms__delete($4);
free($1);
free($3);
@@ -495,7 +497,8 @@ PE_NAME_CACHE_TYPE opt_event_config
list = alloc_list();
ABORT_ON(!list);
- err = parse_events_add_cache(list, &parse_state->idx, $1, NULL, NULL, error, $2);
+ err = parse_events_add_cache(list, &parse_state->idx, $1, NULL, NULL, error, $2,
+ parse_state);
parse_events_terms__delete($2);
free($1);
if (err) {
diff --git a/tools/perf/util/pmu-hybrid.c b/tools/perf/util/pmu-hybrid.c
new file mode 100644
index 000000000000..f51ccaac60ee
--- /dev/null
+++ b/tools/perf/util/pmu-hybrid.c
@@ -0,0 +1,89 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/list.h>
+#include <linux/compiler.h>
+#include <linux/string.h>
+#include <linux/zalloc.h>
+#include <sys/types.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <stdarg.h>
+#include <locale.h>
+#include <api/fs/fs.h>
+#include "fncache.h"
+#include "pmu-hybrid.h"
+
+LIST_HEAD(perf_pmu__hybrid_pmus);
+
+bool perf_pmu__hybrid_mounted(const char *name)
+{
+ char path[PATH_MAX];
+ const char *sysfs;
+ FILE *file;
+ int n, cpu;
+
+ if (strncmp(name, "cpu_", 4))
+ return false;
+
+ sysfs = sysfs__mountpoint();
+ if (!sysfs)
+ return false;
+
+ snprintf(path, PATH_MAX, CPUS_TEMPLATE_CPU, sysfs, name);
+ if (!file_available(path))
+ return false;
+
+ file = fopen(path, "r");
+ if (!file)
+ return false;
+
+ n = fscanf(file, "%u", &cpu);
+ fclose(file);
+ if (n <= 0)
+ return false;
+
+ return true;
+}
+
+struct perf_pmu *perf_pmu__find_hybrid_pmu(const char *name)
+{
+ struct perf_pmu *pmu;
+
+ if (!name)
+ return NULL;
+
+ perf_pmu__for_each_hybrid_pmu(pmu) {
+ if (!strcmp(name, pmu->name))
+ return pmu;
+ }
+
+ return NULL;
+}
+
+bool perf_pmu__is_hybrid(const char *name)
+{
+ return perf_pmu__find_hybrid_pmu(name) != NULL;
+}
+
+char *perf_pmu__hybrid_type_to_pmu(const char *type)
+{
+ char *pmu_name = NULL;
+
+ if (asprintf(&pmu_name, "cpu_%s", type) < 0)
+ return NULL;
+
+ if (perf_pmu__is_hybrid(pmu_name))
+ return pmu_name;
+
+ /*
+ * The PMU may not have been scanned yet; check sysfs directly.
+ */
+ if (perf_pmu__hybrid_mounted(pmu_name))
+ return pmu_name;
+
+ free(pmu_name);
+ return NULL;
+}
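
A minimal sketch (not part of the patch) of how these helpers compose, assuming a hybrid system and the headers below:

#include <stdio.h>
#include <stdlib.h>
#include "pmu.h"
#include "pmu-hybrid.h"

static void dump_hybrid_pmus(void)
{
	struct perf_pmu *pmu;
	char *name;

	if (!perf_pmu__has_hybrid())	/* triggers a one-time PMU scan */
		return;

	perf_pmu__for_each_hybrid_pmu(pmu)
		printf("hybrid pmu: %s (type %u)\n", pmu->name, pmu->type);

	name = perf_pmu__hybrid_type_to_pmu("core");	/* "core" -> "cpu_core" */
	if (name) {
		printf("'core' maps to %s\n", name);
		free(name);	/* the helper asprintf()s the string */
	}
}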
diff --git a/tools/perf/util/pmu-hybrid.h b/tools/perf/util/pmu-hybrid.h
new file mode 100644
index 000000000000..d0fa7bc50a76
--- /dev/null
+++ b/tools/perf/util/pmu-hybrid.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __PMU_HYBRID_H
+#define __PMU_HYBRID_H
+
+#include <linux/perf_event.h>
+#include <linux/compiler.h>
+#include <linux/list.h>
+#include <stdbool.h>
+#include "pmu.h"
+
+extern struct list_head perf_pmu__hybrid_pmus;
+
+#define perf_pmu__for_each_hybrid_pmu(pmu) \
+ list_for_each_entry(pmu, &perf_pmu__hybrid_pmus, hybrid_list)
+
+bool perf_pmu__hybrid_mounted(const char *name);
+
+struct perf_pmu *perf_pmu__find_hybrid_pmu(const char *name);
+bool perf_pmu__is_hybrid(const char *name);
+char *perf_pmu__hybrid_type_to_pmu(const char *type);
+
+#endif /* __PMU_HYBRID_H */
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index 46fd0f998484..88c8ecdc60b0 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -25,6 +25,7 @@
#include "string2.h"
#include "strbuf.h"
#include "fncache.h"
+#include "pmu-hybrid.h"
struct perf_pmu perf_pmu__fake;
@@ -39,6 +40,7 @@ int perf_pmu_parse(struct list_head *list, char *name);
extern FILE *perf_pmu_in;
static LIST_HEAD(pmus);
+static bool hybrid_scanned;
/*
* Parse & process all the sysfs attributes located under
@@ -283,6 +285,7 @@ void perf_pmu_free_alias(struct perf_pmu_alias *newalias)
zfree(&newalias->str);
zfree(&newalias->metric_expr);
zfree(&newalias->metric_name);
+ zfree(&newalias->pmu_name);
parse_events_terms__purge(&newalias->terms);
free(newalias);
}
@@ -297,6 +300,10 @@ static bool perf_pmu_merge_alias(struct perf_pmu_alias *newalias,
list_for_each_entry(a, alist, list) {
if (!strcasecmp(newalias->name, a->name)) {
+ if (newalias->pmu_name && a->pmu_name &&
+ !strcasecmp(newalias->pmu_name, a->pmu_name)) {
+ continue;
+ }
perf_pmu_update_alias(a, newalias);
perf_pmu_free_alias(newalias);
return true;
@@ -306,18 +313,27 @@ static bool perf_pmu_merge_alias(struct perf_pmu_alias *newalias,
}
static int __perf_pmu__new_alias(struct list_head *list, char *dir, char *name,
- char *desc, char *val,
- char *long_desc, char *topic,
- char *unit, char *perpkg,
- char *metric_expr,
- char *metric_name,
- char *deprecated)
+ char *desc, char *val, struct pmu_event *pe)
{
struct parse_events_term *term;
struct perf_pmu_alias *alias;
int ret;
int num;
char newval[256];
+ char *long_desc = NULL, *topic = NULL, *unit = NULL, *perpkg = NULL,
+ *metric_expr = NULL, *metric_name = NULL, *deprecated = NULL,
+ *pmu_name = NULL;
+
+ if (pe) {
+ long_desc = (char *)pe->long_desc;
+ topic = (char *)pe->topic;
+ unit = (char *)pe->unit;
+ perpkg = (char *)pe->perpkg;
+ metric_expr = (char *)pe->metric_expr;
+ metric_name = (char *)pe->metric_name;
+ deprecated = (char *)pe->deprecated;
+ pmu_name = (char *)pe->pmu;
+ }
alias = malloc(sizeof(*alias));
if (!alias)
@@ -382,6 +398,7 @@ static int __perf_pmu__new_alias(struct list_head *list, char *dir, char *name,
}
alias->per_pkg = perpkg && sscanf(perpkg, "%d", &num) == 1 && num == 1;
alias->str = strdup(newval);
+ alias->pmu_name = pmu_name ? strdup(pmu_name) : NULL;
if (deprecated)
alias->deprecated = true;
@@ -406,8 +423,7 @@ static int perf_pmu__new_alias(struct list_head *list, char *dir, char *name, FI
/* Remove trailing newline from sysfs file */
strim(buf);
- return __perf_pmu__new_alias(list, dir, name, NULL, buf, NULL, NULL, NULL,
- NULL, NULL, NULL, NULL);
+ return __perf_pmu__new_alias(list, dir, name, NULL, buf, NULL);
}
static inline bool pmu_alias_info_file(char *name)
@@ -599,7 +615,6 @@ static struct perf_cpu_map *__pmu_cpumask(const char *path)
*/
#define SYS_TEMPLATE_ID "./bus/event_source/devices/%s/identifier"
#define CPUS_TEMPLATE_UNCORE "%s/bus/event_source/devices/%s/cpumask"
-#define CPUS_TEMPLATE_CPU "%s/bus/event_source/devices/%s/cpus"
static struct perf_cpu_map *pmu_cpumask(const char *name)
{
@@ -631,6 +646,9 @@ static bool pmu_is_uncore(const char *name)
char path[PATH_MAX];
const char *sysfs;
+ if (perf_pmu__hybrid_mounted(name))
+ return false;
+
sysfs = sysfs__mountpoint();
snprintf(path, PATH_MAX, CPUS_TEMPLATE_UNCORE, sysfs, name);
return file_available(path);
@@ -717,6 +735,11 @@ struct pmu_events_map *perf_pmu__find_map(struct perf_pmu *pmu)
return map;
}
+struct pmu_events_map *__weak pmu_events_map__find(void)
+{
+ return perf_pmu__find_map(NULL);
+}
+
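The __weak default preserves the old single-map lookup; an architecture can supply a strong definition when the right map depends on more than the boot CPU. A hypothetical hybrid-aware override (a sketch, not code from this patch) might read:

struct pmu_events_map *pmu_events_map__find(void)
{
	struct perf_pmu *pmu;

	if (!perf_pmu__has_hybrid())
		return perf_pmu__find_map(NULL);

	/* on a hybrid system, treat the core PMU's map as the reference */
	pmu = perf_pmu__find_hybrid_pmu("cpu_core");
	if (!pmu)
		return perf_pmu__find_map(NULL);

	return perf_pmu__find_map(pmu);
}
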
bool pmu_uncore_alias_match(const char *pmu_name, const char *name)
{
char *tmp = NULL, *tok, *str;
@@ -793,11 +816,7 @@ new_alias:
/* need type casts to override 'const' */
__perf_pmu__new_alias(head, NULL, (char *)pe->name,
(char *)pe->desc, (char *)pe->event,
- (char *)pe->long_desc, (char *)pe->topic,
- (char *)pe->unit, (char *)pe->perpkg,
- (char *)pe->metric_expr,
- (char *)pe->metric_name,
- (char *)pe->deprecated);
+ pe);
}
}
@@ -864,13 +883,7 @@ static int pmu_add_sys_aliases_iter_fn(struct pmu_event *pe, void *data)
(char *)pe->name,
(char *)pe->desc,
(char *)pe->event,
- (char *)pe->long_desc,
- (char *)pe->topic,
- (char *)pe->unit,
- (char *)pe->perpkg,
- (char *)pe->metric_expr,
- (char *)pe->metric_name,
- (char *)pe->deprecated);
+ pe);
}
return 0;
@@ -942,6 +955,7 @@ static struct perf_pmu *pmu_lookup(const char *name)
pmu->is_uncore = pmu_is_uncore(name);
if (pmu->is_uncore)
pmu->id = pmu_id(name);
+ pmu->is_hybrid = perf_pmu__hybrid_mounted(name);
pmu->max_precise = pmu_max_precise(name);
pmu_add_cpu_aliases(&aliases, pmu);
pmu_add_sys_aliases(&aliases, pmu);
@@ -953,6 +967,9 @@ static struct perf_pmu *pmu_lookup(const char *name)
list_splice(&aliases, &pmu->aliases);
list_add_tail(&pmu->list, &pmus);
+ if (pmu->is_hybrid)
+ list_add_tail(&pmu->hybrid_list, &perf_pmu__hybrid_pmus);
+
pmu->default_config = perf_pmu__get_default_config(pmu);
return pmu;
@@ -1069,7 +1086,7 @@ int perf_pmu__format_type(struct list_head *formats, const char *name)
/*
* Sets value based on the format definition (format parameter)
- * and unformated value (value parameter).
+ * and unformatted value (value parameter).
*/
static void pmu_format_value(unsigned long *format, __u64 value, __u64 *v,
bool zero)
@@ -1408,7 +1425,7 @@ int perf_pmu__check_alias(struct perf_pmu *pmu, struct list_head *head_terms,
}
/*
- * if no unit or scale foundin aliases, then
+ * if no unit or scale found in aliases, then
* set defaults as for evsel
* unit cannot left to NULL
*/
@@ -1845,3 +1862,13 @@ void perf_pmu__warn_invalid_config(struct perf_pmu *pmu, __u64 config,
"'%llx' not supported by kernel)!\n",
name ?: "N/A", buf, config);
}
+
+bool perf_pmu__has_hybrid(void)
+{
+ if (!hybrid_scanned) {
+ hybrid_scanned = true;
+ perf_pmu__scan(NULL);
+ }
+
+ return !list_empty(&perf_pmu__hybrid_pmus);
+}
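
Note the lazy-scan design: the first perf_pmu__has_hybrid() call runs perf_pmu__scan(NULL) exactly once (guarded by hybrid_scanned), and it is pmu_lookup() above that both sets pmu->is_hybrid and appends the PMU to perf_pmu__hybrid_pmus, so every later call reduces to a cheap list_empty() check.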
diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h
index 160b0f561771..a790ef758171 100644
--- a/tools/perf/util/pmu.h
+++ b/tools/perf/util/pmu.h
@@ -5,6 +5,7 @@
#include <linux/bitmap.h>
#include <linux/compiler.h>
#include <linux/perf_event.h>
+#include <linux/list.h>
#include <stdbool.h>
#include "parse-events.h"
#include "pmu-events/pmu-events.h"
@@ -19,6 +20,7 @@ enum {
#define PERF_PMU_FORMAT_BITS 64
#define EVENT_SOURCE_DEVICE_PATH "/bus/event_source/devices/"
+#define CPUS_TEMPLATE_CPU "%s/bus/event_source/devices/%s/cpus"
struct perf_event_attr;
@@ -34,6 +36,7 @@ struct perf_pmu {
__u32 type;
bool selectable;
bool is_uncore;
+ bool is_hybrid;
bool auxtrace;
int max_precise;
struct perf_event_attr *default_config;
@@ -42,6 +45,7 @@ struct perf_pmu {
struct list_head aliases; /* HEAD struct perf_pmu_alias -> list */
struct list_head caps; /* HEAD struct perf_pmu_caps -> list */
struct list_head list; /* ELEM */
+ struct list_head hybrid_list;
};
extern struct perf_pmu perf_pmu__fake;
@@ -72,6 +76,7 @@ struct perf_pmu_alias {
bool deprecated;
char *metric_expr;
char *metric_name;
+ char *pmu_name;
};
struct perf_pmu *perf_pmu__find(const char *name);
@@ -114,6 +119,7 @@ void pmu_add_cpu_aliases_map(struct list_head *head, struct perf_pmu *pmu,
struct pmu_events_map *map);
struct pmu_events_map *perf_pmu__find_map(struct perf_pmu *pmu);
+struct pmu_events_map *pmu_events_map__find(void);
bool pmu_uncore_alias_match(const char *pmu_name, const char *name);
void perf_pmu_free_alias(struct perf_pmu_alias *alias);
@@ -126,4 +132,6 @@ int perf_pmu__caps_parse(struct perf_pmu *pmu);
void perf_pmu__warn_invalid_config(struct perf_pmu *pmu, __u64 config,
char *name);
+bool perf_pmu__has_hybrid(void);
+
#endif /* __PMU_H */
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index a9cff3a50ddf..a78c8d59a555 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -3228,7 +3228,7 @@ errout:
return err;
}
-/* Concatinate two arrays */
+/* Concatenate two arrays */
static void *memcat(void *a, size_t sz_a, void *b, size_t sz_b)
{
void *ret;
@@ -3258,7 +3258,7 @@ concat_probe_trace_events(struct probe_trace_event **tevs, int *ntevs,
if (*ntevs + ntevs2 > probe_conf.max_probes)
ret = -E2BIG;
else {
- /* Concatinate the array of probe_trace_event */
+ /* Concatenate the array of probe_trace_event */
new_tevs = memcat(*tevs, (*ntevs) * sizeof(**tevs),
*tevs2, ntevs2 * sizeof(**tevs2));
if (!new_tevs)
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index 1b118c9c86a6..866f2d514d72 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -164,7 +164,7 @@ static struct probe_trace_arg_ref *alloc_trace_arg_ref(long offs)
/*
* Convert a location into trace_arg.
* If tvar == NULL, this just checks variable can be converted.
- * If fentry == true and vr_die is a parameter, do huristic search
+ * If fentry == true and vr_die is a parameter, do heuristic search
* for the location fuzzed by function entry mcount.
*/
static int convert_variable_location(Dwarf_Die *vr_die, Dwarf_Addr addr,
@@ -498,7 +498,7 @@ static int convert_variable_fields(Dwarf_Die *vr_die, const char *varname,
" nor array.\n", varname);
return -EINVAL;
}
- /* While prcessing unnamed field, we don't care about this */
+ /* While processing unnamed field, we don't care about this */
if (field->ref && dwarf_diename(vr_die)) {
pr_err("Semantic error: %s must be referred by '.'\n",
field->name);
@@ -1832,7 +1832,7 @@ static int line_range_walk_cb(const char *fname, int lineno,
(lf->lno_s > lineno || lf->lno_e < lineno))
return 0;
- /* Make sure this line can be reversable */
+ /* Make sure this line is reversible */
if (cu_find_lineinfo(&lf->cu_die, addr, &__fname, &__lineno) > 0
&& (lineno != __lineno || strcmp(fname, __fname)))
return 0;
diff --git a/tools/perf/util/python-ext-sources b/tools/perf/util/python-ext-sources
index 845dd46e3c61..d7c976671e3a 100644
--- a/tools/perf/util/python-ext-sources
+++ b/tools/perf/util/python-ext-sources
@@ -37,3 +37,5 @@ util/units.c
util/affinity.c
util/rwsem.c
util/hashmap.c
+util/pmu-hybrid.c
+util/fncache.c
diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c
index 278abecb5bdf..412f8e79e409 100644
--- a/tools/perf/util/python.c
+++ b/tools/perf/util/python.c
@@ -90,6 +90,7 @@ int metricgroup__copy_metric_events(struct evlist *evlist, struct cgroup *cgrp,
*/
void bpf_counter__destroy(struct evsel *evsel);
int bpf_counter__install_pe(struct evsel *evsel, int cpu, int fd);
+int bpf_counter__disable(struct evsel *evsel);
void bpf_counter__destroy(struct evsel *evsel __maybe_unused)
{
@@ -100,6 +101,11 @@ int bpf_counter__install_pe(struct evsel *evsel __maybe_unused, int cpu __maybe_
return 0;
}
+int bpf_counter__disable(struct evsel *evsel __maybe_unused)
+{
+ return 0;
+}
+
/*
* Support debug printing even though util/debug.c is not linked. That means
* implementing 'verbose' and 'eprintf'.
diff --git a/tools/perf/util/s390-cpumsf.c b/tools/perf/util/s390-cpumsf.c
index 078a71773565..8130b56aa04b 100644
--- a/tools/perf/util/s390-cpumsf.c
+++ b/tools/perf/util/s390-cpumsf.c
@@ -45,7 +45,7 @@
* the data portion is mmap()'ed.
*
* To sort the queues in chronological order, all queue access is controlled
- * by the auxtrace_heap. This is basicly a stack, each stack element has two
+ * by the auxtrace_heap. This is basically a stack, each stack element has two
* entries, the queue number and a time stamp. However the stack is sorted by
* the time stamps. The highest time stamp is at the bottom the lowest
* (nearest) time stamp is at the top. That sort order is maintained at all
@@ -65,11 +65,11 @@
* stamp of the last processed entry of the auxtrace_buffer replaces the
* current auxtrace_heap top.
*
- * 3. Auxtrace_queues might run of out data and are feeded by the
+ * 3. Auxtrace_queues might run out of data and are fed by the
* PERF_RECORD_AUXTRACE handling, see s390_cpumsf_process_auxtrace_event().
*
* Event Generation
- * Each sampling-data entry in the auxilary trace data generates a perf sample.
+ * Each sampling-data entry in the auxiliary trace data generates a perf sample.
* This sample is filled
* with data from the auxtrace such as PID/TID, instruction address, CPU state,
* etc. This sample is processed with perf_session__deliver_synth_event() to
@@ -575,7 +575,7 @@ static unsigned long long get_trailer_time(const unsigned char *buf)
* pointer to the queue, the second parameter is the time stamp. This
* is the time stamp:
* - of the event that triggered this processing.
- * - or the time stamp when the last proccesing of this queue stopped.
+ * - or the time stamp when the last processing of this queue stopped.
* In this case it stopped at a 4KB page boundary and record the
* position on where to continue processing on the next invocation
* (see buffer->use_data and buffer->use_size).
@@ -640,7 +640,7 @@ static int s390_cpumsf_samples(struct s390_cpumsf_queue *sfq, u64 *ts)
goto out;
}
- pos += dsdes; /* Skip diagnositic entry */
+ pos += dsdes; /* Skip diagnostic entry */
/* Check for trailer entry */
if (!s390_cpumsf_reached_trailer(bsdes + dsdes, pos)) {
diff --git a/tools/perf/util/s390-sample-raw.c b/tools/perf/util/s390-sample-raw.c
index cfcf8d534d76..08ec3c3ae0ee 100644
--- a/tools/perf/util/s390-sample-raw.c
+++ b/tools/perf/util/s390-sample-raw.c
@@ -160,11 +160,9 @@ static void s390_cpumcfdg_dump(struct perf_sample *sample)
const char *color = PERF_COLOR_BLUE;
struct cf_ctrset_entry *cep, ce;
struct pmu_events_map *map;
- struct perf_pmu pmu;
u64 *p;
- memset(&pmu, 0, sizeof(pmu));
- map = perf_pmu__find_map(&pmu);
+ map = pmu_events_map__find();
while (offset < len) {
cep = (struct cf_ctrset_entry *)(buf + offset);
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index c83c2c6564e0..4e4aa4c97ac5 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -1531,7 +1531,7 @@ static void set_table_handlers(struct tables *tables)
* Attempt to use the call path root from the call return
* processor, if the call return processor is in use. Otherwise,
* we allocate a new call path root. This prevents exporting
- * duplicate call path ids when both are in use simultaniously.
+ * duplicate call path ids when both are in use simultaneously.
*/
if (tables->dbe.crp)
tables->dbe.cpr = tables->dbe.crp->cpr;
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 859832a82496..a12cf4f0e97a 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -29,6 +29,7 @@
#include "thread-stack.h"
#include "sample-raw.h"
#include "stat.h"
+#include "tsc.h"
#include "ui/progress.h"
#include "../perf.h"
#include "arch/common.h"
@@ -451,6 +452,16 @@ static int process_stat_round_stub(struct perf_session *perf_session __maybe_unu
return 0;
}
+static int process_event_time_conv_stub(struct perf_session *perf_session __maybe_unused,
+ union perf_event *event)
+{
+ if (dump_trace)
+ perf_event__fprintf_time_conv(event, stdout);
+
+ dump_printf(": unhandled!\n");
+ return 0;
+}
+
static int perf_session__process_compressed_event_stub(struct perf_session *session __maybe_unused,
union perf_event *event __maybe_unused,
u64 file_offset __maybe_unused)
@@ -532,7 +543,7 @@ void perf_tool__fill_defaults(struct perf_tool *tool)
if (tool->stat_round == NULL)
tool->stat_round = process_stat_round_stub;
if (tool->time_conv == NULL)
- tool->time_conv = process_event_op2_stub;
+ tool->time_conv = process_event_time_conv_stub;
if (tool->feature == NULL)
tool->feature = process_event_op2_stub;
if (tool->compressed == NULL)
@@ -949,6 +960,19 @@ static void perf_event__stat_round_swap(union perf_event *event,
event->stat_round.time = bswap_64(event->stat_round.time);
}
+static void perf_event__time_conv_swap(union perf_event *event,
+ bool sample_id_all __maybe_unused)
+{
+ event->time_conv.time_shift = bswap_64(event->time_conv.time_shift);
+ event->time_conv.time_mult = bswap_64(event->time_conv.time_mult);
+ event->time_conv.time_zero = bswap_64(event->time_conv.time_zero);
+
+ if (event_contains(event->time_conv, time_cycles)) {
+ event->time_conv.time_cycles = bswap_64(event->time_conv.time_cycles);
+ event->time_conv.time_mask = bswap_64(event->time_conv.time_mask);
+ }
+}
+
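The swap handler only converts the extended fields when the record actually carries them, which event_contains() derives from the record's self-describing size. A sketch of that test, written here as an assumption (the real macro is expected to live in util/event.h, one of the files this series touches):

/* true if the record is large enough to reach member 'mem' */
#define event_contains(obj, mem) ((obj).header.size > offsetof(typeof(obj), mem))
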
typedef void (*perf_event__swap_op)(union perf_event *event,
bool sample_id_all);
@@ -985,7 +1009,7 @@ static perf_event__swap_op perf_event__swap_ops[] = {
[PERF_RECORD_STAT] = perf_event__stat_swap,
[PERF_RECORD_STAT_ROUND] = perf_event__stat_round_swap,
[PERF_RECORD_EVENT_UPDATE] = perf_event__event_update_swap,
- [PERF_RECORD_TIME_CONV] = perf_event__all64_swap,
+ [PERF_RECORD_TIME_CONV] = perf_event__time_conv_swap,
[PERF_RECORD_HEADER_MAX] = NULL,
};
@@ -1069,7 +1093,7 @@ static void callchain__lbr_callstack_printf(struct perf_sample *sample)
* in "to" register.
* For example, there is a call stack
* "A"->"B"->"C"->"D".
- * The LBR registers will recorde like
+ * The LBR registers will be recorded like
* "C"->"D", "B"->"C", "A"->"B".
* So only the first "to" register and all "from"
* registers are needed to construct the whole stack.
@@ -1302,8 +1326,10 @@ static void dump_sample(struct evsel *evsel, union perf_event *event,
if (sample_type & PERF_SAMPLE_WEIGHT_TYPE) {
printf("... weight: %" PRIu64 "", sample->weight);
- if (sample_type & PERF_SAMPLE_WEIGHT_STRUCT)
+ if (sample_type & PERF_SAMPLE_WEIGHT_STRUCT) {
printf(",0x%"PRIx16"", sample->ins_lat);
+ printf(",0x%"PRIx16"", sample->p_stage_cyc);
+ }
printf("\n");
}
@@ -1584,7 +1610,7 @@ static s64 perf_session__process_user_event(struct perf_session *session,
return tool->event_update(tool, event, &session->evlist);
case PERF_RECORD_HEADER_EVENT_TYPE:
/*
- * Depreceated, but we need to handle it for sake
+ * Deprecated, but we need to handle it for the sake
* of old data files create in pipe mode.
*/
return 0;
@@ -2350,7 +2376,8 @@ size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp
return machines__fprintf_dsos_buildid(&session->machines, fp, skip, parm);
}
-size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
+size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp,
+ bool skip_empty)
{
size_t ret;
const char *msg = "";
@@ -2360,7 +2387,7 @@ size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
ret = fprintf(fp, "\nAggregated stats:%s\n", msg);
- ret += events_stats__fprintf(&session->evlist->stats, fp);
+ ret += events_stats__fprintf(&session->evlist->stats, fp, skip_empty);
return ret;
}
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h
index f76480166d38..e31ba4c92a6c 100644
--- a/tools/perf/util/session.h
+++ b/tools/perf/util/session.h
@@ -113,7 +113,8 @@ size_t perf_session__fprintf_dsos(struct perf_session *session, FILE *fp);
size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp,
bool (fn)(struct dso *dso, int parm), int parm);
-size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp);
+size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp,
+ bool skip_empty);
struct evsel *perf_session__find_first_evtype(struct perf_session *session,
unsigned int type);
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index 552b590485bf..88ce47f2547e 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -25,6 +25,7 @@
#include <traceevent/event-parse.h>
#include "mem-events.h"
#include "annotate.h"
+#include "event.h"
#include "time-utils.h"
#include "cgroup.h"
#include "machine.h"
@@ -36,7 +37,7 @@ const char default_parent_pattern[] = "^sys_|^do_page_fault";
const char *parent_pattern = default_parent_pattern;
const char *default_sort_order = "comm,dso,symbol";
const char default_branch_sort_order[] = "comm,dso_from,symbol_from,symbol_to,cycles";
-const char default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked,blocked,local_ins_lat";
+const char default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked,blocked,local_ins_lat,p_stage_cyc";
const char default_top_sort_order[] = "dso,symbol";
const char default_diff_sort_order[] = "dso,symbol";
const char default_tracepoint_sort_order[] = "trace";
@@ -45,6 +46,8 @@ const char *field_order;
regex_t ignore_callees_regex;
int have_ignore_callees = 0;
enum sort_mode sort__mode = SORT_MODE__NORMAL;
+const char *dynamic_headers[] = {"local_ins_lat", "p_stage_cyc"};
+const char *arch_specific_sort_keys[] = {"p_stage_cyc"};
/*
* Replaces all occurrences of a char used with the:
@@ -1408,6 +1411,25 @@ struct sort_entry sort_global_ins_lat = {
.se_width_idx = HISTC_GLOBAL_INS_LAT,
};
+static int64_t
+sort__global_p_stage_cyc_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+ return left->stat.p_stage_cyc - right->stat.p_stage_cyc;
+}
+
+static int hist_entry__p_stage_cyc_snprintf(struct hist_entry *he, char *bf,
+ size_t size, unsigned int width)
+{
+ return repsep_snprintf(bf, size, "%-*" PRIu64, width, he->stat.p_stage_cyc);
+}
+
+struct sort_entry sort_p_stage_cyc = {
+ .se_header = "Pipeline Stage Cycle",
+ .se_cmp = sort__global_p_stage_cyc_cmp,
+ .se_snprintf = hist_entry__p_stage_cyc_snprintf,
+ .se_width_idx = HISTC_P_STAGE_CYC,
+};
+
struct sort_entry sort_mem_daddr_sym = {
.se_header = "Data Symbol",
.se_cmp = sort__daddr_cmp,
@@ -1816,6 +1838,21 @@ struct sort_dimension {
int taken;
};
+int __weak arch_support_sort_key(const char *sort_key __maybe_unused)
+{
+ return 0;
+}
+
+const char * __weak arch_perf_header_entry(const char *se_header)
+{
+ return se_header;
+}
+
+static void sort_dimension_add_dynamic_header(struct sort_dimension *sd)
+{
+ sd->entry->se_header = arch_perf_header_entry(sd->entry->se_header);
+}
+
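Both hooks are __weak no-ops, so p_stage_cyc stays invisible unless an architecture opts in. A hypothetical override (file location and column title illustrative only) could look like:

#include <string.h>

int arch_support_sort_key(const char *sort_key)
{
	return !strcmp(sort_key, "p_stage_cyc");
}

const char *arch_perf_header_entry(const char *se_header)
{
	/* rename the generic header to an arch-specific one */
	if (!strcmp(se_header, "Pipeline Stage Cycle"))
		return "Dispatch Cyc";	/* illustrative title */
	return se_header;
}
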
#define DIM(d, n, func) [d] = { .name = n, .entry = &(func) }
static struct sort_dimension common_sort_dimensions[] = {
@@ -1841,6 +1878,7 @@ static struct sort_dimension common_sort_dimensions[] = {
DIM(SORT_CODE_PAGE_SIZE, "code_page_size", sort_code_page_size),
DIM(SORT_LOCAL_INS_LAT, "local_ins_lat", sort_local_ins_lat),
DIM(SORT_GLOBAL_INS_LAT, "ins_lat", sort_global_ins_lat),
+ DIM(SORT_PIPELINE_STAGE_CYC, "p_stage_cyc", sort_p_stage_cyc),
};
#undef DIM
@@ -2739,7 +2777,20 @@ int sort_dimension__add(struct perf_hpp_list *list, const char *tok,
struct evlist *evlist,
int level)
{
- unsigned int i;
+ unsigned int i, j;
+
+ /*
+ * Check to see if there are any arch specific
+ * sort dimensions not applicable for the current
+ * architecture. If so, skip that sort key since
+ * we don't want to display it in the output fields.
+ */
+ for (j = 0; j < ARRAY_SIZE(arch_specific_sort_keys); j++) {
+ if (!strcmp(arch_specific_sort_keys[j], tok) &&
+ !arch_support_sort_key(tok)) {
+ return 0;
+ }
+ }
for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
struct sort_dimension *sd = &common_sort_dimensions[i];
@@ -2747,6 +2798,11 @@ int sort_dimension__add(struct perf_hpp_list *list, const char *tok,
if (strncasecmp(tok, sd->name, strlen(tok)))
continue;
+ for (j = 0; j < ARRAY_SIZE(dynamic_headers); j++) {
+ if (!strcmp(dynamic_headers[j], sd->name))
+ sort_dimension_add_dynamic_header(sd);
+ }
+
if (sd->entry == &sort_parent) {
int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
if (ret) {
diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h
index 63f67a3f3630..87a092645aa7 100644
--- a/tools/perf/util/sort.h
+++ b/tools/perf/util/sort.h
@@ -51,6 +51,7 @@ struct he_stat {
u64 period_guest_us;
u64 weight;
u64 ins_lat;
+ u64 p_stage_cyc;
u32 nr_events;
};
@@ -234,6 +235,7 @@ enum sort_type {
SORT_CODE_PAGE_SIZE,
SORT_LOCAL_INS_LAT,
SORT_GLOBAL_INS_LAT,
+ SORT_PIPELINE_STAGE_CYC,
/* branch stack specific sort keys */
__SORT_BRANCH_STACK,
diff --git a/tools/perf/util/stat-display.c b/tools/perf/util/stat-display.c
index 7f09cdaf5b60..a76fff5e7d83 100644
--- a/tools/perf/util/stat-display.c
+++ b/tools/perf/util/stat-display.c
@@ -17,6 +17,8 @@
#include "cgroup.h"
#include <api/fs/fs.h>
#include "util.h"
+#include "iostat.h"
+#include "pmu-hybrid.h"
#define CNTR_NOT_SUPPORTED "<not supported>"
#define CNTR_NOT_COUNTED "<not counted>"
@@ -310,6 +312,11 @@ static void print_metric_header(struct perf_stat_config *config,
struct outstate *os = ctx;
char tbuf[1024];
+ /* In case of iostat, print metric header for first root port only */
+ if (config->iostat_run &&
+ os->evsel->priv != os->evsel->evlist->selected->priv)
+ return;
+
if (!valid_only_metric(unit))
return;
unit = fixunit(tbuf, os->evsel, unit);
@@ -439,6 +446,12 @@ static void printout(struct perf_stat_config *config, struct aggr_cpu_id id, int
if (counter->cgrp)
os.nfields++;
}
+
+ if (!config->no_csv_summary && config->csv_output &&
+ config->summary && !config->interval) {
+ fprintf(config->output, "%16s%s", "summary", config->csv_sep);
+ }
+
if (run == 0 || ena == 0 || counter->counts->scaled == -1) {
if (config->metric_only) {
pm(config, &os, NULL, "", "", 0);
@@ -526,6 +539,7 @@ static void uniquify_event_name(struct evsel *counter)
{
char *new_name;
char *config;
+ int ret = 0;
if (counter->uniquified_name ||
!counter->pmu_name || !strncmp(counter->name, counter->pmu_name,
@@ -540,8 +554,17 @@ static void uniquify_event_name(struct evsel *counter)
counter->name = new_name;
}
} else {
- if (asprintf(&new_name,
- "%s [%s]", counter->name, counter->pmu_name) > 0) {
+ if (perf_pmu__has_hybrid()) {
+ if (!counter->use_config_name) {
+ ret = asprintf(&new_name, "%s/%s/",
+ counter->pmu_name, counter->name);
+ }
+ } else {
+ ret = asprintf(&new_name, "%s [%s]",
+ counter->name, counter->pmu_name);
+ }
+
+ if (ret) {
free(counter->name);
counter->name = new_name;
}
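
The visible effect: on a hybrid system a duplicated event is reported in event-syntax form, e.g. cpu_core/cycles/ and cpu_atom/cycles/ (PMU names illustrative), instead of the old "cycles [pmu]" style, while the use_config_name flag set in parse_events_add_pmu() keeps a user-supplied name= term from being rewritten.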
@@ -644,6 +667,9 @@ static void print_counter_aggrdata(struct perf_stat_config *config,
if (!collect_data(config, counter, aggr_cb, &ad))
return;
+ if (perf_pmu__has_hybrid() && ad.ena == 0)
+ return;
+
nr = ad.nr;
ena = ad.ena;
run = ad.run;
@@ -952,8 +978,11 @@ static void print_metric_headers(struct perf_stat_config *config,
if (config->csv_output) {
if (config->interval)
fputs("time,", config->output);
- fputs(aggr_header_csv[config->aggr_mode], config->output);
+ if (!config->iostat_run)
+ fputs(aggr_header_csv[config->aggr_mode], config->output);
}
+ if (config->iostat_run)
+ iostat_print_header_prefix(config);
/* Print metrics headers only */
evlist__for_each_entry(evlist, counter) {
@@ -983,7 +1012,8 @@ static void print_interval(struct perf_stat_config *config,
if (config->interval_clear)
puts(CONSOLE_CLEAR);
- sprintf(prefix, "%6lu.%09lu%s", (unsigned long) ts->tv_sec, ts->tv_nsec, config->csv_sep);
+ if (!config->iostat_run)
+ sprintf(prefix, "%6lu.%09lu%s", (unsigned long) ts->tv_sec, ts->tv_nsec, config->csv_sep);
if ((num_print_interval == 0 && !config->csv_output) || config->interval_clear) {
switch (config->aggr_mode) {
@@ -1019,9 +1049,11 @@ static void print_interval(struct perf_stat_config *config,
break;
case AGGR_GLOBAL:
default:
- fprintf(output, "# time");
- if (!metric_only)
- fprintf(output, " counts %*s events\n", unit_width, "unit");
+ if (!config->iostat_run) {
+ fprintf(output, "# time");
+ if (!metric_only)
+ fprintf(output, " counts %*s events\n", unit_width, "unit");
+ }
case AGGR_UNSET:
break;
}
@@ -1214,6 +1246,9 @@ void evlist__print_counters(struct evlist *evlist, struct perf_stat_config *conf
struct evsel *counter;
char buf[64], *prefix = NULL;
+ if (config->iostat_run)
+ evlist->selected = evlist__first(evlist);
+
if (interval)
print_interval(config, evlist, prefix = buf, ts);
else
@@ -1226,7 +1261,7 @@ void evlist__print_counters(struct evlist *evlist, struct perf_stat_config *conf
print_metric_headers(config, evlist, prefix, false);
if (num_print_iv++ == 25)
num_print_iv = 0;
- if (config->aggr_mode == AGGR_GLOBAL && prefix)
+ if (config->aggr_mode == AGGR_GLOBAL && prefix && !config->iostat_run)
fprintf(config->output, "%s", prefix);
}
@@ -1243,11 +1278,16 @@ void evlist__print_counters(struct evlist *evlist, struct perf_stat_config *conf
}
break;
case AGGR_GLOBAL:
- evlist__for_each_entry(evlist, counter) {
- print_counter_aggr(config, counter, prefix);
+ if (config->iostat_run)
+ iostat_print_counters(evlist, config, ts, prefix = buf,
+ print_counter_aggr);
+ else {
+ evlist__for_each_entry(evlist, counter) {
+ print_counter_aggr(config, counter, prefix);
+ }
+ if (metric_only)
+ fputc('\n', config->output);
}
- if (metric_only)
- fputc('\n', config->output);
break;
case AGGR_NONE:
if (metric_only)
diff --git a/tools/perf/util/stat-shadow.c b/tools/perf/util/stat-shadow.c
index 6ccf21a72f06..39967a45f55b 100644
--- a/tools/perf/util/stat-shadow.c
+++ b/tools/perf/util/stat-shadow.c
@@ -9,7 +9,9 @@
#include "expr.h"
#include "metricgroup.h"
#include "cgroup.h"
+#include "units.h"
#include <linux/zalloc.h>
+#include "iostat.h"
/*
* AGGR_GLOBAL: Use CPU 0
@@ -961,7 +963,9 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
struct metric_event *me;
int num = 1;
- if (evsel__match(evsel, HARDWARE, HW_INSTRUCTIONS)) {
+ if (config->iostat_run) {
+ iostat_print_metric(config, evsel, out);
+ } else if (evsel__match(evsel, HARDWARE, HW_INSTRUCTIONS)) {
total = runtime_stat_avg(st, STAT_CYCLES, cpu, &rsd);
if (total) {
@@ -1270,18 +1274,15 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
generic_metric(config, evsel->metric_expr, evsel->metric_events, NULL,
evsel->name, evsel->metric_name, NULL, 1, cpu, out, st);
} else if (runtime_stat_n(st, STAT_NSECS, cpu, &rsd) != 0) {
- char unit = 'M';
- char unit_buf[10];
+ char unit = ' ';
+ char unit_buf[10] = "/sec";
total = runtime_stat_avg(st, STAT_NSECS, cpu, &rsd);
-
if (total)
- ratio = 1000.0 * avg / total;
- if (ratio < 0.001) {
- ratio *= 1000;
- unit = 'K';
- }
- snprintf(unit_buf, sizeof(unit_buf), "%c/sec", unit);
+ ratio = convert_unit_double(1000000000.0 * avg / total, &unit);
+
+ if (unit != ' ')
+ snprintf(unit_buf, sizeof(unit_buf), "%c/sec", unit);
print_metric(config, ctxp, NULL, "%8.3f", unit_buf, ratio);
} else if (perf_stat_evsel__is(evsel, SMI_NUM)) {
print_smi_cost(config, cpu, out, st, &rsd);
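
A worked example of the new scaling: with avg = 12,345,678 events over total = 1e9 ns of walltime, the argument to convert_unit_double() is 1000000000.0 * avg / total = 12,345,678 events/sec, which scales down twice to ratio ≈ 12.346 with unit 'M' and prints "12.346 M/sec". A slow event at, say, 950 events/sec stays below 1000, keeps the ' ' unit and the "/sec" default, and prints "950.000 /sec", where the old code would have shown "0.950 K/sec".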
diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c
index c400f8dde017..2db46b9bebd0 100644
--- a/tools/perf/util/stat.c
+++ b/tools/perf/util/stat.c
@@ -76,8 +76,7 @@ double rel_stddev_stats(double stddev, double avg)
return pct;
}
-bool __perf_evsel_stat__is(struct evsel *evsel,
- enum perf_stat_evsel_id id)
+bool __perf_stat_evsel__is(struct evsel *evsel, enum perf_stat_evsel_id id)
{
struct perf_stat_evsel *ps = evsel->stats;
diff --git a/tools/perf/util/stat.h b/tools/perf/util/stat.h
index d85c292148bb..32c8527de347 100644
--- a/tools/perf/util/stat.h
+++ b/tools/perf/util/stat.h
@@ -128,10 +128,12 @@ struct perf_stat_config {
bool all_user;
bool percore_show_thread;
bool summary;
+ bool no_csv_summary;
bool metric_no_group;
bool metric_no_merge;
bool stop_read_counter;
bool quiet;
+ bool iostat_run;
FILE *output;
unsigned int interval;
unsigned int timeout;
@@ -160,6 +162,7 @@ struct perf_stat_config {
};
void perf_stat__set_big_num(int set);
+void perf_stat__set_no_csv_summary(int set);
void update_stats(struct stats *stats, u64 val);
double avg_stats(struct stats *stats);
@@ -187,11 +190,10 @@ struct perf_aggr_thread_value {
u64 ena;
};
-bool __perf_evsel_stat__is(struct evsel *evsel,
- enum perf_stat_evsel_id id);
+bool __perf_stat_evsel__is(struct evsel *evsel, enum perf_stat_evsel_id id);
#define perf_stat_evsel__is(evsel, id) \
- __perf_evsel_stat__is(evsel, PERF_STAT_EVSEL_ID__ ## id)
+ __perf_stat_evsel__is(evsel, PERF_STAT_EVSEL_ID__ ## id)
extern struct runtime_stat rt_stat;
extern struct stats walltime_nsecs_stats;
diff --git a/tools/perf/util/strbuf.h b/tools/perf/util/strbuf.h
index ea94d8628980..be94d7046fa0 100644
--- a/tools/perf/util/strbuf.h
+++ b/tools/perf/util/strbuf.h
@@ -12,7 +12,7 @@
* build complex strings/buffers whose final size isn't easily known.
*
* It is NOT legal to copy the ->buf pointer away.
- * `strbuf_detach' is the operation that detachs a buffer from its shell
+ * `strbuf_detach' is the operation that detaches a buffer from its shell
* while keeping the shell valid wrt its invariants.
*
* 2. the ->buf member is a byte array that has at least ->len + 1 bytes
diff --git a/tools/perf/util/strfilter.h b/tools/perf/util/strfilter.h
index e0c25a40f796..c05aca9ca582 100644
--- a/tools/perf/util/strfilter.h
+++ b/tools/perf/util/strfilter.h
@@ -8,8 +8,8 @@
/* A node of string filter */
struct strfilter_node {
- struct strfilter_node *l; /* Tree left branche (for &,|) */
- struct strfilter_node *r; /* Tree right branche (for !,&,|) */
+ struct strfilter_node *l; /* Tree left branch (for &,|) */
+ struct strfilter_node *r; /* Tree right branch (for !,&,|) */
const char *p; /* Operator or rule */
};
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index 6dff843fd883..4c56aa837434 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -1058,7 +1058,7 @@ static int dso__process_kernel_symbol(struct dso *dso, struct map *map,
curr_dso->symtab_type = dso->symtab_type;
maps__insert(kmaps, curr_map);
/*
- * Add it before we drop the referece to curr_map, i.e. while
+ * Add it before we drop the reference to curr_map, i.e. while
* we still are sure to have a reference to this DSO via
* *curr_map->dso.
*/
diff --git a/tools/perf/util/symbol_fprintf.c b/tools/perf/util/symbol_fprintf.c
index 35c936ce33ef..2664fb65e47a 100644
--- a/tools/perf/util/symbol_fprintf.c
+++ b/tools/perf/util/symbol_fprintf.c
@@ -68,7 +68,7 @@ size_t dso__fprintf_symbols_by_name(struct dso *dso,
for (nd = rb_first_cached(&dso->symbol_names); nd; nd = rb_next(nd)) {
pos = rb_entry(nd, struct symbol_name_rb_node, rb_node);
- fprintf(fp, "%s\n", pos->sym.name);
+ ret += fprintf(fp, "%s\n", pos->sym.name);
}
return ret;
diff --git a/tools/perf/util/synthetic-events.c b/tools/perf/util/synthetic-events.c
index dff178103ce5..35aa0c0f7cd9 100644
--- a/tools/perf/util/synthetic-events.c
+++ b/tools/perf/util/synthetic-events.c
@@ -1211,7 +1211,7 @@ static size_t mask_size(struct perf_cpu_map *map, int *max)
*max = 0;
for (i = 0; i < map->nr; i++) {
- /* bit possition of the cpu is + 1 */
+ /* bit position of the cpu is + 1 */
int bit = map->map[i] + 1;
if (bit > *max)
@@ -1237,7 +1237,7 @@ void *cpu_map_data__alloc(struct perf_cpu_map *map, size_t *size, u16 *type, int
* mask = size of 'struct perf_record_record_cpu_map' +
* maximum cpu bit converted to size of longs
*
- * and finaly + the size of 'struct perf_record_cpu_map_data'.
+ * and finally + the size of 'struct perf_record_cpu_map_data'.
*/
size_cpus = cpus_size(map);
size_mask = mask_size(map, max);
diff --git a/tools/perf/util/syscalltbl.c b/tools/perf/util/syscalltbl.c
index 03bd99d3be16..a2e906858891 100644
--- a/tools/perf/util/syscalltbl.c
+++ b/tools/perf/util/syscalltbl.c
@@ -34,6 +34,10 @@ static const char **syscalltbl_native = syscalltbl_powerpc_32;
#include <asm/syscalls.c>
const int syscalltbl_native_max_id = SYSCALLTBL_ARM64_MAX_ID;
static const char **syscalltbl_native = syscalltbl_arm64;
+#elif defined(__mips__)
+#include <asm/syscalls_n64.c>
+const int syscalltbl_native_max_id = SYSCALLTBL_MIPS_N64_MAX_ID;
+static const char **syscalltbl_native = syscalltbl_mips_n64;
#endif
struct syscall {
diff --git a/tools/perf/util/target.h b/tools/perf/util/target.h
index f132c6c2eef8..4ff56217f2a6 100644
--- a/tools/perf/util/target.h
+++ b/tools/perf/util/target.h
@@ -16,6 +16,8 @@ struct target {
bool uses_mmap;
bool default_per_cpu;
bool per_thread;
+ bool use_bpf;
+ const char *attr_map;
};
enum target_errno {
@@ -64,11 +66,6 @@ static inline bool target__has_cpu(struct target *target)
return target->system_wide || target->cpu_list;
}
-static inline bool target__has_bpf(struct target *target)
-{
- return target->bpf_str;
-}
-
static inline bool target__none(struct target *target)
{
return !target__has_task(target) && !target__has_cpu(target);
diff --git a/tools/perf/util/thread-stack.h b/tools/perf/util/thread-stack.h
index 3bc47a42af8e..b3cd09beb62f 100644
--- a/tools/perf/util/thread-stack.h
+++ b/tools/perf/util/thread-stack.h
@@ -16,7 +16,6 @@ struct comm;
struct ip_callchain;
struct symbol;
struct dso;
-struct comm;
struct perf_sample;
struct addr_location;
struct call_path;
diff --git a/tools/perf/util/tsc.c b/tools/perf/util/tsc.c
index 62b4c75c966c..f19791d46e99 100644
--- a/tools/perf/util/tsc.c
+++ b/tools/perf/util/tsc.c
@@ -1,5 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
+#include <inttypes.h>
+#include <string.h>
#include <linux/compiler.h>
#include <linux/perf_event.h>
@@ -110,3 +112,31 @@ u64 __weak rdtsc(void)
{
return 0;
}
+
+size_t perf_event__fprintf_time_conv(union perf_event *event, FILE *fp)
+{
+ struct perf_record_time_conv *tc = (struct perf_record_time_conv *)event;
+ size_t ret;
+
+ ret = fprintf(fp, "\n... Time Shift %" PRI_lu64 "\n", tc->time_shift);
+ ret += fprintf(fp, "... Time Muliplier %" PRI_lu64 "\n", tc->time_mult);
+ ret += fprintf(fp, "... Time Zero %" PRI_lu64 "\n", tc->time_zero);
+
+ /*
+ * The TIME_CONV event was extended with the fields from "time_cycles"
+ * onward when cap_user_time_short is supported. For backward
+ * compatibility, print the extended fields only if they are contained
+ * in the event.
+ */
+ if (event_contains(*tc, time_cycles)) {
+ ret += fprintf(fp, "... Time Cycles %" PRI_lu64 "\n",
+ tc->time_cycles);
+ ret += fprintf(fp, "... Time Mask %#" PRI_lx64 "\n",
+ tc->time_mask);
+ ret += fprintf(fp, "... Cap Time Zero %" PRId32 "\n",
+ tc->cap_user_time_zero);
+ ret += fprintf(fp, "... Cap Time Short %" PRId32 "\n",
+ tc->cap_user_time_short);
+ }
+
+ return ret;
+}
diff --git a/tools/perf/util/tsc.h b/tools/perf/util/tsc.h
index 72a15419f3b3..7d83a31732a7 100644
--- a/tools/perf/util/tsc.h
+++ b/tools/perf/util/tsc.h
@@ -4,6 +4,8 @@
#include <linux/types.h>
+#include "event.h"
+
struct perf_tsc_conversion {
u16 time_shift;
u32 time_mult;
@@ -24,4 +26,6 @@ u64 perf_time_to_tsc(u64 ns, struct perf_tsc_conversion *tc);
u64 tsc_to_perf_time(u64 cyc, struct perf_tsc_conversion *tc);
u64 rdtsc(void);
+size_t perf_event__fprintf_time_conv(union perf_event *event, FILE *fp);
+
#endif // __PERF_TSC_H
diff --git a/tools/perf/util/units.c b/tools/perf/util/units.c
index a46762aec4c9..32c39cfe209b 100644
--- a/tools/perf/util/units.c
+++ b/tools/perf/util/units.c
@@ -33,28 +33,35 @@ unsigned long parse_tag_value(const char *str, struct parse_tag *tags)
return (unsigned long) -1;
}
-unsigned long convert_unit(unsigned long value, char *unit)
+double convert_unit_double(double value, char *unit)
{
*unit = ' ';
- if (value > 1000) {
- value /= 1000;
+ if (value > 1000.0) {
+ value /= 1000.0;
*unit = 'K';
}
- if (value > 1000) {
- value /= 1000;
+ if (value > 1000.0) {
+ value /= 1000.0;
*unit = 'M';
}
- if (value > 1000) {
- value /= 1000;
+ if (value > 1000.0) {
+ value /= 1000.0;
*unit = 'G';
}
return value;
}
+unsigned long convert_unit(unsigned long value, char *unit)
+{
+ double v = convert_unit_double((double)value, unit);
+
+ return (unsigned long)v;
+}
+
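A quick illustrative caller for the new double-precision variant (a sketch, assuming units.h is on the include path):

#include <stdio.h>
#include "units.h"

int main(void)
{
	char unit;
	double v = convert_unit_double(12345678.0, &unit);

	printf("%.3f %c\n", v, unit);	/* prints "12.346 M" */
	return 0;
}
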
int unit_number__scnprintf(char *buf, size_t size, u64 n)
{
char unit[4] = "BKMG";
diff --git a/tools/perf/util/units.h b/tools/perf/util/units.h
index 99263b6a23f7..ea43e74e3240 100644
--- a/tools/perf/util/units.h
+++ b/tools/perf/util/units.h
@@ -12,6 +12,7 @@ struct parse_tag {
unsigned long parse_tag_value(const char *str, struct parse_tag *tags);
+double convert_unit_double(double value, char *unit);
unsigned long convert_unit(unsigned long value, char *unit);
int unit_number__scnprintf(char *buf, size_t size, u64 n);
diff --git a/tools/perf/util/unwind-libunwind-local.c b/tools/perf/util/unwind-libunwind-local.c
index 9aededc0bc06..71a353349181 100644
--- a/tools/perf/util/unwind-libunwind-local.c
+++ b/tools/perf/util/unwind-libunwind-local.c
@@ -82,7 +82,7 @@ UNW_OBJ(dwarf_find_debug_frame) (int found, unw_dyn_info_t *di_debug,
#define DW_EH_PE_funcrel 0x40 /* start-of-procedure-relative */
#define DW_EH_PE_aligned 0x50 /* aligned pointer */
-/* Flags intentionaly not handled, since they're not needed:
+/* Flags intentionally not handled, since they're not needed:
* #define DW_EH_PE_indirect 0x80
* #define DW_EH_PE_uleb128 0x01
* #define DW_EH_PE_udata2 0x02