author     Ingo Molnar <mingo@kernel.org>  2016-03-08 10:15:43 +0100
committer  Ingo Molnar <mingo@kernel.org>  2016-03-08 10:15:43 +0100
commit     b9461ba85f11de61d11ad4e13c806ff174ddf577 (patch)
tree       80923f1ac26505ec187e9e3ef0946befb1fe0012
parent     Merge tag 'perf-core-for-mingo-20160303' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux into perf/core (diff)
parent     perf report: Use hierarchy hpp list on gtk (diff)
Merge branch 'email/acme' into perf/core
Merge perf/core improvements and fixes from Arnaldo Carvalho de Melo:

User visible changes:

- Allow grouping multiple sort keys per 'perf report/top --hierarchy' level (Namhyung Kim)

- Document 'perf stat --detailed' option (Borislav Petkov)

Infrastructure changes:

- jitdump prep work for supporting it with Intel PT (Adrian Hunter)

- Use 64-bit shifts with (TSC) time conversion (Adrian Hunter)

Fixes:

- Explicitly declare inc_group_count as a void function (Colin Ian King)

Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--  tools/perf/Documentation/perf-stat.txt  |   8
-rw-r--r--  tools/perf/arch/x86/tests/rdpmc.c       |   2
-rw-r--r--  tools/perf/builtin-inject.c             |  52
-rw-r--r--  tools/perf/ui/browsers/hists.c          | 147
-rw-r--r--  tools/perf/ui/gtk/hists.c               |  73
-rw-r--r--  tools/perf/ui/hist.c                    |  69
-rw-r--r--  tools/perf/ui/stdio/hist.c              | 171
-rw-r--r--  tools/perf/util/hist.c                  |  72
-rw-r--r--  tools/perf/util/hist.h                  |  14
-rw-r--r--  tools/perf/util/jitdump.c               |  29
-rw-r--r--  tools/perf/util/parse-events.y          |   2
-rw-r--r--  tools/perf/util/session.c               |  40
-rw-r--r--  tools/perf/util/sort.c                  | 146
-rw-r--r--  tools/perf/util/sort.h                  |   1
-rw-r--r--  tools/perf/util/tsc.c                   |   2
15 files changed, 514 insertions, 314 deletions
diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt
index 52ef7a9d50aa..14d9e8ffaff7 100644
--- a/tools/perf/Documentation/perf-stat.txt
+++ b/tools/perf/Documentation/perf-stat.txt
@@ -69,6 +69,14 @@ report::
--scale::
scale/normalize counter values
+-d::
+--detailed::
+ print more detailed statistics, can be specified up to 3 times
+
+ -d: detailed events, L1 and LLC data cache
+ -d -d: more detailed events, dTLB and iTLB events
+ -d -d -d: very detailed events, adding prefetch events
+
-r::
--repeat=<n>::
repeat command and print average + stddev (max: 100). 0 means forever.
diff --git a/tools/perf/arch/x86/tests/rdpmc.c b/tools/perf/arch/x86/tests/rdpmc.c
index 7945462851a4..72193f19d6d7 100644
--- a/tools/perf/arch/x86/tests/rdpmc.c
+++ b/tools/perf/arch/x86/tests/rdpmc.c
@@ -59,7 +59,7 @@ static u64 mmap_read_self(void *addr)
u64 quot, rem;
quot = (cyc >> time_shift);
- rem = cyc & ((1 << time_shift) - 1);
+ rem = cyc & (((u64)1 << time_shift) - 1);
delta = time_offset + quot * time_mult +
((rem * time_mult) >> time_shift);
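
The cast matters because the literal 1 is an int: the remainder mask was being built in 32-bit arithmetic, which overflows once time_shift reaches 31 and is undefined beyond that, so high bits of the cycle count could be dropped. A minimal standalone sketch of the corrected quotient/remainder split (the constants are invented for illustration, not taken from any real mmap page):

#include <stdint.h>
#include <stdio.h>

/*
 * Same split as the patched mmap_read_self() and tsc_to_perf_time(),
 * with the remainder mask built in 64-bit arithmetic.
 */
static uint64_t cyc_to_time(uint64_t cyc, uint64_t time_zero,
			    uint32_t time_mult, uint16_t time_shift)
{
	uint64_t quot = cyc >> time_shift;
	uint64_t rem  = cyc & (((uint64_t)1 << time_shift) - 1);

	return time_zero + quot * time_mult +
	       ((rem * time_mult) >> time_shift);
}

int main(void)
{
	/* invented time_zero/time_mult/time_shift values */
	printf("%llu\n", (unsigned long long)
	       cyc_to_time(1ULL << 40, 0, 349525, 20));
	return 0;
}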
diff --git a/tools/perf/builtin-inject.c b/tools/perf/builtin-inject.c
index b38445f08c2f..b2885776b602 100644
--- a/tools/perf/builtin-inject.c
+++ b/tools/perf/builtin-inject.c
@@ -253,12 +253,16 @@ static int perf_event__jit_repipe_mmap(struct perf_tool *tool,
{
struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
u64 n = 0;
+ int ret;
/*
* if jit marker, then inject jit mmaps and generate ELF images
*/
- if (!jit_process(inject->session, &inject->output, machine,
- event->mmap.filename, sample->pid, &n)) {
+ ret = jit_process(inject->session, &inject->output, machine,
+ event->mmap.filename, sample->pid, &n);
+ if (ret < 0)
+ return ret;
+ if (ret) {
inject->bytes_written += n;
return 0;
}
@@ -287,12 +291,16 @@ static int perf_event__jit_repipe_mmap2(struct perf_tool *tool,
{
struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
u64 n = 0;
+ int ret;
/*
* if jit marker, then inject jit mmaps and generate ELF images
*/
- if (!jit_process(inject->session, &inject->output, machine,
- event->mmap2.filename, sample->pid, &n)) {
+ ret = jit_process(inject->session, &inject->output, machine,
+ event->mmap2.filename, sample->pid, &n);
+ if (ret < 0)
+ return ret;
+ if (ret) {
inject->bytes_written += n;
return 0;
}
@@ -679,12 +687,16 @@ static int __cmd_inject(struct perf_inject *inject)
ret = perf_session__process_events(session);
if (!file_out->is_pipe) {
- if (inject->build_ids) {
+ if (inject->build_ids)
perf_header__set_feat(&session->header,
HEADER_BUILD_ID);
- if (inject->have_auxtrace)
- dsos__hit_all(session);
- }
+ /*
+ * Keep all buildids when there is unprocessed AUX data because
+ * it is not known which ones the AUX trace hits.
+ */
+ if (perf_header__has_feat(&session->header, HEADER_BUILD_ID) &&
+ inject->have_auxtrace && !inject->itrace_synth_opts.set)
+ dsos__hit_all(session);
/*
* The AUX areas have been removed and replaced with
* synthesized hardware events, so clear the feature flag and
@@ -717,23 +729,6 @@ static int __cmd_inject(struct perf_inject *inject)
return ret;
}
-#ifdef HAVE_LIBELF_SUPPORT
-static int
-jit_validate_events(struct perf_session *session)
-{
- struct perf_evsel *evsel;
-
- /*
- * check that all events use CLOCK_MONOTONIC
- */
- evlist__for_each(session->evlist, evsel) {
- if (evsel->attr.use_clockid == 0 || evsel->attr.clockid != CLOCK_MONOTONIC)
- return -1;
- }
- return 0;
-}
-#endif
-
int cmd_inject(int argc, const char **argv, const char *prefix __maybe_unused)
{
struct perf_inject inject = {
@@ -840,13 +835,6 @@ int cmd_inject(int argc, const char **argv, const char *prefix __maybe_unused)
}
#ifdef HAVE_LIBELF_SUPPORT
if (inject.jit_mode) {
- /*
- * validate event is using the correct clockid
- */
- if (jit_validate_events(inject.session)) {
- fprintf(stderr, "error, jitted code must be sampled with perf record -k 1\n");
- return -1;
- }
inject.tool.mmap2 = perf_event__jit_repipe_mmap2;
inject.tool.mmap = perf_event__jit_repipe_mmap;
inject.tool.ordered_events = true;
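
Behind these hunks is a change to jit_process()'s return convention (see the jitdump.c hunk below): a negative value now means a real error, zero means the mmap was not a jitdump marker and the original event should simply be repiped, and a positive value means jitted mmaps were injected with *nbytes reporting how much was written. A standalone toy model of that convention, with made-up names and values:

#include <stdio.h>
#include <string.h>

/* Toy stand-in for jit_process(): <0 error, 0 not a jitdump mmap, >0 injected. */
static int fake_jit_process(const char *filename, unsigned long long *nbytes)
{
	if (filename == NULL)
		return -1;				/* hard error */
	if (strstr(filename, "jit-") == NULL)
		return 0;				/* ordinary mmap */
	*nbytes = 4096;					/* pretend ELF images were written */
	return 1;
}

int main(void)
{
	unsigned long long n = 0;
	int ret = fake_jit_process("/tmp/jit-1234.dump", &n);

	if (ret < 0)
		fprintf(stderr, "error\n");		/* propagate the error */
	else if (ret > 0)
		printf("injected %llu bytes\n", n);	/* account bytes_written */
	else
		printf("repipe original mmap event\n");
	return 0;
}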
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index 5ffffcb1e3c5..e0e217ec856b 100644
--- a/tools/perf/ui/browsers/hists.c
+++ b/tools/perf/ui/browsers/hists.c
@@ -1280,7 +1280,7 @@ static int hist_browser__show_entry(struct hist_browser *browser,
static int hist_browser__show_hierarchy_entry(struct hist_browser *browser,
struct hist_entry *entry,
unsigned short row,
- int level, int nr_sort_keys)
+ int level)
{
int printed = 0;
int width = browser->b.width;
@@ -1289,12 +1289,13 @@ static int hist_browser__show_hierarchy_entry(struct hist_browser *browser,
off_t row_offset = entry->row_offset;
bool first = true;
struct perf_hpp_fmt *fmt;
+ struct perf_hpp_list_node *fmt_node;
struct hpp_arg arg = {
.b = &browser->b,
.current_entry = current_entry,
};
int column = 0;
- int hierarchy_indent = (nr_sort_keys - 1) * HIERARCHY_INDENT;
+ int hierarchy_indent = (entry->hists->nr_hpp_node - 2) * HIERARCHY_INDENT;
if (current_entry) {
browser->he_selection = entry;
@@ -1320,7 +1321,10 @@ static int hist_browser__show_hierarchy_entry(struct hist_browser *browser,
ui_browser__write_nstring(&browser->b, "", level * HIERARCHY_INDENT);
width -= level * HIERARCHY_INDENT;
- hists__for_each_format(entry->hists, fmt) {
+ /* the first hpp_list_node is for overhead columns */
+ fmt_node = list_first_entry(&entry->hists->hpp_formats,
+ struct perf_hpp_list_node, list);
+ perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
char s[2048];
struct perf_hpp hpp = {
.buf = s,
@@ -1332,10 +1336,6 @@ static int hist_browser__show_hierarchy_entry(struct hist_browser *browser,
column++ < browser->b.horiz_scroll)
continue;
- if (perf_hpp__is_sort_entry(fmt) ||
- perf_hpp__is_dynamic_entry(fmt))
- break;
-
if (current_entry && browser->b.navkeypressed) {
ui_browser__set_color(&browser->b,
HE_COLORSET_SELECTED);
@@ -1388,25 +1388,26 @@ static int hist_browser__show_hierarchy_entry(struct hist_browser *browser,
HE_COLORSET_NORMAL);
}
- ui_browser__write_nstring(&browser->b, "", 2);
- width -= 2;
+ perf_hpp_list__for_each_format(entry->hpp_list, fmt) {
+ ui_browser__write_nstring(&browser->b, "", 2);
+ width -= 2;
- /*
- * No need to call hist_entry__snprintf_alignment()
- * since this fmt is always the last column in the
- * hierarchy mode.
- */
- fmt = entry->fmt;
- if (fmt->color) {
- width -= fmt->color(fmt, &hpp, entry);
- } else {
- int i = 0;
+ /*
+ * No need to call hist_entry__snprintf_alignment()
+ * since this fmt is always the last column in the
+ * hierarchy mode.
+ */
+ if (fmt->color) {
+ width -= fmt->color(fmt, &hpp, entry);
+ } else {
+ int i = 0;
- width -= fmt->entry(fmt, &hpp, entry);
- ui_browser__printf(&browser->b, "%s", ltrim(s));
+ width -= fmt->entry(fmt, &hpp, entry);
+ ui_browser__printf(&browser->b, "%s", ltrim(s));
- while (isspace(s[i++]))
- width++;
+ while (isspace(s[i++]))
+ width++;
+ }
}
}
@@ -1435,8 +1436,7 @@ show_callchain:
}
static int hist_browser__show_no_entry(struct hist_browser *browser,
- unsigned short row,
- int level, int nr_sort_keys)
+ unsigned short row, int level)
{
int width = browser->b.width;
bool current_entry = ui_browser__is_current_entry(&browser->b, row);
@@ -1444,6 +1444,8 @@ static int hist_browser__show_no_entry(struct hist_browser *browser,
int column = 0;
int ret;
struct perf_hpp_fmt *fmt;
+ struct perf_hpp_list_node *fmt_node;
+ int indent = browser->hists->nr_hpp_node - 2;
if (current_entry) {
browser->he_selection = NULL;
@@ -1460,15 +1462,14 @@ static int hist_browser__show_no_entry(struct hist_browser *browser,
ui_browser__write_nstring(&browser->b, "", level * HIERARCHY_INDENT);
width -= level * HIERARCHY_INDENT;
- hists__for_each_format(browser->hists, fmt) {
+ /* the first hpp_list_node is for overhead columns */
+ fmt_node = list_first_entry(&browser->hists->hpp_formats,
+ struct perf_hpp_list_node, list);
+ perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
if (perf_hpp__should_skip(fmt, browser->hists) ||
column++ < browser->b.horiz_scroll)
continue;
- if (perf_hpp__is_sort_entry(fmt) ||
- perf_hpp__is_dynamic_entry(fmt))
- break;
-
ret = fmt->width(fmt, NULL, hists_to_evsel(browser->hists));
if (first) {
@@ -1484,8 +1485,8 @@ static int hist_browser__show_no_entry(struct hist_browser *browser,
width -= ret;
}
- ui_browser__write_nstring(&browser->b, "", nr_sort_keys * HIERARCHY_INDENT);
- width -= nr_sort_keys * HIERARCHY_INDENT;
+ ui_browser__write_nstring(&browser->b, "", indent * HIERARCHY_INDENT);
+ width -= indent * HIERARCHY_INDENT;
if (column >= browser->b.horiz_scroll) {
char buf[32];
@@ -1550,22 +1551,23 @@ static int hists_browser__scnprintf_hierarchy_headers(struct hist_browser *brows
.size = size,
};
struct perf_hpp_fmt *fmt;
+ struct perf_hpp_list_node *fmt_node;
size_t ret = 0;
int column = 0;
- int nr_sort_keys = hists->nr_sort_keys;
- bool first = true;
+ int indent = hists->nr_hpp_node - 2;
+ bool first_node, first_col;
ret = scnprintf(buf, size, " ");
if (advance_hpp_check(&dummy_hpp, ret))
return ret;
- hists__for_each_format(hists, fmt) {
+ /* the first hpp_list_node is for overhead columns */
+ fmt_node = list_first_entry(&hists->hpp_formats,
+ struct perf_hpp_list_node, list);
+ perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
if (column++ < browser->b.horiz_scroll)
continue;
- if (perf_hpp__is_sort_entry(fmt) || perf_hpp__is_dynamic_entry(fmt))
- break;
-
ret = fmt->header(fmt, &dummy_hpp, hists_to_evsel(hists));
if (advance_hpp_check(&dummy_hpp, ret))
break;
@@ -1576,38 +1578,46 @@ static int hists_browser__scnprintf_hierarchy_headers(struct hist_browser *brows
}
ret = scnprintf(dummy_hpp.buf, dummy_hpp.size, "%*s",
- (nr_sort_keys - 1) * HIERARCHY_INDENT, "");
+ indent * HIERARCHY_INDENT, "");
if (advance_hpp_check(&dummy_hpp, ret))
return ret;
- hists__for_each_format(hists, fmt) {
- char *start;
-
- if (!perf_hpp__is_sort_entry(fmt) && !perf_hpp__is_dynamic_entry(fmt))
- continue;
- if (perf_hpp__should_skip(fmt, hists))
- continue;
-
- if (first) {
- first = false;
- } else {
+ first_node = true;
+ list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) {
+ if (!first_node) {
ret = scnprintf(dummy_hpp.buf, dummy_hpp.size, " / ");
if (advance_hpp_check(&dummy_hpp, ret))
break;
}
+ first_node = false;
- ret = fmt->header(fmt, &dummy_hpp, hists_to_evsel(hists));
- dummy_hpp.buf[ret] = '\0';
- rtrim(dummy_hpp.buf);
+ first_col = true;
+ perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
+ char *start;
- start = ltrim(dummy_hpp.buf);
- ret = strlen(start);
+ if (perf_hpp__should_skip(fmt, hists))
+ continue;
+
+ if (!first_col) {
+ ret = scnprintf(dummy_hpp.buf, dummy_hpp.size, "+");
+ if (advance_hpp_check(&dummy_hpp, ret))
+ break;
+ }
+ first_col = false;
- if (start != dummy_hpp.buf)
- memmove(dummy_hpp.buf, start, ret + 1);
+ ret = fmt->header(fmt, &dummy_hpp, hists_to_evsel(hists));
+ dummy_hpp.buf[ret] = '\0';
+ rtrim(dummy_hpp.buf);
- if (advance_hpp_check(&dummy_hpp, ret))
- break;
+ start = ltrim(dummy_hpp.buf);
+ ret = strlen(start);
+
+ if (start != dummy_hpp.buf)
+ memmove(dummy_hpp.buf, start, ret + 1);
+
+ if (advance_hpp_check(&dummy_hpp, ret))
+ break;
+ }
}
return ret;
@@ -1644,7 +1654,6 @@ static unsigned int hist_browser__refresh(struct ui_browser *browser)
u16 header_offset = 0;
struct rb_node *nd;
struct hist_browser *hb = container_of(browser, struct hist_browser, b);
- int nr_sort = hb->hists->nr_sort_keys;
if (hb->show_headers) {
hist_browser__show_headers(hb);
@@ -1671,14 +1680,12 @@ static unsigned int hist_browser__refresh(struct ui_browser *browser)
if (symbol_conf.report_hierarchy) {
row += hist_browser__show_hierarchy_entry(hb, h, row,
- h->depth,
- nr_sort);
+ h->depth);
if (row == browser->rows)
break;
if (h->has_no_entry) {
- hist_browser__show_no_entry(hb, row, h->depth,
- nr_sort);
+ hist_browser__show_no_entry(hb, row, h->depth + 1);
row++;
}
} else {
@@ -1934,7 +1941,7 @@ static int hist_browser__fprintf_hierarchy_entry(struct hist_browser *browser,
struct perf_hpp_fmt *fmt;
bool first = true;
int ret;
- int hierarchy_indent = (nr_sort_keys + 1) * HIERARCHY_INDENT;
+ int hierarchy_indent = nr_sort_keys * HIERARCHY_INDENT;
printed = fprintf(fp, "%*s", level * HIERARCHY_INDENT, "");
@@ -1962,9 +1969,13 @@ static int hist_browser__fprintf_hierarchy_entry(struct hist_browser *browser,
ret = scnprintf(hpp.buf, hpp.size, "%*s", hierarchy_indent, "");
advance_hpp(&hpp, ret);
- fmt = he->fmt;
- ret = fmt->entry(fmt, &hpp, he);
- advance_hpp(&hpp, ret);
+ perf_hpp_list__for_each_format(he->hpp_list, fmt) {
+ ret = scnprintf(hpp.buf, hpp.size, " ");
+ advance_hpp(&hpp, ret);
+
+ ret = fmt->entry(fmt, &hpp, he);
+ advance_hpp(&hpp, ret);
+ }
printed += fprintf(fp, "%s\n", rtrim(s));
diff --git a/tools/perf/ui/gtk/hists.c b/tools/perf/ui/gtk/hists.c
index a5758fdfbe1f..bd9bf7e343b1 100644
--- a/tools/perf/ui/gtk/hists.c
+++ b/tools/perf/ui/gtk/hists.c
@@ -407,11 +407,14 @@ static void perf_gtk__add_hierarchy_entries(struct hists *hists,
struct rb_node *node;
struct hist_entry *he;
struct perf_hpp_fmt *fmt;
+ struct perf_hpp_list_node *fmt_node;
u64 total = hists__total_period(hists);
+ int size;
for (node = rb_first(root); node; node = rb_next(node)) {
GtkTreeIter iter;
float percent;
+ char *bf;
he = rb_entry(node, struct hist_entry, rb_node);
if (he->filtered)
@@ -424,11 +427,11 @@ static void perf_gtk__add_hierarchy_entries(struct hists *hists,
gtk_tree_store_append(store, &iter, parent);
col_idx = 0;
- hists__for_each_format(hists, fmt) {
- if (perf_hpp__is_sort_entry(fmt) ||
- perf_hpp__is_dynamic_entry(fmt))
- break;
+ /* the first hpp_list_node is for overhead columns */
+ fmt_node = list_first_entry(&hists->hpp_formats,
+ struct perf_hpp_list_node, list);
+ perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
if (fmt->color)
fmt->color(fmt, hpp, he);
else
@@ -437,15 +440,26 @@ static void perf_gtk__add_hierarchy_entries(struct hists *hists,
gtk_tree_store_set(store, &iter, col_idx++, hpp->buf, -1);
}
- fmt = he->fmt;
- if (fmt->color)
- fmt->color(fmt, hpp, he);
- else
- fmt->entry(fmt, hpp, he);
+ bf = hpp->buf;
+ size = hpp->size;
+ perf_hpp_list__for_each_format(he->hpp_list, fmt) {
+ int ret;
+
+ if (fmt->color)
+ ret = fmt->color(fmt, hpp, he);
+ else
+ ret = fmt->entry(fmt, hpp, he);
- gtk_tree_store_set(store, &iter, col_idx, rtrim(hpp->buf), -1);
+ snprintf(hpp->buf + ret, hpp->size - ret, " ");
+ advance_hpp(hpp, ret + 2);
+ }
+
+ gtk_tree_store_set(store, &iter, col_idx, ltrim(rtrim(bf)), -1);
if (!he->leaf) {
+ hpp->buf = bf;
+ hpp->size = size;
+
perf_gtk__add_hierarchy_entries(hists, &he->hroot_out,
store, &iter, hpp,
min_pcnt);
@@ -478,6 +492,7 @@ static void perf_gtk__show_hierarchy(GtkWidget *window, struct hists *hists,
float min_pcnt)
{
struct perf_hpp_fmt *fmt;
+ struct perf_hpp_list_node *fmt_node;
GType col_types[MAX_COLUMNS];
GtkCellRenderer *renderer;
GtkTreeStore *store;
@@ -486,7 +501,7 @@ static void perf_gtk__show_hierarchy(GtkWidget *window, struct hists *hists,
int nr_cols = 0;
char s[512];
char buf[512];
- bool first = true;
+ bool first_node, first_col;
struct perf_hpp hpp = {
.buf = s,
.size = sizeof(s),
@@ -506,11 +521,11 @@ static void perf_gtk__show_hierarchy(GtkWidget *window, struct hists *hists,
renderer = gtk_cell_renderer_text_new();
col_idx = 0;
- hists__for_each_format(hists, fmt) {
- if (perf_hpp__is_sort_entry(fmt) ||
- perf_hpp__is_dynamic_entry(fmt))
- break;
+ /* the first hpp_list_node is for overhead columns */
+ fmt_node = list_first_entry(&hists->hpp_formats,
+ struct perf_hpp_list_node, list);
+ perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
gtk_tree_view_insert_column_with_attributes(GTK_TREE_VIEW(view),
-1, fmt->name,
renderer, "markup",
@@ -519,20 +534,24 @@ static void perf_gtk__show_hierarchy(GtkWidget *window, struct hists *hists,
/* construct merged column header since sort keys share single column */
buf[0] = '\0';
- hists__for_each_format(hists ,fmt) {
- if (!perf_hpp__is_sort_entry(fmt) &&
- !perf_hpp__is_dynamic_entry(fmt))
- continue;
- if (perf_hpp__should_skip(fmt, hists))
- continue;
-
- if (first)
- first = false;
- else
+ first_node = true;
+ list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) {
+ if (!first_node)
strcat(buf, " / ");
+ first_node = false;
+
+ first_col = true;
+ perf_hpp_list__for_each_format(&fmt_node->hpp ,fmt) {
+ if (perf_hpp__should_skip(fmt, hists))
+ continue;
- fmt->header(fmt, &hpp, hists_to_evsel(hists));
- strcat(buf, rtrim(hpp.buf));
+ if (!first_col)
+ strcat(buf, "+");
+ first_col = false;
+
+ fmt->header(fmt, &hpp, hists_to_evsel(hists));
+ strcat(buf, ltrim(rtrim(hpp.buf)));
+ }
}
gtk_tree_view_insert_column_with_attributes(GTK_TREE_VIEW(view),
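
The net effect of the merged-header loops above (and of the equivalent stdio and TUI code elsewhere in this series) is that sort keys sharing a hierarchy level are joined with '+' while levels are joined with " / "; sorting by '{comm,dso},sym' would therefore produce a header like "comm+dso / sym". A small standalone sketch of that string construction (the key names are only an example):

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* one inner array per hierarchy level, NULL-terminated */
	const char *levels[][3] = {
		{ "comm", "dso", NULL },
		{ "sym", NULL },
	};
	char buf[128] = "";
	unsigned int i, j;

	for (i = 0; i < 2; i++) {
		if (i)
			strcat(buf, " / ");		/* separator between levels */
		for (j = 0; levels[i][j]; j++) {
			if (j)
				strcat(buf, "+");	/* separator within a level */
			strcat(buf, levels[i][j]);
		}
	}
	printf("%s\n", buf);	/* prints: comm+dso / sym */
	return 0;
}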
diff --git a/tools/perf/ui/hist.c b/tools/perf/ui/hist.c
index 7c0585c146e1..f03c4f70438f 100644
--- a/tools/perf/ui/hist.c
+++ b/tools/perf/ui/hist.c
@@ -5,6 +5,7 @@
#include "../util/util.h"
#include "../util/sort.h"
#include "../util/evsel.h"
+#include "../util/evlist.h"
/* hist period print (hpp) functions */
@@ -715,3 +716,71 @@ void perf_hpp__set_user_width(const char *width_list_str)
break;
}
}
+
+static int add_hierarchy_fmt(struct hists *hists, struct perf_hpp_fmt *fmt)
+{
+ struct perf_hpp_list_node *node = NULL;
+ struct perf_hpp_fmt *fmt_copy;
+ bool found = false;
+ bool skip = perf_hpp__should_skip(fmt, hists);
+
+ list_for_each_entry(node, &hists->hpp_formats, list) {
+ if (node->level == fmt->level) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ node = malloc(sizeof(*node));
+ if (node == NULL)
+ return -1;
+
+ node->skip = skip;
+ node->level = fmt->level;
+ perf_hpp_list__init(&node->hpp);
+
+ hists->nr_hpp_node++;
+ list_add_tail(&node->list, &hists->hpp_formats);
+ }
+
+ fmt_copy = perf_hpp_fmt__dup(fmt);
+ if (fmt_copy == NULL)
+ return -1;
+
+ if (!skip)
+ node->skip = false;
+
+ list_add_tail(&fmt_copy->list, &node->hpp.fields);
+ list_add_tail(&fmt_copy->sort_list, &node->hpp.sorts);
+
+ return 0;
+}
+
+int perf_hpp__setup_hists_formats(struct perf_hpp_list *list,
+ struct perf_evlist *evlist)
+{
+ struct perf_evsel *evsel;
+ struct perf_hpp_fmt *fmt;
+ struct hists *hists;
+ int ret;
+
+ if (!symbol_conf.report_hierarchy)
+ return 0;
+
+ evlist__for_each(evlist, evsel) {
+ hists = evsel__hists(evsel);
+
+ perf_hpp_list__for_each_sort_list(list, fmt) {
+ if (perf_hpp__is_dynamic_entry(fmt) &&
+ !perf_hpp__defined_dynamic_entry(fmt, hists))
+ continue;
+
+ ret = add_hierarchy_fmt(hists, fmt);
+ if (ret < 0)
+ return ret;
+ }
+ }
+
+ return 0;
+}
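
add_hierarchy_fmt() above is what turns the flat format list into a list of perf_hpp_list_node entries, one per level, with hists->nr_hpp_node counting them; the browsers then indent by (nr_hpp_node - 2) columns. A rough standalone illustration of that bucketing (the level numbers and column names are invented; the real ones come from the --sort parser):

#include <stdio.h>

struct fake_fmt { const char *name; int level; };

int main(void)
{
	/* invented input: overhead plus two hierarchy levels */
	const struct fake_fmt fmts[] = {
		{ "Overhead", 0 },
		{ "comm", 1 }, { "dso", 1 },
		{ "sym", 2 },
	};
	const unsigned int n = sizeof(fmts) / sizeof(fmts[0]);
	int nr_hpp_node = 0, last_level = -1;
	unsigned int i;

	for (i = 0; i < n; i++) {
		if (fmts[i].level != last_level) {	/* new perf_hpp_list_node */
			if (nr_hpp_node)
				printf("\n");
			printf("node %d (level %d):", nr_hpp_node, fmts[i].level);
			nr_hpp_node++;
			last_level = fmts[i].level;
		}
		printf(" %s", fmts[i].name);
	}
	printf("\nnr_hpp_node = %d, hierarchy indent = %d\n",
	       nr_hpp_node, nr_hpp_node - 2);
	return 0;
}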
diff --git a/tools/perf/ui/stdio/hist.c b/tools/perf/ui/stdio/hist.c
index 6d06fbb365b6..7aff5acf3265 100644
--- a/tools/perf/ui/stdio/hist.c
+++ b/tools/perf/ui/stdio/hist.c
@@ -412,11 +412,12 @@ static int hist_entry__snprintf(struct hist_entry *he, struct perf_hpp *hpp)
static int hist_entry__hierarchy_fprintf(struct hist_entry *he,
struct perf_hpp *hpp,
- int nr_sort_key, struct hists *hists,
+ struct hists *hists,
FILE *fp)
{
const char *sep = symbol_conf.field_sep;
struct perf_hpp_fmt *fmt;
+ struct perf_hpp_list_node *fmt_node;
char *buf = hpp->buf;
size_t size = hpp->size;
int ret, printed = 0;
@@ -428,10 +429,10 @@ static int hist_entry__hierarchy_fprintf(struct hist_entry *he,
ret = scnprintf(hpp->buf, hpp->size, "%*s", he->depth * HIERARCHY_INDENT, "");
advance_hpp(hpp, ret);
- hists__for_each_format(he->hists, fmt) {
- if (perf_hpp__is_sort_entry(fmt) || perf_hpp__is_dynamic_entry(fmt))
- break;
-
+ /* the first hpp_list_node is for overhead columns */
+ fmt_node = list_first_entry(&hists->hpp_formats,
+ struct perf_hpp_list_node, list);
+ perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
/*
* If there's no field_sep, we still need
* to display initial ' '.
@@ -451,33 +452,33 @@ static int hist_entry__hierarchy_fprintf(struct hist_entry *he,
advance_hpp(hpp, ret);
}
- if (sep)
- ret = scnprintf(hpp->buf, hpp->size, "%s", sep);
- else
+ if (!sep)
ret = scnprintf(hpp->buf, hpp->size, "%*s",
- (nr_sort_key - 1) * HIERARCHY_INDENT + 2, "");
+ (hists->nr_hpp_node - 2) * HIERARCHY_INDENT, "");
advance_hpp(hpp, ret);
printed += fprintf(fp, "%s", buf);
- hpp->buf = buf;
- hpp->size = size;
-
- /*
- * No need to call hist_entry__snprintf_alignment() since this
- * fmt is always the last column in the hierarchy mode.
- */
- fmt = he->fmt;
- if (perf_hpp__use_color() && fmt->color)
- fmt->color(fmt, hpp, he);
- else
- fmt->entry(fmt, hpp, he);
-
- /*
- * dynamic entries are right-aligned but we want left-aligned
- * in the hierarchy mode
- */
- printed += fprintf(fp, "%s\n", ltrim(buf));
+ perf_hpp_list__for_each_format(he->hpp_list, fmt) {
+ hpp->buf = buf;
+ hpp->size = size;
+
+ /*
+ * No need to call hist_entry__snprintf_alignment() since this
+ * fmt is always the last column in the hierarchy mode.
+ */
+ if (perf_hpp__use_color() && fmt->color)
+ fmt->color(fmt, hpp, he);
+ else
+ fmt->entry(fmt, hpp, he);
+
+ /*
+ * dynamic entries are right-aligned but we want left-aligned
+ * in the hierarchy mode
+ */
+ printed += fprintf(fp, "%s%s", sep ?: " ", ltrim(buf));
+ }
+ printed += putc('\n', fp);
if (symbol_conf.use_callchain && he->leaf) {
u64 total = hists__total_period(hists);
@@ -504,12 +505,8 @@ static int hist_entry__fprintf(struct hist_entry *he, size_t size,
if (size == 0 || size > bfsz)
size = hpp.size = bfsz;
- if (symbol_conf.report_hierarchy) {
- int nr_sort = hists->nr_sort_keys;
-
- return hist_entry__hierarchy_fprintf(he, &hpp, nr_sort,
- hists, fp);
- }
+ if (symbol_conf.report_hierarchy)
+ return hist_entry__hierarchy_fprintf(he, &hpp, hists, fp);
hist_entry__snprintf(he, &hpp);
@@ -521,92 +518,97 @@ static int hist_entry__fprintf(struct hist_entry *he, size_t size,
return ret;
}
-static int print_hierarchy_indent(const char *sep, int nr_sort,
+static int print_hierarchy_indent(const char *sep, int indent,
const char *line, FILE *fp)
{
- if (sep != NULL || nr_sort < 1)
+ if (sep != NULL || indent < 2)
return 0;
- return fprintf(fp, "%-.*s", (nr_sort - 1) * HIERARCHY_INDENT, line);
+ return fprintf(fp, "%-.*s", (indent - 2) * HIERARCHY_INDENT, line);
}
static int print_hierarchy_header(struct hists *hists, struct perf_hpp *hpp,
const char *sep, FILE *fp)
{
- bool first = true;
- int nr_sort;
+ bool first_node, first_col;
+ int indent;
int depth;
unsigned width = 0;
unsigned header_width = 0;
struct perf_hpp_fmt *fmt;
+ struct perf_hpp_list_node *fmt_node;
- nr_sort = hists->nr_sort_keys;
+ indent = hists->nr_hpp_node;
/* preserve max indent depth for column headers */
- print_hierarchy_indent(sep, nr_sort, spaces, fp);
+ print_hierarchy_indent(sep, indent, spaces, fp);
- hists__for_each_format(hists, fmt) {
- if (perf_hpp__is_sort_entry(fmt) || perf_hpp__is_dynamic_entry(fmt))
- break;
-
- if (!first)
- fprintf(fp, "%s", sep ?: " ");
- else
- first = false;
+ /* the first hpp_list_node is for overhead columns */
+ fmt_node = list_first_entry(&hists->hpp_formats,
+ struct perf_hpp_list_node, list);
+ perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
fmt->header(fmt, hpp, hists_to_evsel(hists));
- fprintf(fp, "%s", hpp->buf);
+ fprintf(fp, "%s%s", hpp->buf, sep ?: " ");
}
/* combine sort headers with ' / ' */
- first = true;
- hists__for_each_format(hists, fmt) {
- if (!perf_hpp__is_sort_entry(fmt) && !perf_hpp__is_dynamic_entry(fmt))
- continue;
- if (perf_hpp__should_skip(fmt, hists))
- continue;
-
- if (!first)
+ first_node = true;
+ list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) {
+ if (!first_node)
header_width += fprintf(fp, " / ");
- else {
- fprintf(fp, "%s", sep ?: " ");
- first = false;
- }
+ first_node = false;
- fmt->header(fmt, hpp, hists_to_evsel(hists));
- rtrim(hpp->buf);
+ first_col = true;
+ perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
+ if (perf_hpp__should_skip(fmt, hists))
+ continue;
+
+ if (!first_col)
+ header_width += fprintf(fp, "+");
+ first_col = false;
- header_width += fprintf(fp, "%s", ltrim(hpp->buf));
+ fmt->header(fmt, hpp, hists_to_evsel(hists));
+ rtrim(hpp->buf);
+
+ header_width += fprintf(fp, "%s", ltrim(hpp->buf));
+ }
}
fprintf(fp, "\n# ");
/* preserve max indent depth for initial dots */
- print_hierarchy_indent(sep, nr_sort, dots, fp);
+ print_hierarchy_indent(sep, indent, dots, fp);
- first = true;
- hists__for_each_format(hists, fmt) {
- if (perf_hpp__is_sort_entry(fmt) || perf_hpp__is_dynamic_entry(fmt))
- break;
+ /* the first hpp_list_node is for overhead columns */
+ fmt_node = list_first_entry(&hists->hpp_formats,
+ struct perf_hpp_list_node, list);
- if (!first)
- fprintf(fp, "%s", sep ?: " ");
- else
- first = false;
+ first_col = true;
+ perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
+ if (!first_col)
+ fprintf(fp, "%s", sep ?: "..");
+ first_col = false;
width = fmt->width(fmt, hpp, hists_to_evsel(hists));
fprintf(fp, "%.*s", width, dots);
}
depth = 0;
- hists__for_each_format(hists, fmt) {
- if (!perf_hpp__is_sort_entry(fmt) && !perf_hpp__is_dynamic_entry(fmt))
- continue;
- if (perf_hpp__should_skip(fmt, hists))
- continue;
+ list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) {
+ first_col = true;
+ width = depth * HIERARCHY_INDENT;
- width = fmt->width(fmt, hpp, hists_to_evsel(hists));
- width += depth * HIERARCHY_INDENT;
+ perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
+ if (perf_hpp__should_skip(fmt, hists))
+ continue;
+
+ if (!first_col)
+ width++; /* for '+' sign between column header */
+ first_col = false;
+
+ width += fmt->width(fmt, hpp, hists_to_evsel(hists));
+ }
if (width > header_width)
header_width = width;
@@ -625,6 +627,7 @@ size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
int max_cols, float min_pcnt, FILE *fp)
{
struct perf_hpp_fmt *fmt;
+ struct perf_hpp_list_node *fmt_node;
struct rb_node *nd;
size_t ret = 0;
unsigned int width;
@@ -654,6 +657,10 @@ size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
fprintf(fp, "# ");
if (symbol_conf.report_hierarchy) {
+ list_for_each_entry(fmt_node, &hists->hpp_formats, list) {
+ perf_hpp_list__for_each_format(&fmt_node->hpp, fmt)
+ perf_hpp__reset_width(fmt, hists);
+ }
nr_rows += print_hierarchy_header(hists, &dummy_hpp, sep, fp);
goto print_entries;
}
@@ -738,9 +745,9 @@ print_entries:
* display "no entry >= x.xx%" message.
*/
if (!h->leaf && !hist_entry__has_hierarchy_children(h, min_pcnt)) {
- int nr_sort = hists->nr_sort_keys;
+ int depth = hists->nr_hpp_node + h->depth + 1;
- print_hierarchy_indent(sep, nr_sort + h->depth + 1, spaces, fp);
+ print_hierarchy_indent(sep, depth, spaces, fp);
fprintf(fp, "%*sno entry >= %.2f%%\n", indent, "", min_pcnt);
if (max_rows && ++nr_rows >= max_rows)
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 4b8b67bc0cd8..29da9e0d8db9 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -1091,18 +1091,25 @@ static void hists__apply_filters(struct hists *hists, struct hist_entry *he);
static struct hist_entry *hierarchy_insert_entry(struct hists *hists,
struct rb_root *root,
struct hist_entry *he,
- struct perf_hpp_fmt *fmt)
+ struct perf_hpp_list *hpp_list)
{
struct rb_node **p = &root->rb_node;
struct rb_node *parent = NULL;
struct hist_entry *iter, *new;
+ struct perf_hpp_fmt *fmt;
int64_t cmp;
while (*p != NULL) {
parent = *p;
iter = rb_entry(parent, struct hist_entry, rb_node_in);
- cmp = fmt->collapse(fmt, iter, he);
+ cmp = 0;
+ perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
+ cmp = fmt->collapse(fmt, iter, he);
+ if (cmp)
+ break;
+ }
+
if (!cmp) {
he_stat__add_stat(&iter->stat, &he->stat);
return iter;
@@ -1121,24 +1128,26 @@ static struct hist_entry *hierarchy_insert_entry(struct hists *hists,
hists__apply_filters(hists, new);
hists->nr_entries++;
- /* save related format for output */
- new->fmt = fmt;
+ /* save related format list for output */
+ new->hpp_list = hpp_list;
/* some fields are now passed to 'new' */
- if (perf_hpp__is_trace_entry(fmt) || perf_hpp__is_dynamic_entry(fmt))
- he->trace_output = NULL;
- else
- new->trace_output = NULL;
+ perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
+ if (perf_hpp__is_trace_entry(fmt) || perf_hpp__is_dynamic_entry(fmt))
+ he->trace_output = NULL;
+ else
+ new->trace_output = NULL;
- if (perf_hpp__is_srcline_entry(fmt))
- he->srcline = NULL;
- else
- new->srcline = NULL;
+ if (perf_hpp__is_srcline_entry(fmt))
+ he->srcline = NULL;
+ else
+ new->srcline = NULL;
- if (perf_hpp__is_srcfile_entry(fmt))
- he->srcfile = NULL;
- else
- new->srcfile = NULL;
+ if (perf_hpp__is_srcfile_entry(fmt))
+ he->srcfile = NULL;
+ else
+ new->srcfile = NULL;
+ }
rb_link_node(&new->rb_node_in, parent, p);
rb_insert_color(&new->rb_node_in, root);
@@ -1149,21 +1158,19 @@ static int hists__hierarchy_insert_entry(struct hists *hists,
struct rb_root *root,
struct hist_entry *he)
{
- struct perf_hpp_fmt *fmt;
+ struct perf_hpp_list_node *node;
struct hist_entry *new_he = NULL;
struct hist_entry *parent = NULL;
int depth = 0;
int ret = 0;
- hists__for_each_sort_list(hists, fmt) {
- if (!perf_hpp__is_sort_entry(fmt) &&
- !perf_hpp__is_dynamic_entry(fmt))
- continue;
- if (perf_hpp__should_skip(fmt, hists))
+ list_for_each_entry(node, &hists->hpp_formats, list) {
+ /* skip period (overhead) and elided columns */
+ if (node->level == 0 || node->skip)
continue;
/* insert copy of 'he' for each fmt into the hierarchy */
- new_he = hierarchy_insert_entry(hists, root, he, fmt);
+ new_he = hierarchy_insert_entry(hists, root, he, &node->hpp);
if (new_he == NULL) {
ret = -1;
break;
@@ -1358,6 +1365,7 @@ static void hierarchy_insert_output_entry(struct rb_root *root,
struct rb_node **p = &root->rb_node;
struct rb_node *parent = NULL;
struct hist_entry *iter;
+ struct perf_hpp_fmt *fmt;
while (*p != NULL) {
parent = *p;
@@ -1373,8 +1381,10 @@ static void hierarchy_insert_output_entry(struct rb_root *root,
rb_insert_color(&he->rb_node, root);
/* update column width of dynamic entry */
- if (perf_hpp__is_dynamic_entry(he->fmt))
- he->fmt->sort(he->fmt, he, NULL);
+ perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
+ if (perf_hpp__is_dynamic_entry(fmt))
+ fmt->sort(fmt, he, NULL);
+ }
}
static void hists__hierarchy_output_resort(struct hists *hists,
@@ -2105,6 +2115,7 @@ int __hists__init(struct hists *hists, struct perf_hpp_list *hpp_list)
pthread_mutex_init(&hists->lock, NULL);
hists->socket_filter = -1;
hists->hpp_list = hpp_list;
+ INIT_LIST_HEAD(&hists->hpp_formats);
return 0;
}
@@ -2133,8 +2144,19 @@ static void hists__delete_all_entries(struct hists *hists)
static void hists_evsel__exit(struct perf_evsel *evsel)
{
struct hists *hists = evsel__hists(evsel);
+ struct perf_hpp_fmt *fmt, *pos;
+ struct perf_hpp_list_node *node, *tmp;
hists__delete_all_entries(hists);
+
+ list_for_each_entry_safe(node, tmp, &hists->hpp_formats, list) {
+ perf_hpp_list__for_each_format_safe(&node->hpp, fmt, pos) {
+ list_del(&fmt->list);
+ free(fmt);
+ }
+ list_del(&node->list);
+ free(node);
+ }
}
static int hists_evsel__init(struct perf_evsel *evsel)
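
The interesting change in hierarchy_insert_entry() above is that an entry is now matched against all sort keys of a level rather than a single fmt: the loop stops at the first non-zero ->collapse() result, i.e. a lexicographic comparison. A standalone sketch of that pattern (the struct and key functions are made up):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct sample { const char *comm; const char *dso; };

typedef int (*cmp_fn)(const struct sample *, const struct sample *);

static int cmp_comm(const struct sample *a, const struct sample *b)
{
	return strcmp(a->comm, b->comm);
}

static int cmp_dso(const struct sample *a, const struct sample *b)
{
	return strcmp(a->dso, b->dso);
}

int main(void)
{
	/* one hierarchy level holding two sort keys, e.g. '{comm,dso}' */
	cmp_fn keys[] = { cmp_comm, cmp_dso };
	struct sample a = { "perf", "libc-2.23.so" };
	struct sample b = { "perf", "ld-2.23.so" };
	int64_t cmp = 0;
	unsigned int i;

	for (i = 0; i < sizeof(keys) / sizeof(keys[0]); i++) {
		cmp = keys[i](&a, &b);
		if (cmp)
			break;	/* first differing key decides */
	}
	printf("%s\n", cmp ? "differ: insert a new entry" : "match: merge stats");
	return 0;
}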
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index da5e50586bfd..2cb017f28f9e 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -78,7 +78,9 @@ struct hists {
u16 col_len[HISTC_NR_COLS];
int socket_filter;
struct perf_hpp_list *hpp_list;
+ struct list_head hpp_formats;
int nr_sort_keys;
+ int nr_hpp_node;
};
struct hist_entry_iter;
@@ -233,6 +235,7 @@ struct perf_hpp_fmt {
int len;
int user_len;
int idx;
+ int level;
};
struct perf_hpp_list {
@@ -243,6 +246,13 @@ struct perf_hpp_list {
extern struct perf_hpp_list perf_hpp_list;
+struct perf_hpp_list_node {
+ struct list_head list;
+ struct perf_hpp_list hpp;
+ int level;
+ bool skip;
+};
+
void perf_hpp_list__column_register(struct perf_hpp_list *list,
struct perf_hpp_fmt *format);
void perf_hpp_list__register_sort_field(struct perf_hpp_list *list,
@@ -298,6 +308,8 @@ void perf_hpp__cancel_cumulate(void);
void perf_hpp__setup_output_field(struct perf_hpp_list *list);
void perf_hpp__reset_output_field(struct perf_hpp_list *list);
void perf_hpp__append_sort_keys(struct perf_hpp_list *list);
+int perf_hpp__setup_hists_formats(struct perf_hpp_list *list,
+ struct perf_evlist *evlist);
bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format);
@@ -307,6 +319,8 @@ bool perf_hpp__is_trace_entry(struct perf_hpp_fmt *fmt);
bool perf_hpp__is_srcline_entry(struct perf_hpp_fmt *fmt);
bool perf_hpp__is_srcfile_entry(struct perf_hpp_fmt *fmt);
+struct perf_hpp_fmt *perf_hpp_fmt__dup(struct perf_hpp_fmt *fmt);
+
int hist_entry__filter(struct hist_entry *he, int type, const void *arg);
static inline bool perf_hpp__should_skip(struct perf_hpp_fmt *format,
diff --git a/tools/perf/util/jitdump.c b/tools/perf/util/jitdump.c
index 99fa5eee9fe0..cd272cc21e05 100644
--- a/tools/perf/util/jitdump.c
+++ b/tools/perf/util/jitdump.c
@@ -99,6 +99,21 @@ jit_close(struct jit_buf_desc *jd)
}
static int
+jit_validate_events(struct perf_session *session)
+{
+ struct perf_evsel *evsel;
+
+ /*
+ * check that all events use CLOCK_MONOTONIC
+ */
+ evlist__for_each(session->evlist, evsel) {
+ if (evsel->attr.use_clockid == 0 || evsel->attr.clockid != CLOCK_MONOTONIC)
+ return -1;
+ }
+ return 0;
+}
+
+static int
jit_open(struct jit_buf_desc *jd, const char *name)
{
struct jitheader header;
@@ -157,6 +172,14 @@ jit_open(struct jit_buf_desc *jd, const char *name)
goto error;
}
+ /*
+ * validate event is using the correct clockid
+ */
+ if (jit_validate_events(jd->session)) {
+ pr_err("error, jitted code must be sampled with perf record -k 1\n");
+ goto error;
+ }
+
bs = header.total_size - sizeof(header);
if (bs > bsz) {
@@ -647,7 +670,7 @@ jit_process(struct perf_session *session,
* first, detect marker mmap (i.e., the jitdump mmap)
*/
if (jit_detect(filename, pid))
- return -1;
+ return 0;
memset(&jd, 0, sizeof(jd));
@@ -665,8 +688,10 @@ jit_process(struct perf_session *session,
*nbytes = 0;
ret = jit_inject(&jd, filename);
- if (!ret)
+ if (!ret) {
*nbytes = jd.bytes_written;
+ ret = 1;
+ }
return ret;
}
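
jit_validate_events() moves here from builtin-inject.c so the clock check happens when the jitdump file is opened: every event must have been recorded with use_clockid set and clockid equal to CLOCK_MONOTONIC, which is what 'perf record -k 1' requests (clockid 1 is CLOCK_MONOTONIC). A small sketch of an attribute that would pass that check (standalone, not the tree code):

#include <linux/perf_event.h>
#include <stdio.h>
#include <string.h>
#include <time.h>

int main(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.use_clockid = 1;			/* what 'perf record -k ...' sets */
	attr.clockid = CLOCK_MONOTONIC;		/* clockid 1 */

	/* mirror of the jit_validate_events() condition */
	if (attr.use_clockid == 0 || attr.clockid != CLOCK_MONOTONIC)
		printf("would be rejected for jit injection\n");
	else
		printf("acceptable for jit injection\n");
	return 0;
}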
diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y
index 85c44ba79cad..5be4a5f216d6 100644
--- a/tools/perf/util/parse-events.y
+++ b/tools/perf/util/parse-events.y
@@ -28,7 +28,7 @@ do { \
INIT_LIST_HEAD(list); \
} while (0)
-static inc_group_count(struct list_head *list,
+static void inc_group_count(struct list_head *list,
struct parse_events_evlist *data)
{
/* Count groups only have more than 1 members */
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 40b7a0d0905b..60b3593d210d 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -240,14 +240,6 @@ static int process_event_stub(struct perf_tool *tool __maybe_unused,
return 0;
}
-static int process_build_id_stub(struct perf_tool *tool __maybe_unused,
- union perf_event *event __maybe_unused,
- struct perf_session *session __maybe_unused)
-{
- dump_printf(": unhandled!\n");
- return 0;
-}
-
static int process_finished_round_stub(struct perf_tool *tool __maybe_unused,
union perf_event *event __maybe_unused,
struct ordered_events *oe __maybe_unused)
@@ -260,23 +252,6 @@ static int process_finished_round(struct perf_tool *tool,
union perf_event *event,
struct ordered_events *oe);
-static int process_id_index_stub(struct perf_tool *tool __maybe_unused,
- union perf_event *event __maybe_unused,
- struct perf_session *perf_session
- __maybe_unused)
-{
- dump_printf(": unhandled!\n");
- return 0;
-}
-
-static int process_event_auxtrace_info_stub(struct perf_tool *tool __maybe_unused,
- union perf_event *event __maybe_unused,
- struct perf_session *session __maybe_unused)
-{
- dump_printf(": unhandled!\n");
- return 0;
-}
-
static int skipn(int fd, off_t n)
{
char buf[4096];
@@ -303,10 +278,9 @@ static s64 process_event_auxtrace_stub(struct perf_tool *tool __maybe_unused,
return event->auxtrace.size;
}
-static
-int process_event_auxtrace_error_stub(struct perf_tool *tool __maybe_unused,
- union perf_event *event __maybe_unused,
- struct perf_session *session __maybe_unused)
+static int process_event_op2_stub(struct perf_tool *tool __maybe_unused,
+ union perf_event *event __maybe_unused,
+ struct perf_session *session __maybe_unused)
{
dump_printf(": unhandled!\n");
return 0;
@@ -410,7 +384,7 @@ void perf_tool__fill_defaults(struct perf_tool *tool)
if (tool->tracing_data == NULL)
tool->tracing_data = process_event_synth_tracing_data_stub;
if (tool->build_id == NULL)
- tool->build_id = process_build_id_stub;
+ tool->build_id = process_event_op2_stub;
if (tool->finished_round == NULL) {
if (tool->ordered_events)
tool->finished_round = process_finished_round;
@@ -418,13 +392,13 @@ void perf_tool__fill_defaults(struct perf_tool *tool)
tool->finished_round = process_finished_round_stub;
}
if (tool->id_index == NULL)
- tool->id_index = process_id_index_stub;
+ tool->id_index = process_event_op2_stub;
if (tool->auxtrace_info == NULL)
- tool->auxtrace_info = process_event_auxtrace_info_stub;
+ tool->auxtrace_info = process_event_op2_stub;
if (tool->auxtrace == NULL)
tool->auxtrace = process_event_auxtrace_stub;
if (tool->auxtrace_error == NULL)
- tool->auxtrace_error = process_event_auxtrace_error_stub;
+ tool->auxtrace_error = process_event_op2_stub;
if (tool->thread_map == NULL)
tool->thread_map = process_event_thread_map_stub;
if (tool->cpu_map == NULL)
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index 4380a2858802..041f236379e0 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -1544,7 +1544,7 @@ static void hse_free(struct perf_hpp_fmt *fmt)
}
static struct hpp_sort_entry *
-__sort_dimension__alloc_hpp(struct sort_dimension *sd)
+__sort_dimension__alloc_hpp(struct sort_dimension *sd, int level)
{
struct hpp_sort_entry *hse;
@@ -1572,6 +1572,7 @@ __sort_dimension__alloc_hpp(struct sort_dimension *sd)
hse->hpp.elide = false;
hse->hpp.len = 0;
hse->hpp.user_len = 0;
+ hse->hpp.level = level;
return hse;
}
@@ -1581,7 +1582,8 @@ static void hpp_free(struct perf_hpp_fmt *fmt)
free(fmt);
}
-static struct perf_hpp_fmt *__hpp_dimension__alloc_hpp(struct hpp_dimension *hd)
+static struct perf_hpp_fmt *__hpp_dimension__alloc_hpp(struct hpp_dimension *hd,
+ int level)
{
struct perf_hpp_fmt *fmt;
@@ -1590,6 +1592,7 @@ static struct perf_hpp_fmt *__hpp_dimension__alloc_hpp(struct hpp_dimension *hd)
INIT_LIST_HEAD(&fmt->list);
INIT_LIST_HEAD(&fmt->sort_list);
fmt->free = hpp_free;
+ fmt->level = level;
}
return fmt;
@@ -1611,9 +1614,9 @@ int hist_entry__filter(struct hist_entry *he, int type, const void *arg)
return hse->se->se_filter(he, type, arg);
}
-static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd)
+static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd, int level)
{
- struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd);
+ struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, level);
if (hse == NULL)
return -1;
@@ -1625,7 +1628,7 @@ static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd)
static int __sort_dimension__add_hpp_output(struct perf_hpp_list *list,
struct sort_dimension *sd)
{
- struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd);
+ struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, 0);
if (hse == NULL)
return -1;
@@ -1868,7 +1871,8 @@ static void hde_free(struct perf_hpp_fmt *fmt)
}
static struct hpp_dynamic_entry *
-__alloc_dynamic_entry(struct perf_evsel *evsel, struct format_field *field)
+__alloc_dynamic_entry(struct perf_evsel *evsel, struct format_field *field,
+ int level)
{
struct hpp_dynamic_entry *hde;
@@ -1899,10 +1903,39 @@ __alloc_dynamic_entry(struct perf_evsel *evsel, struct format_field *field)
hde->hpp.elide = false;
hde->hpp.len = 0;
hde->hpp.user_len = 0;
+ hde->hpp.level = level;
return hde;
}
+struct perf_hpp_fmt *perf_hpp_fmt__dup(struct perf_hpp_fmt *fmt)
+{
+ struct perf_hpp_fmt *new_fmt = NULL;
+
+ if (perf_hpp__is_sort_entry(fmt)) {
+ struct hpp_sort_entry *hse, *new_hse;
+
+ hse = container_of(fmt, struct hpp_sort_entry, hpp);
+ new_hse = memdup(hse, sizeof(*hse));
+ if (new_hse)
+ new_fmt = &new_hse->hpp;
+ } else if (perf_hpp__is_dynamic_entry(fmt)) {
+ struct hpp_dynamic_entry *hde, *new_hde;
+
+ hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
+ new_hde = memdup(hde, sizeof(*hde));
+ if (new_hde)
+ new_fmt = &new_hde->hpp;
+ } else {
+ new_fmt = memdup(fmt, sizeof(*fmt));
+ }
+
+ INIT_LIST_HEAD(&new_fmt->list);
+ INIT_LIST_HEAD(&new_fmt->sort_list);
+
+ return new_fmt;
+}
+
static int parse_field_name(char *str, char **event, char **field, char **opt)
{
char *event_name, *field_name, *opt_name;
@@ -1974,11 +2007,11 @@ static struct perf_evsel *find_evsel(struct perf_evlist *evlist, char *event_nam
static int __dynamic_dimension__add(struct perf_evsel *evsel,
struct format_field *field,
- bool raw_trace)
+ bool raw_trace, int level)
{
struct hpp_dynamic_entry *hde;
- hde = __alloc_dynamic_entry(evsel, field);
+ hde = __alloc_dynamic_entry(evsel, field, level);
if (hde == NULL)
return -ENOMEM;
@@ -1988,14 +2021,14 @@ static int __dynamic_dimension__add(struct perf_evsel *evsel,
return 0;
}
-static int add_evsel_fields(struct perf_evsel *evsel, bool raw_trace)
+static int add_evsel_fields(struct perf_evsel *evsel, bool raw_trace, int level)
{
int ret;
struct format_field *field;
field = evsel->tp_format->format.fields;
while (field) {
- ret = __dynamic_dimension__add(evsel, field, raw_trace);
+ ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
if (ret < 0)
return ret;
@@ -2004,7 +2037,8 @@ static int add_evsel_fields(struct perf_evsel *evsel, bool raw_trace)
return 0;
}
-static int add_all_dynamic_fields(struct perf_evlist *evlist, bool raw_trace)
+static int add_all_dynamic_fields(struct perf_evlist *evlist, bool raw_trace,
+ int level)
{
int ret;
struct perf_evsel *evsel;
@@ -2013,7 +2047,7 @@ static int add_all_dynamic_fields(struct perf_evlist *evlist, bool raw_trace)
if (evsel->attr.type != PERF_TYPE_TRACEPOINT)
continue;
- ret = add_evsel_fields(evsel, raw_trace);
+ ret = add_evsel_fields(evsel, raw_trace, level);
if (ret < 0)
return ret;
}
@@ -2021,7 +2055,7 @@ static int add_all_dynamic_fields(struct perf_evlist *evlist, bool raw_trace)
}
static int add_all_matching_fields(struct perf_evlist *evlist,
- char *field_name, bool raw_trace)
+ char *field_name, bool raw_trace, int level)
{
int ret = -ESRCH;
struct perf_evsel *evsel;
@@ -2035,14 +2069,15 @@ static int add_all_matching_fields(struct perf_evlist *evlist,
if (field == NULL)
continue;
- ret = __dynamic_dimension__add(evsel, field, raw_trace);
+ ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
if (ret < 0)
break;
}
return ret;
}
-static int add_dynamic_entry(struct perf_evlist *evlist, const char *tok)
+static int add_dynamic_entry(struct perf_evlist *evlist, const char *tok,
+ int level)
{
char *str, *event_name, *field_name, *opt_name;
struct perf_evsel *evsel;
@@ -2072,12 +2107,12 @@ static int add_dynamic_entry(struct perf_evlist *evlist, const char *tok)
}
if (!strcmp(field_name, "trace_fields")) {
- ret = add_all_dynamic_fields(evlist, raw_trace);
+ ret = add_all_dynamic_fields(evlist, raw_trace, level);
goto out;
}
if (event_name == NULL) {
- ret = add_all_matching_fields(evlist, field_name, raw_trace);
+ ret = add_all_matching_fields(evlist, field_name, raw_trace, level);
goto out;
}
@@ -2095,7 +2130,7 @@ static int add_dynamic_entry(struct perf_evlist *evlist, const char *tok)
}
if (!strcmp(field_name, "*")) {
- ret = add_evsel_fields(evsel, raw_trace);
+ ret = add_evsel_fields(evsel, raw_trace, level);
} else {
field = pevent_find_any_field(evsel->tp_format, field_name);
if (field == NULL) {
@@ -2104,7 +2139,7 @@ static int add_dynamic_entry(struct perf_evlist *evlist, const char *tok)
return -ENOENT;
}
- ret = __dynamic_dimension__add(evsel, field, raw_trace);
+ ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
}
out:
@@ -2112,12 +2147,12 @@ out:
return ret;
}
-static int __sort_dimension__add(struct sort_dimension *sd)
+static int __sort_dimension__add(struct sort_dimension *sd, int level)
{
if (sd->taken)
return 0;
- if (__sort_dimension__add_hpp_sort(sd) < 0)
+ if (__sort_dimension__add_hpp_sort(sd, level) < 0)
return -1;
if (sd->entry->se_collapse)
@@ -2128,14 +2163,14 @@ static int __sort_dimension__add(struct sort_dimension *sd)
return 0;
}
-static int __hpp_dimension__add(struct hpp_dimension *hd)
+static int __hpp_dimension__add(struct hpp_dimension *hd, int level)
{
struct perf_hpp_fmt *fmt;
if (hd->taken)
return 0;
- fmt = __hpp_dimension__alloc_hpp(hd);
+ fmt = __hpp_dimension__alloc_hpp(hd, level);
if (!fmt)
return -1;
@@ -2165,7 +2200,7 @@ static int __hpp_dimension__add_output(struct perf_hpp_list *list,
if (hd->taken)
return 0;
- fmt = __hpp_dimension__alloc_hpp(hd);
+ fmt = __hpp_dimension__alloc_hpp(hd, 0);
if (!fmt)
return -1;
@@ -2180,8 +2215,8 @@ int hpp_dimension__add_output(unsigned col)
return __hpp_dimension__add_output(&perf_hpp_list, &hpp_sort_dimensions[col]);
}
-static int sort_dimension__add(const char *tok,
- struct perf_evlist *evlist __maybe_unused)
+static int sort_dimension__add(const char *tok, struct perf_evlist *evlist,
+ int level)
{
unsigned int i;
@@ -2220,7 +2255,7 @@ static int sort_dimension__add(const char *tok,
sort__has_thread = 1;
}
- return __sort_dimension__add(sd);
+ return __sort_dimension__add(sd, level);
}
for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
@@ -2229,7 +2264,7 @@ static int sort_dimension__add(const char *tok,
if (strncasecmp(tok, hd->name, strlen(tok)))
continue;
- return __hpp_dimension__add(hd);
+ return __hpp_dimension__add(hd, level);
}
for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
@@ -2244,7 +2279,7 @@ static int sort_dimension__add(const char *tok,
if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to)
sort__has_sym = 1;
- __sort_dimension__add(sd);
+ __sort_dimension__add(sd, level);
return 0;
}
@@ -2260,11 +2295,11 @@ static int sort_dimension__add(const char *tok,
if (sd->entry == &sort_mem_daddr_sym)
sort__has_sym = 1;
- __sort_dimension__add(sd);
+ __sort_dimension__add(sd, level);
return 0;
}
- if (!add_dynamic_entry(evlist, tok))
+ if (!add_dynamic_entry(evlist, tok, level))
return 0;
return -ESRCH;
@@ -2274,18 +2309,41 @@ static int setup_sort_list(char *str, struct perf_evlist *evlist)
{
char *tmp, *tok;
int ret = 0;
+ int level = 0;
+ int next_level = 1;
+ bool in_group = false;
+
+ do {
+ tok = str;
+ tmp = strpbrk(str, "{}, ");
+ if (tmp) {
+ if (in_group)
+ next_level = level;
+ else
+ next_level = level + 1;
+
+ if (*tmp == '{')
+ in_group = true;
+ else if (*tmp == '}')
+ in_group = false;
+
+ *tmp = '\0';
+ str = tmp + 1;
+ }
- for (tok = strtok_r(str, ", ", &tmp);
- tok; tok = strtok_r(NULL, ", ", &tmp)) {
- ret = sort_dimension__add(tok, evlist);
- if (ret == -EINVAL) {
- error("Invalid --sort key: `%s'", tok);
- break;
- } else if (ret == -ESRCH) {
- error("Unknown --sort key: `%s'", tok);
- break;
+ if (*tok) {
+ ret = sort_dimension__add(tok, evlist, level);
+ if (ret == -EINVAL) {
+ error("Invalid --sort key: `%s'", tok);
+ break;
+ } else if (ret == -ESRCH) {
+ error("Unknown --sort key: `%s'", tok);
+ break;
+ }
}
- }
+
+ level = next_level;
+ } while (tmp);
return ret;
}
@@ -2667,7 +2725,7 @@ int setup_sorting(struct perf_evlist *evlist)
return err;
if (parent_pattern != default_parent_pattern) {
- err = sort_dimension__add("parent", evlist);
+ err = sort_dimension__add("parent", evlist, -1);
if (err < 0)
return err;
}
@@ -2692,6 +2750,10 @@ int setup_sorting(struct perf_evlist *evlist)
/* and then copy output fields to sort keys */
perf_hpp__append_sort_keys(&perf_hpp_list);
+ /* setup hists-specific output fields */
+ if (perf_hpp__setup_hists_formats(&perf_hpp_list, evlist) < 0)
+ return -1;
+
return 0;
}
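
For reference, the new setup_sort_list() tokenizer above scans the --sort string with strpbrk() over "{}, ": a comma outside braces advances to the next hierarchy level, while keys inside '{...}' stay on the current one. A standalone, simplified rendering of that scan, which only prints the level each key would get:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char str[] = "{comm,dso},sym";	/* example --sort string */
	char *s = str, *tmp, *tok;
	int level = 0, next_level = 1;
	int in_group = 0;

	do {
		tok = s;
		tmp = strpbrk(s, "{}, ");
		if (tmp) {
			next_level = in_group ? level : level + 1;

			if (*tmp == '{')
				in_group = 1;
			else if (*tmp == '}')
				in_group = 0;

			*tmp = '\0';
			s = tmp + 1;
		}

		if (*tok)
			printf("key '%s' -> level %d\n", tok, level);

		level = next_level;
	} while (tmp);

	/* prints: comm and dso on one level, sym on the next */
	return 0;
}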
diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h
index 25a5529a94e4..ea1f722cffea 100644
--- a/tools/perf/util/sort.h
+++ b/tools/perf/util/sort.h
@@ -130,6 +130,7 @@ struct hist_entry {
u32 raw_size;
void *trace_output;
struct perf_hpp_fmt *fmt;
+ struct perf_hpp_list *hpp_list;
struct hist_entry *parent_he;
union {
/* this is for hierarchical entry structure */
diff --git a/tools/perf/util/tsc.c b/tools/perf/util/tsc.c
index 4d4210d4e13d..1b741646eed0 100644
--- a/tools/perf/util/tsc.c
+++ b/tools/perf/util/tsc.c
@@ -19,7 +19,7 @@ u64 tsc_to_perf_time(u64 cyc, struct perf_tsc_conversion *tc)
u64 quot, rem;
quot = cyc >> tc->time_shift;
- rem = cyc & ((1 << tc->time_shift) - 1);
+ rem = cyc & (((u64)1 << tc->time_shift) - 1);
return tc->time_zero + quot * tc->time_mult +
((rem * tc->time_mult) >> tc->time_shift);
}