Diffstat (limited to 'tools/perf/util/hist.c')
 -rw-r--r--  tools/perf/util/hist.c | 841
 1 file changed, 749 insertions(+), 92 deletions(-)
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 68a7612019dc..290b3cbf6877 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -179,6 +179,9 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
if (h->transaction)
hists__new_col_len(hists, HISTC_TRANSACTION,
hist_entry__transaction_len());
+
+ if (h->trace_output)
+ hists__new_col_len(hists, HISTC_TRACE, strlen(h->trace_output));
}
void hists__output_recalc_col_len(struct hists *hists, int max_rows)
@@ -245,6 +248,8 @@ static void he_stat__decay(struct he_stat *he_stat)
/* XXX need decay for weight too? */
}
+static void hists__delete_entry(struct hists *hists, struct hist_entry *he);
+
static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
u64 prev_period = he->stat.period;
@@ -260,21 +265,45 @@ static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
diff = prev_period - he->stat.period;
- hists->stats.total_period -= diff;
- if (!he->filtered)
- hists->stats.total_non_filtered_period -= diff;
+ if (!he->depth) {
+ hists->stats.total_period -= diff;
+ if (!he->filtered)
+ hists->stats.total_non_filtered_period -= diff;
+ }
+
+ if (!he->leaf) {
+ struct hist_entry *child;
+ struct rb_node *node = rb_first(&he->hroot_out);
+ while (node) {
+ child = rb_entry(node, struct hist_entry, rb_node);
+ node = rb_next(node);
+
+ if (hists__decay_entry(hists, child))
+ hists__delete_entry(hists, child);
+ }
+ }
return he->stat.period == 0;
}
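To make the recursion above concrete, here is a minimal, self-contained sketch of the decay-and-delete pattern, using a hypothetical first-child/next-sibling list instead of perf's rbtrees and a fixed 7/8 factor standing in for whatever he_stat__decay() actually applies. The point mirrored from the patch is that a child reaching zero is unlinked and freed while iterating, and the caller only learns whether the entry itself decayed to zero.

#include <stdio.h>
#include <stdlib.h>

struct entry {
        unsigned long long period;
        struct entry *child;    /* first child */
        struct entry *sibling;  /* next sibling */
};

/* Returns non-zero when the entry decayed to zero and should be deleted. */
static int decay_entry(struct entry *e)
{
        struct entry **link = &e->child;

        e->period = e->period * 7 / 8;  /* illustrative factor only */

        while (*link) {
                struct entry *c = *link;

                if (decay_entry(c)) {
                        *link = c->sibling;  /* unlink the dead child ... */
                        free(c);             /* ... and release it */
                } else {
                        link = &c->sibling;
                }
        }
        return e->period == 0;
}

int main(void)
{
        struct entry *root = calloc(1, sizeof(*root));
        struct entry *child = calloc(1, sizeof(*child));
        int i;

        root->period = 1000;
        child->period = 3;
        root->child = child;

        for (i = 0; i < 10; i++)
                decay_entry(root);

        printf("root period after 10 decays: %llu\n", root->period);
        free(root);
        return 0;
}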
static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
{
- rb_erase(&he->rb_node, &hists->entries);
+ struct rb_root *root_in;
+ struct rb_root *root_out;
- if (sort__need_collapse)
- rb_erase(&he->rb_node_in, &hists->entries_collapsed);
- else
- rb_erase(&he->rb_node_in, hists->entries_in);
+ if (he->parent_he) {
+ root_in = &he->parent_he->hroot_in;
+ root_out = &he->parent_he->hroot_out;
+ } else {
+ if (sort__need_collapse)
+ root_in = &hists->entries_collapsed;
+ else
+ root_in = hists->entries_in;
+ root_out = &hists->entries;
+ }
+
+ rb_erase(&he->rb_node_in, root_in);
+ rb_erase(&he->rb_node, root_out);
--hists->nr_entries;
if (!he->filtered)
@@ -393,6 +422,9 @@ static struct hist_entry *hist_entry__new(struct hist_entry *template,
}
INIT_LIST_HEAD(&he->pairs.node);
thread__get(he->thread);
+
+ if (!symbol_conf.report_hierarchy)
+ he->leaf = true;
}
return he;
@@ -405,6 +437,16 @@ static u8 symbol__parent_filter(const struct symbol *parent)
return 0;
}
+static void hist_entry__add_callchain_period(struct hist_entry *he, u64 period)
+{
+ if (!symbol_conf.use_callchain)
+ return;
+
+ he->hists->callchain_period += period;
+ if (!he->filtered)
+ he->hists->callchain_non_filtered_period += period;
+}
+
static struct hist_entry *hists__findnew_entry(struct hists *hists,
struct hist_entry *entry,
struct addr_location *al,
@@ -432,8 +474,10 @@ static struct hist_entry *hists__findnew_entry(struct hists *hists,
cmp = hist_entry__cmp(he, entry);
if (!cmp) {
- if (sample_self)
+ if (sample_self) {
he_stat__add_period(&he->stat, period, weight);
+ hist_entry__add_callchain_period(he, period);
+ }
if (symbol_conf.cumulate_callchain)
he_stat__add_period(he->stat_acc, period, weight);
@@ -466,6 +510,8 @@ static struct hist_entry *hists__findnew_entry(struct hists *hists,
if (!he)
return NULL;
+ if (sample_self)
+ hist_entry__add_callchain_period(he, period);
hists->nr_entries++;
rb_link_node(&he->rb_node_in, parent, p);
@@ -951,10 +997,15 @@ out:
int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
+ struct hists *hists = left->hists;
struct perf_hpp_fmt *fmt;
int64_t cmp = 0;
- perf_hpp__for_each_sort_list(fmt) {
+ hists__for_each_sort_list(hists, fmt) {
+ if (perf_hpp__is_dynamic_entry(fmt) &&
+ !perf_hpp__defined_dynamic_entry(fmt, hists))
+ continue;
+
cmp = fmt->cmp(fmt, left, right);
if (cmp)
break;
@@ -966,10 +1017,15 @@ hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
+ struct hists *hists = left->hists;
struct perf_hpp_fmt *fmt;
int64_t cmp = 0;
- perf_hpp__for_each_sort_list(fmt) {
+ hists__for_each_sort_list(hists, fmt) {
+ if (perf_hpp__is_dynamic_entry(fmt) &&
+ !perf_hpp__defined_dynamic_entry(fmt, hists))
+ continue;
+
cmp = fmt->collapse(fmt, left, right);
if (cmp)
break;
@@ -1006,17 +1062,250 @@ void hist_entry__delete(struct hist_entry *he)
}
/*
+ * If this is not the last column, then we need to pad it according to the
+ * pre-calculated max length for this column, otherwise don't bother adding
+ * spaces, because that would break viewing this with, for instance, 'less':
+ * it would show tons of trailing spaces when a long C++ demangled method
+ * name is sampled.
+ */
+int hist_entry__snprintf_alignment(struct hist_entry *he, struct perf_hpp *hpp,
+ struct perf_hpp_fmt *fmt, int printed)
+{
+ if (!list_is_last(&fmt->list, &he->hists->hpp_list->fields)) {
+ const int width = fmt->width(fmt, hpp, hists_to_evsel(he->hists));
+ if (printed < width) {
+ advance_hpp(hpp, printed);
+ printed = scnprintf(hpp->buf, hpp->size, "%-*s", width - printed, " ");
+ }
+ }
+
+ return printed;
+}
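A standalone illustration of the padding rule described in the comment above: every column except the last is padded out to its pre-calculated width, so trailing spaces never appear after the final column. Plain snprintf() stands in for perf's hpp buffer helpers; the names below are made up for the example.

#include <stdio.h>

static int print_column(char *buf, size_t size, const char *val,
                        int width, int is_last)
{
        int printed = snprintf(buf, size, "%s", val);

        /* pad to the column width unless this is the last column */
        if (!is_last && printed < width)
                printed += snprintf(buf + printed, size - printed,
                                    "%-*s", width - printed, " ");
        return printed;
}

int main(void)
{
        char line[128];
        int pos = 0;

        pos += print_column(line + pos, sizeof(line) - pos, "12.34%", 10, 0);
        pos += print_column(line + pos, sizeof(line) - pos, "main", 30, 1);

        printf("[%s]\n", line);   /* "[12.34%    main]" -- no trailing spaces */
        return 0;
}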
+
+/*
* collapse the histogram
*/
-bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
- struct rb_root *root, struct hist_entry *he)
+static void hists__apply_filters(struct hists *hists, struct hist_entry *he);
+static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *he,
+ enum hist_filter type);
+
+typedef bool (*fmt_chk_fn)(struct perf_hpp_fmt *fmt);
+
+static bool check_thread_entry(struct perf_hpp_fmt *fmt)
+{
+ return perf_hpp__is_thread_entry(fmt) || perf_hpp__is_comm_entry(fmt);
+}
+
+static void hist_entry__check_and_remove_filter(struct hist_entry *he,
+ enum hist_filter type,
+ fmt_chk_fn check)
+{
+ struct perf_hpp_fmt *fmt;
+ bool type_match = false;
+ struct hist_entry *parent = he->parent_he;
+
+ switch (type) {
+ case HIST_FILTER__THREAD:
+ if (symbol_conf.comm_list == NULL &&
+ symbol_conf.pid_list == NULL &&
+ symbol_conf.tid_list == NULL)
+ return;
+ break;
+ case HIST_FILTER__DSO:
+ if (symbol_conf.dso_list == NULL)
+ return;
+ break;
+ case HIST_FILTER__SYMBOL:
+ if (symbol_conf.sym_list == NULL)
+ return;
+ break;
+ case HIST_FILTER__PARENT:
+ case HIST_FILTER__GUEST:
+ case HIST_FILTER__HOST:
+ case HIST_FILTER__SOCKET:
+ default:
+ return;
+ }
+
+ /* if it's filtered by own fmt, it has to have filter bits */
+ perf_hpp_list__for_each_format(he->hpp_list, fmt) {
+ if (check(fmt)) {
+ type_match = true;
+ break;
+ }
+ }
+
+ if (type_match) {
+ /*
+ * If the filter is for the current level entry, propagate
+ * the filter marker to the parents. The marker bit was
+ * already set by default, so it only needs to be cleared
+ * on non-filtered entries.
+ */
+ if (!(he->filtered & (1 << type))) {
+ while (parent) {
+ parent->filtered &= ~(1 << type);
+ parent = parent->parent_he;
+ }
+ }
+ } else {
+ /*
+ * If the current entry doesn't have matching formats, set
+ * the filter marker for upper level entries. It will be
+ * cleared if its lower level entries are not filtered.
+ *
+ * For lower level entries, inherit the parent's
+ * filter bit so that lower level entries of a
+ * non-filtered entry won't set the filter marker.
+ */
+ if (parent == NULL)
+ he->filtered |= (1 << type);
+ else
+ he->filtered |= (parent->filtered & (1 << type));
+ }
+}
+
+static void hist_entry__apply_hierarchy_filters(struct hist_entry *he)
+{
+ hist_entry__check_and_remove_filter(he, HIST_FILTER__THREAD,
+ check_thread_entry);
+
+ hist_entry__check_and_remove_filter(he, HIST_FILTER__DSO,
+ perf_hpp__is_dso_entry);
+
+ hist_entry__check_and_remove_filter(he, HIST_FILTER__SYMBOL,
+ perf_hpp__is_sym_entry);
+
+ hists__apply_filters(he->hists, he);
+}
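A toy model of the propagation above, under the simplifying assumption that the filter marker is pre-set on every level: when an entry is kept at the level that owns the filtered column, the marker is cleared on it and on every parent, so the whole chain stays visible. The types and names are hypothetical, not perf's.

#include <stdio.h>

enum { FILTER_DSO = 1 };

struct node {
        unsigned int filtered;   /* one bit per filter type */
        struct node *parent;
};

static void clear_filter_upwards(struct node *n, int type)
{
        for (; n; n = n->parent)
                n->filtered &= ~(1U << type);
}

int main(void)
{
        struct node top  = { .filtered = 1U << FILTER_DSO, .parent = NULL };
        struct node mid  = { .filtered = 1U << FILTER_DSO, .parent = &top };
        struct node leaf = { .filtered = 1U << FILTER_DSO, .parent = &mid };

        /* the leaf level owns the dso column and survives the filter */
        clear_filter_upwards(&leaf, FILTER_DSO);

        printf("top=%u mid=%u leaf=%u\n",
               top.filtered, mid.filtered, leaf.filtered);  /* all 0 */
        return 0;
}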
+
+static struct hist_entry *hierarchy_insert_entry(struct hists *hists,
+ struct rb_root *root,
+ struct hist_entry *he,
+ struct hist_entry *parent_he,
+ struct perf_hpp_list *hpp_list)
+{
+ struct rb_node **p = &root->rb_node;
+ struct rb_node *parent = NULL;
+ struct hist_entry *iter, *new;
+ struct perf_hpp_fmt *fmt;
+ int64_t cmp;
+
+ while (*p != NULL) {
+ parent = *p;
+ iter = rb_entry(parent, struct hist_entry, rb_node_in);
+
+ cmp = 0;
+ perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
+ cmp = fmt->collapse(fmt, iter, he);
+ if (cmp)
+ break;
+ }
+
+ if (!cmp) {
+ he_stat__add_stat(&iter->stat, &he->stat);
+ return iter;
+ }
+
+ if (cmp < 0)
+ p = &parent->rb_left;
+ else
+ p = &parent->rb_right;
+ }
+
+ new = hist_entry__new(he, true);
+ if (new == NULL)
+ return NULL;
+
+ hists->nr_entries++;
+
+ /* save related format list for output */
+ new->hpp_list = hpp_list;
+ new->parent_he = parent_he;
+
+ hist_entry__apply_hierarchy_filters(new);
+
+ /* some fields are now passed to 'new' */
+ perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
+ if (perf_hpp__is_trace_entry(fmt) || perf_hpp__is_dynamic_entry(fmt))
+ he->trace_output = NULL;
+ else
+ new->trace_output = NULL;
+
+ if (perf_hpp__is_srcline_entry(fmt))
+ he->srcline = NULL;
+ else
+ new->srcline = NULL;
+
+ if (perf_hpp__is_srcfile_entry(fmt))
+ he->srcfile = NULL;
+ else
+ new->srcfile = NULL;
+ }
+
+ rb_link_node(&new->rb_node_in, parent, p);
+ rb_insert_color(&new->rb_node_in, root);
+ return new;
+}
+
+static int hists__hierarchy_insert_entry(struct hists *hists,
+ struct rb_root *root,
+ struct hist_entry *he)
+{
+ struct perf_hpp_list_node *node;
+ struct hist_entry *new_he = NULL;
+ struct hist_entry *parent = NULL;
+ int depth = 0;
+ int ret = 0;
+
+ list_for_each_entry(node, &hists->hpp_formats, list) {
+ /* skip period (overhead) and elided columns */
+ if (node->level == 0 || node->skip)
+ continue;
+
+ /* insert copy of 'he' for each fmt into the hierarchy */
+ new_he = hierarchy_insert_entry(hists, root, he, parent, &node->hpp);
+ if (new_he == NULL) {
+ ret = -1;
+ break;
+ }
+
+ root = &new_he->hroot_in;
+ new_he->depth = depth++;
+ parent = new_he;
+ }
+
+ if (new_he) {
+ new_he->leaf = true;
+
+ if (symbol_conf.use_callchain) {
+ callchain_cursor_reset(&callchain_cursor);
+ if (callchain_merge(&callchain_cursor,
+ new_he->callchain,
+ he->callchain) < 0)
+ ret = -1;
+ }
+ }
+
+ /* 'he' is no longer used */
+ hist_entry__delete(he);
+
+ /* return 0 (or -1) since it already applied filters */
+ return ret;
+}
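A rough model of what hists__hierarchy_insert_entry() builds: one node per sort level (say comm, then dso), each nested under the node created for the previous level, with the period accumulated at every level the sample touches. Linked lists stand in for the per-level rbtrees, keys are plain strings, and all names are invented for the sketch (allocations are leaked to keep it short).

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct level_node {
        char key[32];
        unsigned long long period;
        struct level_node *children, *next;
};

static struct level_node *find_or_create(struct level_node **list, const char *key)
{
        struct level_node *n;

        for (n = *list; n; n = n->next)
                if (!strcmp(n->key, key))
                        break;

        if (!n) {
                n = calloc(1, sizeof(*n));
                snprintf(n->key, sizeof(n->key), "%s", key);
                n->next = *list;
                *list = n;
        }
        return n;
}

static void insert_sample(struct level_node **root, const char *keys[],
                          int nr_levels, unsigned long long period)
{
        for (int i = 0; i < nr_levels; i++) {
                struct level_node *n = find_or_create(root, keys[i]);

                n->period += period;    /* every level sees the sample */
                root = &n->children;    /* descend for the next sort key */
        }
}

int main(void)
{
        struct level_node *root = NULL;
        const char *s1[] = { "firefox", "libxul.so" };
        const char *s2[] = { "firefox", "libc.so" };

        insert_sample(&root, s1, 2, 100);
        insert_sample(&root, s2, 2, 50);

        printf("%s: %llu\n", root->key, root->period);  /* firefox: 150 */
        return 0;
}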
+
+int hists__collapse_insert_entry(struct hists *hists, struct rb_root *root,
+ struct hist_entry *he)
{
struct rb_node **p = &root->rb_node;
struct rb_node *parent = NULL;
struct hist_entry *iter;
int64_t cmp;
+ if (symbol_conf.report_hierarchy)
+ return hists__hierarchy_insert_entry(hists, root, he);
+
while (*p != NULL) {
parent = *p;
iter = rb_entry(parent, struct hist_entry, rb_node_in);
@@ -1024,18 +1313,21 @@ bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
cmp = hist_entry__collapse(iter, he);
if (!cmp) {
+ int ret = 0;
+
he_stat__add_stat(&iter->stat, &he->stat);
if (symbol_conf.cumulate_callchain)
he_stat__add_stat(iter->stat_acc, he->stat_acc);
if (symbol_conf.use_callchain) {
callchain_cursor_reset(&callchain_cursor);
- callchain_merge(&callchain_cursor,
- iter->callchain,
- he->callchain);
+ if (callchain_merge(&callchain_cursor,
+ iter->callchain,
+ he->callchain) < 0)
+ ret = -1;
}
hist_entry__delete(he);
- return false;
+ return ret;
}
if (cmp < 0)
@@ -1047,7 +1339,7 @@ bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
rb_link_node(&he->rb_node_in, parent, p);
rb_insert_color(&he->rb_node_in, root);
- return true;
+ return 1;
}
struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
@@ -1073,14 +1365,15 @@ static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
hists__filter_entry_by_socket(hists, he);
}
-void hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
+int hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
{
struct rb_root *root;
struct rb_node *next;
struct hist_entry *n;
+ int ret;
if (!sort__need_collapse)
- return;
+ return 0;
hists->nr_entries = 0;
@@ -1095,7 +1388,11 @@ void hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
next = rb_next(&n->rb_node_in);
rb_erase(&n->rb_node_in, root);
- if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
+ ret = hists__collapse_insert_entry(hists, &hists->entries_collapsed, n);
+ if (ret < 0)
+ return -1;
+
+ if (ret) {
/*
* If it wasn't combined with one of the entries already
* collapsed, we need to apply the filters that may have
@@ -1106,14 +1403,16 @@ void hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
if (prog)
ui_progress__update(prog, 1);
}
+ return 0;
}
static int hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
{
+ struct hists *hists = a->hists;
struct perf_hpp_fmt *fmt;
int64_t cmp = 0;
- perf_hpp__for_each_sort_list(fmt) {
+ hists__for_each_sort_list(hists, fmt) {
if (perf_hpp__should_skip(fmt, a->hists))
continue;
@@ -1154,6 +1453,113 @@ void hists__inc_stats(struct hists *hists, struct hist_entry *h)
hists->stats.total_period += h->stat.period;
}
+static void hierarchy_recalc_total_periods(struct hists *hists)
+{
+ struct rb_node *node;
+ struct hist_entry *he;
+
+ node = rb_first(&hists->entries);
+
+ hists->stats.total_period = 0;
+ hists->stats.total_non_filtered_period = 0;
+
+ /*
+ * Recalculate the total period using top-level entries only,
+ * since lower level entries only see non-filtered entries
+ * while upper level entries hold the sum of both.
+ */
+ while (node) {
+ he = rb_entry(node, struct hist_entry, rb_node);
+ node = rb_next(node);
+
+ hists->stats.total_period += he->stat.period;
+ if (!he->filtered)
+ hists->stats.total_non_filtered_period += he->stat.period;
+ }
+}
+
+static void hierarchy_insert_output_entry(struct rb_root *root,
+ struct hist_entry *he)
+{
+ struct rb_node **p = &root->rb_node;
+ struct rb_node *parent = NULL;
+ struct hist_entry *iter;
+ struct perf_hpp_fmt *fmt;
+
+ while (*p != NULL) {
+ parent = *p;
+ iter = rb_entry(parent, struct hist_entry, rb_node);
+
+ if (hist_entry__sort(he, iter) > 0)
+ p = &parent->rb_left;
+ else
+ p = &parent->rb_right;
+ }
+
+ rb_link_node(&he->rb_node, parent, p);
+ rb_insert_color(&he->rb_node, root);
+
+ /* update column width of dynamic entry */
+ perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
+ if (perf_hpp__is_dynamic_entry(fmt))
+ fmt->sort(fmt, he, NULL);
+ }
+}
+
+static void hists__hierarchy_output_resort(struct hists *hists,
+ struct ui_progress *prog,
+ struct rb_root *root_in,
+ struct rb_root *root_out,
+ u64 min_callchain_hits,
+ bool use_callchain)
+{
+ struct rb_node *node;
+ struct hist_entry *he;
+
+ *root_out = RB_ROOT;
+ node = rb_first(root_in);
+
+ while (node) {
+ he = rb_entry(node, struct hist_entry, rb_node_in);
+ node = rb_next(node);
+
+ hierarchy_insert_output_entry(root_out, he);
+
+ if (prog)
+ ui_progress__update(prog, 1);
+
+ if (!he->leaf) {
+ hists__hierarchy_output_resort(hists, prog,
+ &he->hroot_in,
+ &he->hroot_out,
+ min_callchain_hits,
+ use_callchain);
+ hists->nr_entries++;
+ if (!he->filtered) {
+ hists->nr_non_filtered_entries++;
+ hists__calc_col_len(hists, he);
+ }
+
+ continue;
+ }
+
+ if (!use_callchain)
+ continue;
+
+ if (callchain_param.mode == CHAIN_GRAPH_REL) {
+ u64 total = he->stat.period;
+
+ if (symbol_conf.cumulate_callchain)
+ total = he->stat_acc->period;
+
+ min_callchain_hits = total * (callchain_param.min_percent / 100);
+ }
+
+ callchain_param.sort(&he->sorted_chain, he->callchain,
+ min_callchain_hits, &callchain_param);
+ }
+}
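The callchain threshold math used in both resort paths, reduced to a sketch: in relative graph mode the minimum hit count becomes a percentage of the entry's own period (its accumulated period when callchain cumulation is on), otherwise the precomputed global minimum is used as-is. The helper name and parameters below are illustrative only.

#include <stdio.h>

static unsigned long long min_hits(unsigned long long global_min,
                                   unsigned long long entry_period,
                                   unsigned long long acc_period,
                                   int relative_mode, int cumulate,
                                   double min_percent)
{
        if (!relative_mode)
                return global_min;

        /* per-entry threshold: min_percent of this entry's (accumulated) period */
        return (unsigned long long)((cumulate ? acc_period : entry_period) *
                                    (min_percent / 100.0));
}

int main(void)
{
        /* 0.5% of a 20000-sample entry -> chains below 100 hits are culled */
        printf("%llu\n", min_hits(50, 20000, 40000, 1, 0, 0.5));
        /* absolute mode keeps the global minimum */
        printf("%llu\n", min_hits(50, 20000, 40000, 0, 0, 0.5));
        return 0;
}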
+
static void __hists__insert_output_entry(struct rb_root *entries,
struct hist_entry *he,
u64 min_callchain_hits,
@@ -1162,10 +1568,20 @@ static void __hists__insert_output_entry(struct rb_root *entries,
struct rb_node **p = &entries->rb_node;
struct rb_node *parent = NULL;
struct hist_entry *iter;
+ struct perf_hpp_fmt *fmt;
+
+ if (use_callchain) {
+ if (callchain_param.mode == CHAIN_GRAPH_REL) {
+ u64 total = he->stat.period;
+
+ if (symbol_conf.cumulate_callchain)
+ total = he->stat_acc->period;
- if (use_callchain)
+ min_callchain_hits = total * (callchain_param.min_percent / 100);
+ }
callchain_param.sort(&he->sorted_chain, he->callchain,
min_callchain_hits, &callchain_param);
+ }
while (*p != NULL) {
parent = *p;
@@ -1179,23 +1595,41 @@ static void __hists__insert_output_entry(struct rb_root *entries,
rb_link_node(&he->rb_node, parent, p);
rb_insert_color(&he->rb_node, entries);
+
+ perf_hpp_list__for_each_sort_list(&perf_hpp_list, fmt) {
+ if (perf_hpp__is_dynamic_entry(fmt) &&
+ perf_hpp__defined_dynamic_entry(fmt, he->hists))
+ fmt->sort(fmt, he, NULL); /* update column width */
+ }
}
-void hists__output_resort(struct hists *hists, struct ui_progress *prog)
+static void output_resort(struct hists *hists, struct ui_progress *prog,
+ bool use_callchain)
{
struct rb_root *root;
struct rb_node *next;
struct hist_entry *n;
+ u64 callchain_total;
u64 min_callchain_hits;
- struct perf_evsel *evsel = hists_to_evsel(hists);
- bool use_callchain;
- if (evsel && symbol_conf.use_callchain && !symbol_conf.show_ref_callgraph)
- use_callchain = evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN;
- else
- use_callchain = symbol_conf.use_callchain;
+ callchain_total = hists->callchain_period;
+ if (symbol_conf.filter_relative)
+ callchain_total = hists->callchain_non_filtered_period;
- min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);
+ min_callchain_hits = callchain_total * (callchain_param.min_percent / 100);
+
+ hists__reset_stats(hists);
+ hists__reset_col_len(hists);
+
+ if (symbol_conf.report_hierarchy) {
+ hists__hierarchy_output_resort(hists, prog,
+ &hists->entries_collapsed,
+ &hists->entries,
+ min_callchain_hits,
+ use_callchain);
+ hierarchy_recalc_total_periods(hists);
+ return;
+ }
if (sort__need_collapse)
root = &hists->entries_collapsed;
@@ -1205,9 +1639,6 @@ void hists__output_resort(struct hists *hists, struct ui_progress *prog)
next = rb_first(root);
hists->entries = RB_ROOT;
- hists__reset_stats(hists);
- hists__reset_col_len(hists);
-
while (next) {
n = rb_entry(next, struct hist_entry, rb_node_in);
next = rb_next(&n->rb_node_in);
@@ -1223,15 +1654,136 @@ void hists__output_resort(struct hists *hists, struct ui_progress *prog)
}
}
+void perf_evsel__output_resort(struct perf_evsel *evsel, struct ui_progress *prog)
+{
+ bool use_callchain;
+
+ if (evsel && symbol_conf.use_callchain && !symbol_conf.show_ref_callgraph)
+ use_callchain = evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN;
+ else
+ use_callchain = symbol_conf.use_callchain;
+
+ output_resort(evsel__hists(evsel), prog, use_callchain);
+}
+
+void hists__output_resort(struct hists *hists, struct ui_progress *prog)
+{
+ output_resort(hists, prog, symbol_conf.use_callchain);
+}
+
+static bool can_goto_child(struct hist_entry *he, enum hierarchy_move_dir hmd)
+{
+ if (he->leaf || hmd == HMD_FORCE_SIBLING)
+ return false;
+
+ if (he->unfolded || hmd == HMD_FORCE_CHILD)
+ return true;
+
+ return false;
+}
+
+struct rb_node *rb_hierarchy_last(struct rb_node *node)
+{
+ struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);
+
+ while (can_goto_child(he, HMD_NORMAL)) {
+ node = rb_last(&he->hroot_out);
+ he = rb_entry(node, struct hist_entry, rb_node);
+ }
+ return node;
+}
+
+struct rb_node *__rb_hierarchy_next(struct rb_node *node, enum hierarchy_move_dir hmd)
+{
+ struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);
+
+ if (can_goto_child(he, hmd))
+ node = rb_first(&he->hroot_out);
+ else
+ node = rb_next(node);
+
+ while (node == NULL) {
+ he = he->parent_he;
+ if (he == NULL)
+ break;
+
+ node = rb_next(&he->rb_node);
+ }
+ return node;
+}
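The traversal above is easier to see on a plain tree: pre-order "next" descends into the children of an expandable node, otherwise takes the next sibling, climbing toward the root when a subtree is exhausted. This sketch uses a first-child/next-sibling layout with parent pointers instead of the per-entry rbtrees; the unfolded flag plays the role of can_goto_child().

#include <stdio.h>
#include <stddef.h>

struct node {
        const char *name;
        int unfolded;
        struct node *parent, *child, *sibling;
};

static struct node *hierarchy_next(struct node *n)
{
        if (n->unfolded && n->child)
                return n->child;        /* descend into children */

        while (n && !n->sibling)
                n = n->parent;          /* climb until a sibling exists */

        return n ? n->sibling : NULL;
}

int main(void)
{
        struct node root = { "comm:firefox",  1, NULL,  NULL, NULL };
        struct node dso1 = { "dso:libxul.so", 1, &root, NULL, NULL };
        struct node dso2 = { "dso:libc.so",   0, &root, NULL, NULL };
        struct node sym1 = { "sym:Paint",     0, &dso1, NULL, NULL };

        root.child = &dso1;
        dso1.sibling = &dso2;
        dso1.child = &sym1;

        /* prints: comm:firefox, dso:libxul.so, sym:Paint, dso:libc.so */
        for (struct node *n = &root; n; n = hierarchy_next(n))
                printf("%s\n", n->name);
        return 0;
}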
+
+struct rb_node *rb_hierarchy_prev(struct rb_node *node)
+{
+ struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);
+
+ node = rb_prev(node);
+ if (node)
+ return rb_hierarchy_last(node);
+
+ he = he->parent_he;
+ if (he == NULL)
+ return NULL;
+
+ return &he->rb_node;
+}
+
+bool hist_entry__has_hierarchy_children(struct hist_entry *he, float limit)
+{
+ struct rb_node *node;
+ struct hist_entry *child;
+ float percent;
+
+ if (he->leaf)
+ return false;
+
+ node = rb_first(&he->hroot_out);
+ child = rb_entry(node, struct hist_entry, rb_node);
+
+ while (node && child->filtered) {
+ node = rb_next(node);
+ child = rb_entry(node, struct hist_entry, rb_node);
+ }
+
+ if (node)
+ percent = hist_entry__get_percent_limit(child);
+ else
+ percent = 0;
+
+ return node && percent >= limit;
+}
+
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
enum hist_filter filter)
{
h->filtered &= ~(1 << filter);
+
+ if (symbol_conf.report_hierarchy) {
+ struct hist_entry *parent = h->parent_he;
+
+ while (parent) {
+ he_stat__add_stat(&parent->stat, &h->stat);
+
+ parent->filtered &= ~(1 << filter);
+
+ if (parent->filtered)
+ goto next;
+
+ /* force fold unfiltered entry for simplicity */
+ parent->unfolded = false;
+ parent->has_no_entry = false;
+ parent->row_offset = 0;
+ parent->nr_rows = 0;
+next:
+ parent = parent->parent_he;
+ }
+ }
+
if (h->filtered)
return;
/* force fold unfiltered entry for simplicity */
h->unfolded = false;
+ h->has_no_entry = false;
h->row_offset = 0;
h->nr_rows = 0;
@@ -1254,28 +1806,6 @@ static bool hists__filter_entry_by_dso(struct hists *hists,
return false;
}
-void hists__filter_by_dso(struct hists *hists)
-{
- struct rb_node *nd;
-
- hists->stats.nr_non_filtered_samples = 0;
-
- hists__reset_filter_stats(hists);
- hists__reset_col_len(hists);
-
- for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
- struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
-
- if (symbol_conf.exclude_other && !h->parent)
- continue;
-
- if (hists__filter_entry_by_dso(hists, h))
- continue;
-
- hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
- }
-}
-
static bool hists__filter_entry_by_thread(struct hists *hists,
struct hist_entry *he)
{
@@ -1288,25 +1818,6 @@ static bool hists__filter_entry_by_thread(struct hists *hists,
return false;
}
-void hists__filter_by_thread(struct hists *hists)
-{
- struct rb_node *nd;
-
- hists->stats.nr_non_filtered_samples = 0;
-
- hists__reset_filter_stats(hists);
- hists__reset_col_len(hists);
-
- for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
- struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
-
- if (hists__filter_entry_by_thread(hists, h))
- continue;
-
- hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
- }
-}
-
static bool hists__filter_entry_by_symbol(struct hists *hists,
struct hist_entry *he)
{
@@ -1320,7 +1831,21 @@ static bool hists__filter_entry_by_symbol(struct hists *hists,
return false;
}
-void hists__filter_by_symbol(struct hists *hists)
+static bool hists__filter_entry_by_socket(struct hists *hists,
+ struct hist_entry *he)
+{
+ if ((hists->socket_filter > -1) &&
+ (he->socket != hists->socket_filter)) {
+ he->filtered |= (1 << HIST_FILTER__SOCKET);
+ return true;
+ }
+
+ return false;
+}
+
+typedef bool (*filter_fn_t)(struct hists *hists, struct hist_entry *he);
+
+static void hists__filter_by_type(struct hists *hists, int type, filter_fn_t filter)
{
struct rb_node *nd;
@@ -1332,42 +1857,155 @@ void hists__filter_by_symbol(struct hists *hists)
for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
- if (hists__filter_entry_by_symbol(hists, h))
+ if (filter(hists, h))
continue;
- hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
+ hists__remove_entry_filter(hists, h, type);
}
}
-static bool hists__filter_entry_by_socket(struct hists *hists,
- struct hist_entry *he)
+static void resort_filtered_entry(struct rb_root *root, struct hist_entry *he)
{
- if ((hists->socket_filter > -1) &&
- (he->socket != hists->socket_filter)) {
- he->filtered |= (1 << HIST_FILTER__SOCKET);
- return true;
+ struct rb_node **p = &root->rb_node;
+ struct rb_node *parent = NULL;
+ struct hist_entry *iter;
+ struct rb_root new_root = RB_ROOT;
+ struct rb_node *nd;
+
+ while (*p != NULL) {
+ parent = *p;
+ iter = rb_entry(parent, struct hist_entry, rb_node);
+
+ if (hist_entry__sort(he, iter) > 0)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
}
- return false;
+ rb_link_node(&he->rb_node, parent, p);
+ rb_insert_color(&he->rb_node, root);
+
+ if (he->leaf || he->filtered)
+ return;
+
+ nd = rb_first(&he->hroot_out);
+ while (nd) {
+ struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
+
+ nd = rb_next(nd);
+ rb_erase(&h->rb_node, &he->hroot_out);
+
+ resort_filtered_entry(&new_root, h);
+ }
+
+ he->hroot_out = new_root;
}
-void hists__filter_by_socket(struct hists *hists)
+static void hists__filter_hierarchy(struct hists *hists, int type, const void *arg)
{
struct rb_node *nd;
+ struct rb_root new_root = RB_ROOT;
hists->stats.nr_non_filtered_samples = 0;
hists__reset_filter_stats(hists);
hists__reset_col_len(hists);
- for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
+ nd = rb_first(&hists->entries);
+ while (nd) {
struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
+ int ret;
- if (hists__filter_entry_by_socket(hists, h))
- continue;
+ ret = hist_entry__filter(h, type, arg);
- hists__remove_entry_filter(hists, h, HIST_FILTER__SOCKET);
+ /*
+ * case 1. non-matching type
+ * zero out the period, set filter marker and move to child
+ */
+ if (ret < 0) {
+ memset(&h->stat, 0, sizeof(h->stat));
+ h->filtered |= (1 << type);
+
+ nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_CHILD);
+ }
+ /*
+ * case 2. matched type (filter out)
+ * set filter marker and move to next
+ */
+ else if (ret == 1) {
+ h->filtered |= (1 << type);
+
+ nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
+ }
+ /*
+ * case 3. ok (not filtered)
+ * add period to hists and parents, erase the filter marker
+ * and move to next sibling
+ */
+ else {
+ hists__remove_entry_filter(hists, h, type);
+
+ nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
+ }
+ }
+
+ hierarchy_recalc_total_periods(hists);
+
+ /*
+ * Resort the output after applying a new filter, since a filter in a
+ * lower hierarchy level can change periods in an upper level.
+ */
+ nd = rb_first(&hists->entries);
+ while (nd) {
+ struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
+
+ nd = rb_next(nd);
+ rb_erase(&h->rb_node, &hists->entries);
+
+ resort_filtered_entry(&new_root, h);
}
+
+ hists->entries = new_root;
+}
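The three cases handled in the loop above, condensed into a sketch: a hierarchy filter that does not apply at this level keeps the marker and descends into the children, a match filters the entry out and skips its subtree, and a pass clears the marker and also moves on to the next sibling. The helper and names are illustrative only.

#include <stdio.h>

enum move_dir { GO_CHILD, GO_SIBLING };

static enum move_dir apply_filter(int filter_result, unsigned int *filtered,
                                  unsigned int type_bit)
{
        if (filter_result < 0) {          /* column not present at this level */
                *filtered |= type_bit;
                return GO_CHILD;
        }
        if (filter_result == 1) {         /* matched the filter: hide it */
                *filtered |= type_bit;
                return GO_SIBLING;
        }
        *filtered &= ~type_bit;           /* passed: keep it visible */
        return GO_SIBLING;
}

int main(void)
{
        unsigned int flags = 0;
        const char *dir[] = { "child", "sibling" };
        int results[] = { -1, 1, 0 };

        for (int i = 0; i < 3; i++) {
                enum move_dir d = apply_filter(results[i], &flags, 1);

                printf("ret=%2d -> go to %s, filtered=%u\n",
                       results[i], dir[d], flags);
        }
        return 0;
}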
+
+void hists__filter_by_thread(struct hists *hists)
+{
+ if (symbol_conf.report_hierarchy)
+ hists__filter_hierarchy(hists, HIST_FILTER__THREAD,
+ hists->thread_filter);
+ else
+ hists__filter_by_type(hists, HIST_FILTER__THREAD,
+ hists__filter_entry_by_thread);
+}
+
+void hists__filter_by_dso(struct hists *hists)
+{
+ if (symbol_conf.report_hierarchy)
+ hists__filter_hierarchy(hists, HIST_FILTER__DSO,
+ hists->dso_filter);
+ else
+ hists__filter_by_type(hists, HIST_FILTER__DSO,
+ hists__filter_entry_by_dso);
+}
+
+void hists__filter_by_symbol(struct hists *hists)
+{
+ if (symbol_conf.report_hierarchy)
+ hists__filter_hierarchy(hists, HIST_FILTER__SYMBOL,
+ hists->symbol_filter_str);
+ else
+ hists__filter_by_type(hists, HIST_FILTER__SYMBOL,
+ hists__filter_entry_by_symbol);
+}
+
+void hists__filter_by_socket(struct hists *hists)
+{
+ if (symbol_conf.report_hierarchy)
+ hists__filter_hierarchy(hists, HIST_FILTER__SOCKET,
+ &hists->socket_filter);
+ else
+ hists__filter_by_type(hists, HIST_FILTER__SOCKET,
+ hists__filter_entry_by_socket);
}
void events_stats__inc(struct events_stats *stats, u32 type)
@@ -1585,7 +2223,7 @@ int perf_hist_config(const char *var, const char *value)
return 0;
}
-int __hists__init(struct hists *hists)
+int __hists__init(struct hists *hists, struct perf_hpp_list *hpp_list)
{
memset(hists, 0, sizeof(*hists));
hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
@@ -1594,6 +2232,8 @@ int __hists__init(struct hists *hists)
hists->entries = RB_ROOT;
pthread_mutex_init(&hists->lock, NULL);
hists->socket_filter = -1;
+ hists->hpp_list = hpp_list;
+ INIT_LIST_HEAD(&hists->hpp_formats);
return 0;
}
@@ -1622,15 +2262,26 @@ static void hists__delete_all_entries(struct hists *hists)
static void hists_evsel__exit(struct perf_evsel *evsel)
{
struct hists *hists = evsel__hists(evsel);
+ struct perf_hpp_fmt *fmt, *pos;
+ struct perf_hpp_list_node *node, *tmp;
hists__delete_all_entries(hists);
+
+ list_for_each_entry_safe(node, tmp, &hists->hpp_formats, list) {
+ perf_hpp_list__for_each_format_safe(&node->hpp, fmt, pos) {
+ list_del(&fmt->list);
+ free(fmt);
+ }
+ list_del(&node->list);
+ free(node);
+ }
}
static int hists_evsel__init(struct perf_evsel *evsel)
{
struct hists *hists = evsel__hists(evsel);
- __hists__init(hists);
+ __hists__init(hists, &perf_hpp_list);
return 0;
}
@@ -1649,3 +2300,9 @@ int hists__init(void)
return err;
}
+
+void perf_hpp_list__init(struct perf_hpp_list *list)
+{
+ INIT_LIST_HEAD(&list->fields);
+ INIT_LIST_HEAD(&list->sorts);
+}