path: root/tools/perf/util/hist.c
Diffstat (limited to 'tools/perf/util/hist.c')
-rw-r--r--  tools/perf/util/hist.c | 235
1 file changed, 149 insertions(+), 86 deletions(-)
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 8a793e4c9400..fa359180ebf8 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -4,6 +4,7 @@
#include "dso.h"
#include "build-id.h"
#include "hist.h"
+#include "kvm-stat.h"
#include "map.h"
#include "map_symbol.h"
#include "branch.h"
@@ -105,7 +106,7 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
hists__set_col_len(hists, HISTC_THREAD, len + 8);
if (h->ms.map) {
- len = dso__name_len(h->ms.map->dso);
+ len = dso__name_len(map__dso(h->ms.map));
hists__new_col_len(hists, HISTC_DSO, len);
}
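
Throughout this patch, direct map->dso dereferences are replaced with the map__dso() accessor. A minimal sketch of the idiom, using hypothetical struct layouts rather than perf's real definitions:

    /* Hypothetical stand-ins for perf's struct dso / struct map. */
    struct dso { char name[64]; };
    struct map { struct dso *dso; };

    /*
     * Accessor instead of a raw field read: call sites stop depending
     * on the struct layout, so the member can later be hidden behind
     * reference-count checking without touching every caller.
     */
    static inline struct dso *map__dso(struct map *map)
    {
            return map->dso;
    }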
@@ -119,11 +120,12 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
symlen += BITS_PER_LONG / 4 + 2 + 3;
hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
- symlen = dso__name_len(h->branch_info->from.ms.map->dso);
+ symlen = dso__name_len(map__dso(h->branch_info->from.ms.map));
hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
} else {
symlen = unresolved_col_width + 4 + 2;
hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
+ hists__new_col_len(hists, HISTC_ADDR_FROM, symlen);
hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
}
@@ -133,11 +135,12 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
symlen += BITS_PER_LONG / 4 + 2 + 3;
hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
- symlen = dso__name_len(h->branch_info->to.ms.map->dso);
+ symlen = dso__name_len(map__dso(h->branch_info->to.ms.map));
hists__new_col_len(hists, HISTC_DSO_TO, symlen);
} else {
symlen = unresolved_col_width + 4 + 2;
hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
+ hists__new_col_len(hists, HISTC_ADDR_TO, symlen);
hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
}
@@ -177,7 +180,7 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
}
if (h->mem_info->daddr.ms.map) {
- symlen = dso__name_len(h->mem_info->daddr.ms.map->dso);
+ symlen = dso__name_len(map__dso(h->mem_info->daddr.ms.map));
hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
symlen);
} else {
@@ -188,6 +191,9 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
hists__new_col_len(hists, HISTC_MEM_PHYS_DADDR,
unresolved_col_width + 4 + 2);
+ hists__new_col_len(hists, HISTC_MEM_DATA_PAGE_SIZE,
+ unresolved_col_width + 4 + 2);
+
} else {
symlen = unresolved_col_width + 4 + 2;
hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
@@ -202,13 +208,21 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
hists__new_col_len(hists, HISTC_MEM_TLB, 22);
hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
- hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
+ hists__new_col_len(hists, HISTC_MEM_LVL, 36 + 3);
hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);
+ hists__new_col_len(hists, HISTC_MEM_BLOCKED, 10);
+ hists__new_col_len(hists, HISTC_LOCAL_INS_LAT, 13);
+ hists__new_col_len(hists, HISTC_GLOBAL_INS_LAT, 13);
+ hists__new_col_len(hists, HISTC_LOCAL_P_STAGE_CYC, 13);
+ hists__new_col_len(hists, HISTC_GLOBAL_P_STAGE_CYC, 13);
+ hists__new_col_len(hists, HISTC_ADDR, BITS_PER_LONG / 4 + 2);
+
if (symbol_conf.nanosecs)
hists__new_col_len(hists, HISTC_TIME, 16);
else
hists__new_col_len(hists, HISTC_TIME, 12);
+ hists__new_col_len(hists, HISTC_CODE_PAGE_SIZE, 6);
if (h->srcline) {
len = MAX(strlen(h->srcline), strlen(sort_srcline.se_header));
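
The new HISTC_ADDR column is sized as BITS_PER_LONG / 4 + 2: one column per hex nibble of a pointer plus two for the "0x" prefix, i.e. 64 / 4 + 2 = 18 on 64-bit. As a standalone illustration (ADDR_WIDTH is a made-up name):

    #include <stdio.h>

    /* One hex digit encodes 4 bits; add 2 columns for the "0x" prefix. */
    #define ADDR_WIDTH(bits_per_long) ((bits_per_long) / 4 + 2)

    int main(void)
    {
            printf("%d\n", ADDR_WIDTH(64)); /* prints 18 */
            return 0;
    }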
@@ -227,7 +241,7 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
if (h->cgroup) {
const char *cgrp_name = "unknown";
- struct cgroup *cgrp = cgroup__find(h->ms.maps->machine->env,
+ struct cgroup *cgrp = cgroup__find(maps__machine(h->ms.maps)->env,
h->cgroup);
if (cgrp != NULL)
cgrp_name = cgrp->name;
@@ -281,12 +295,9 @@ static long hist_time(unsigned long htime)
return htime;
}
-static void he_stat__add_period(struct he_stat *he_stat, u64 period,
- u64 weight)
+static void he_stat__add_period(struct he_stat *he_stat, u64 period)
{
-
he_stat->period += period;
- he_stat->weight += weight;
he_stat->nr_events += 1;
}
@@ -298,7 +309,6 @@ static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
dest->period_guest_sys += src->period_guest_sys;
dest->period_guest_us += src->period_guest_us;
dest->nr_events += src->nr_events;
- dest->weight += src->weight;
}
static void he_stat__decay(struct he_stat *he_stat)
@@ -440,7 +450,8 @@ static int hist_entry__init(struct hist_entry *he,
memset(&he->stat, 0, sizeof(he->stat));
}
- map__get(he->ms.map);
+ he->ms.maps = maps__get(he->ms.maps);
+ he->ms.map = map__get(he->ms.map);
if (he->branch_info) {
/*
@@ -455,13 +466,13 @@ static int hist_entry__init(struct hist_entry *he,
memcpy(he->branch_info, template->branch_info,
sizeof(*he->branch_info));
- map__get(he->branch_info->from.ms.map);
- map__get(he->branch_info->to.ms.map);
+ he->branch_info->from.ms.map = map__get(he->branch_info->from.ms.map);
+ he->branch_info->to.ms.map = map__get(he->branch_info->to.ms.map);
}
if (he->mem_info) {
- map__get(he->mem_info->iaddr.ms.map);
- map__get(he->mem_info->daddr.ms.map);
+ he->mem_info->iaddr.ms.map = map__get(he->mem_info->iaddr.ms.map);
+ he->mem_info->daddr.ms.map = map__get(he->mem_info->daddr.ms.map);
}
if (hist_entry__has_callchains(he) && symbol_conf.use_callchain)
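
Note the changed shape of the reference-count calls in hist_entry__init(): map__get(he->ms.map) becomes he->ms.map = map__get(he->ms.map). The getter returns its argument, so assigning the result makes the owning pointer explicit at the call site, which matters once reference-count checking wraps each reference in its own handle. The general idiom, sketched with a hypothetical object type:

    /* Hypothetical refcounted object; perf's map/maps/thread follow
     * this shape. */
    struct object {
            int refcount;
    };

    /* get() returns its argument so a new owner is written as
     *   dst = object__get(src);
     * pairing the taken reference with the pointer that holds it. */
    static struct object *object__get(struct object *obj)
    {
            if (obj != NULL)
                    obj->refcount++;
            return obj;
    }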
@@ -473,21 +484,21 @@ static int hist_entry__init(struct hist_entry *he,
goto err_infos;
}
- if (he->srcline) {
+ if (he->srcline && he->srcline != SRCLINE_UNKNOWN) {
he->srcline = strdup(he->srcline);
if (he->srcline == NULL)
goto err_rawdata;
}
if (symbol_conf.res_sample) {
- he->res_samples = calloc(sizeof(struct res_sample),
- symbol_conf.res_sample);
+ he->res_samples = calloc(symbol_conf.res_sample,
+ sizeof(struct res_sample));
if (!he->res_samples)
goto err_srcline;
}
INIT_LIST_HEAD(&he->pairs.node);
- thread__get(he->thread);
+ he->thread = thread__get(he->thread);
he->hroot_in = RB_ROOT_CACHED;
he->hroot_out = RB_ROOT_CACHED;
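
The res_samples hunk also swaps the calloc() arguments into their documented order, calloc(nmemb, size); the old call allocated the same number of bytes but passed the element size first. For reference (alloc_res_samples and the field layout are illustrative):

    #include <stdlib.h>

    struct res_sample { unsigned long long time; int cpu, tid; };

    static struct res_sample *alloc_res_samples(size_t n)
    {
            /* calloc(nmemb, size): element count first, element size
             * second. GCC 14's -Wcalloc-transposed-args warns when a
             * sizeof expression is passed as the first argument. */
            return calloc(n, sizeof(struct res_sample));
    }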
@@ -504,16 +515,16 @@ err_rawdata:
err_infos:
if (he->branch_info) {
- map__put(he->branch_info->from.ms.map);
- map__put(he->branch_info->to.ms.map);
+ map_symbol__exit(&he->branch_info->from.ms);
+ map_symbol__exit(&he->branch_info->to.ms);
zfree(&he->branch_info);
}
if (he->mem_info) {
- map__put(he->mem_info->iaddr.ms.map);
- map__put(he->mem_info->daddr.ms.map);
+ map_symbol__exit(&he->mem_info->iaddr.ms);
+ map_symbol__exit(&he->mem_info->daddr.ms);
}
err:
- map__zput(he->ms.map);
+ map_symbol__exit(&he->ms);
zfree(&he->stat_acc);
return -ENOMEM;
}
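
The error path now funnels through map_symbol__exit() instead of open-coded map__put() calls, so every reference a map_symbol holds (the maps as well as the map) is dropped in one place. An assumed shape for such a helper, reduced from the call sites in this patch:

    struct maps;
    struct map;
    struct symbol;

    struct map_symbol {
            struct maps *maps;
            struct map *map;
            struct symbol *sym;   /* not refcounted; owned by the dso */
    };

    void maps__put(struct maps *maps);
    void map__put(struct map *map);

    static void map_symbol__exit(struct map_symbol *ms)
    {
            maps__put(ms->maps);
            map__put(ms->map);
            ms->maps = NULL;
            ms->map = NULL;
    }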
@@ -578,7 +589,7 @@ static void hist_entry__add_callchain_period(struct hist_entry *he, u64 period)
static struct hist_entry *hists__findnew_entry(struct hists *hists,
struct hist_entry *entry,
- struct addr_location *al,
+ const struct addr_location *al,
bool sample_self)
{
struct rb_node **p;
@@ -586,7 +597,6 @@ static struct hist_entry *hists__findnew_entry(struct hists *hists,
struct hist_entry *he;
int64_t cmp;
u64 period = entry->stat.period;
- u64 weight = entry->stat.weight;
bool leftmost = true;
p = &hists->entries_in->rb_root.rb_node;
@@ -602,14 +612,13 @@ static struct hist_entry *hists__findnew_entry(struct hists *hists,
* keys were used.
*/
cmp = hist_entry__cmp(he, entry);
-
if (!cmp) {
if (sample_self) {
- he_stat__add_period(&he->stat, period, weight);
+ he_stat__add_period(&he->stat, period);
hist_entry__add_callchain_period(he, period);
}
if (symbol_conf.cumulate_callchain)
- he_stat__add_period(he->stat_acc, period, weight);
+ he_stat__add_period(he->stat_acc, period);
/*
* This mem info was allocated from sample__resolve_mem
@@ -619,6 +628,8 @@ static struct hist_entry *hists__findnew_entry(struct hists *hists,
block_info__zput(entry->block_info);
+ kvm_info__zput(entry->kvm_info);
+
/* If the map of an existing hist_entry has
* become out-of-date due to an exec() or
* similar, update it. Otherwise we will
@@ -690,6 +701,7 @@ __hists__add_entry(struct hists *hists,
struct symbol *sym_parent,
struct branch_info *bi,
struct mem_info *mi,
+ struct kvm_info *ki,
struct block_info *block_info,
struct perf_sample *sample,
bool sample_self,
@@ -715,22 +727,27 @@ __hists__add_entry(struct hists *hists,
.cpumode = al->cpumode,
.ip = al->addr,
.level = al->level,
+ .code_page_size = sample->code_page_size,
.stat = {
.nr_events = 1,
.period = sample->period,
- .weight = sample->weight,
},
.parent = sym_parent,
.filtered = symbol__parent_filter(sym_parent) | al->filtered,
.hists = hists,
.branch_info = bi,
.mem_info = mi,
+ .kvm_info = ki,
.block_info = block_info,
.transaction = sample->transaction,
.raw_data = sample->raw_data,
.raw_size = sample->raw_size,
.ops = ops,
.time = hist_time(sample->time),
+ .weight = sample->weight,
+ .ins_lat = sample->ins_lat,
+ .p_stage_cyc = sample->p_stage_cyc,
+ .simd_flags = sample->simd_flags,
}, *he = hists__findnew_entry(hists, &entry, al, sample_self);
if (!hists->has_callchains && he && he->callchain_size != 0)
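
weight, ins_lat and p_stage_cyc move out of struct he_stat (where weight was summed per event, see the he_stat__add_period() hunks above) onto the hist_entry itself, recorded once from the sample. A hypothetical reduced layout:

    #include <stdint.h>

    /* Per-entry statistics: only values that accumulate per event. */
    struct he_stat {
            uint64_t period;
            uint32_t nr_events;
            /* weight removed: no longer summed here */
    };

    /* Sample-derived fields now live on the entry itself. */
    struct hist_entry_sketch {
            struct he_stat stat;
            uint64_t weight;
            uint64_t ins_lat;
            uint64_t p_stage_cyc;
    };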
@@ -745,10 +762,11 @@ struct hist_entry *hists__add_entry(struct hists *hists,
struct symbol *sym_parent,
struct branch_info *bi,
struct mem_info *mi,
+ struct kvm_info *ki,
struct perf_sample *sample,
bool sample_self)
{
- return __hists__add_entry(hists, al, sym_parent, bi, mi, NULL,
+ return __hists__add_entry(hists, al, sym_parent, bi, mi, ki, NULL,
sample, sample_self, NULL);
}
@@ -758,10 +776,11 @@ struct hist_entry *hists__add_entry_ops(struct hists *hists,
struct symbol *sym_parent,
struct branch_info *bi,
struct mem_info *mi,
+ struct kvm_info *ki,
struct perf_sample *sample,
bool sample_self)
{
- return __hists__add_entry(hists, al, sym_parent, bi, mi, NULL,
+ return __hists__add_entry(hists, al, sym_parent, bi, mi, ki, NULL,
sample, sample_self, ops);
}
@@ -835,7 +854,7 @@ iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al
*/
sample->period = cost;
- he = hists__add_entry(hists, al, iter->parent, NULL, mi,
+ he = hists__add_entry(hists, al, iter->parent, NULL, mi, NULL,
sample, true);
if (!he)
return -ENOMEM;
@@ -908,8 +927,10 @@ iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
if (iter->curr >= iter->total)
return 0;
- al->maps = bi[i].to.ms.maps;
- al->map = bi[i].to.ms.map;
+ maps__put(al->maps);
+ al->maps = maps__get(bi[i].to.ms.maps);
+ map__put(al->map);
+ al->map = map__get(bi[i].to.ms.map);
al->sym = bi[i].to.ms.sym;
al->addr = bi[i].to.addr;
return 1;
@@ -938,7 +959,7 @@ iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *a
sample->period = 1;
sample->weight = bi->flags.cycles ? bi->flags.cycles : 1;
- he = hists__add_entry(hists, al, iter->parent, &bi[i], NULL,
+ he = hists__add_entry(hists, al, iter->parent, &bi[i], NULL, NULL,
sample, true);
if (he == NULL)
return -ENOMEM;
@@ -976,7 +997,7 @@ iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location
struct hist_entry *he;
he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
- sample, true);
+ NULL, sample, true);
if (he == NULL)
return -ENOMEM;
@@ -1007,15 +1028,19 @@ iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
struct addr_location *al __maybe_unused)
{
struct hist_entry **he_cache;
+ struct callchain_cursor *cursor = get_tls_callchain_cursor();
- callchain_cursor_commit(&callchain_cursor);
+ if (cursor == NULL)
+ return -ENOMEM;
+
+ callchain_cursor_commit(cursor);
/*
* This is for detecting cycles or recursions so that they're
* cumulated only one time to prevent entries more than 100%
* overhead.
*/
- he_cache = malloc(sizeof(*he_cache) * (callchain_cursor.nr + 1));
+ he_cache = malloc(sizeof(*he_cache) * (cursor->nr + 1));
if (he_cache == NULL)
return -ENOMEM;
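
get_tls_callchain_cursor() replaces the old global callchain_cursor: each thread gets its own lazily allocated cursor, and allocation failure surfaces as a NULL return that callers such as iter_prepare_cumulative_entry() must now check. The general shape, sketched with a hypothetical cursor type:

    #include <stdlib.h>

    struct cursor { unsigned int nr; };

    static __thread struct cursor *tls_cursor;

    /* One cursor per thread, allocated on first use; may return NULL,
     * so every caller needs an error check. */
    static struct cursor *get_tls_cursor(void)
    {
            if (tls_cursor == NULL)
                    tls_cursor = calloc(1, sizeof(*tls_cursor));
            return tls_cursor;
    }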
@@ -1036,7 +1061,7 @@ iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
struct hist_entry *he;
int err = 0;
- he = hists__add_entry(hists, al, iter->parent, NULL, NULL,
+ he = hists__add_entry(hists, al, iter->parent, NULL, NULL, NULL,
sample, true);
if (he == NULL)
return -ENOMEM;
@@ -1050,7 +1075,7 @@ iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
* We need to re-initialize the cursor since callchain_append()
* advanced the cursor to the end.
*/
- callchain_cursor_commit(&callchain_cursor);
+ callchain_cursor_commit(get_tls_callchain_cursor());
hists__inc_nr_samples(hists, he->filtered);
@@ -1063,7 +1088,7 @@ iter_next_cumulative_entry(struct hist_entry_iter *iter,
{
struct callchain_cursor_node *node;
- node = callchain_cursor_current(&callchain_cursor);
+ node = callchain_cursor_current(get_tls_callchain_cursor());
if (node == NULL)
return 0;
@@ -1109,12 +1134,15 @@ iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
.raw_size = sample->raw_size,
};
int i;
- struct callchain_cursor cursor;
+ struct callchain_cursor cursor, *tls_cursor = get_tls_callchain_cursor();
bool fast = hists__has(he_tmp.hists, sym);
- callchain_cursor_snapshot(&cursor, &callchain_cursor);
+ if (tls_cursor == NULL)
+ return -ENOMEM;
+
+ callchain_cursor_snapshot(&cursor, tls_cursor);
- callchain_cursor_advance(&callchain_cursor);
+ callchain_cursor_advance(tls_cursor);
/*
* Check if there's duplicate entries in the callchain.
@@ -1137,7 +1165,7 @@ iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
}
he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
- sample, false);
+ NULL, sample, false);
if (he == NULL)
return -ENOMEM;
@@ -1200,7 +1228,7 @@ int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
if (al)
alm = map__get(al->map);
- err = sample__resolve_callchain(iter->sample, &callchain_cursor, &iter->parent,
+ err = sample__resolve_callchain(iter->sample, get_tls_callchain_cursor(), &iter->parent,
iter->evsel, al, max_stack_depth);
if (err) {
map__put(alm);
@@ -1288,28 +1316,31 @@ void hist_entry__delete(struct hist_entry *he)
struct hist_entry_ops *ops = he->ops;
thread__zput(he->thread);
- map__zput(he->ms.map);
+ map_symbol__exit(&he->ms);
if (he->branch_info) {
- map__zput(he->branch_info->from.ms.map);
- map__zput(he->branch_info->to.ms.map);
- free_srcline(he->branch_info->srcline_from);
- free_srcline(he->branch_info->srcline_to);
+ map_symbol__exit(&he->branch_info->from.ms);
+ map_symbol__exit(&he->branch_info->to.ms);
+ zfree_srcline(&he->branch_info->srcline_from);
+ zfree_srcline(&he->branch_info->srcline_to);
zfree(&he->branch_info);
}
if (he->mem_info) {
- map__zput(he->mem_info->iaddr.ms.map);
- map__zput(he->mem_info->daddr.ms.map);
+ map_symbol__exit(&he->mem_info->iaddr.ms);
+ map_symbol__exit(&he->mem_info->daddr.ms);
mem_info__zput(he->mem_info);
}
if (he->block_info)
block_info__zput(he->block_info);
+ if (he->kvm_info)
+ kvm_info__zput(he->kvm_info);
+
zfree(&he->res_samples);
zfree(&he->stat_acc);
- free_srcline(he->srcline);
+ zfree_srcline(&he->srcline);
if (he->srcfile && he->srcfile[0])
zfree(&he->srcfile);
free_callchain(he->callchain);
@@ -1542,8 +1573,13 @@ static int hists__hierarchy_insert_entry(struct hists *hists,
if (hist_entry__has_callchains(new_he) &&
symbol_conf.use_callchain) {
- callchain_cursor_reset(&callchain_cursor);
- if (callchain_merge(&callchain_cursor,
+ struct callchain_cursor *cursor = get_tls_callchain_cursor();
+
+ if (cursor == NULL)
+ return -1;
+
+ callchain_cursor_reset(cursor);
+ if (callchain_merge(cursor,
new_he->callchain,
he->callchain) < 0)
ret = -1;
@@ -1584,11 +1620,15 @@ static int hists__collapse_insert_entry(struct hists *hists,
he_stat__add_stat(iter->stat_acc, he->stat_acc);
if (hist_entry__has_callchains(he) && symbol_conf.use_callchain) {
- callchain_cursor_reset(&callchain_cursor);
- if (callchain_merge(&callchain_cursor,
- iter->callchain,
- he->callchain) < 0)
- ret = -1;
+ struct callchain_cursor *cursor = get_tls_callchain_cursor();
+
+ if (cursor != NULL) {
+ callchain_cursor_reset(cursor);
+ if (callchain_merge(cursor, iter->callchain, he->callchain) < 0)
+ ret = -1;
+ } else {
+ ret = 0;
+ }
}
hist_entry__delete(he);
return ret;
@@ -1612,13 +1652,13 @@ struct rb_root_cached *hists__get_rotate_entries_in(struct hists *hists)
{
struct rb_root_cached *root;
- pthread_mutex_lock(&hists->lock);
+ mutex_lock(&hists->lock);
root = hists->entries_in;
if (++hists->entries_in > &hists->entries_in_array[1])
hists->entries_in = &hists->entries_in_array[0];
- pthread_mutex_unlock(&hists->lock);
+ mutex_unlock(&hists->lock);
return root;
}
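
hists->lock switches from a raw pthread_mutex_t to perf's struct mutex wrapper with mutex_init()/mutex_lock()/mutex_unlock(). A reduced sketch of what such a wrapper adds, not the actual tools/perf/util/mutex.c:

    #include <pthread.h>
    #include <stdlib.h>

    struct mutex { pthread_mutex_t lock; };

    /* Turn pthread error codes into hard failures instead of silently
     * discarding them, as raw pthread_mutex_* call sites tend to do. */
    static void mutex_check(int err)
    {
            if (err != 0)
                    abort();
    }

    static void mutex_init(struct mutex *mtx)
    {
            mutex_check(pthread_mutex_init(&mtx->lock, NULL));
    }

    static void mutex_lock(struct mutex *mtx)
    {
            mutex_check(pthread_mutex_lock(&mtx->lock));
    }

    static void mutex_unlock(struct mutex *mtx)
    {
            mutex_check(pthread_mutex_unlock(&mtx->lock));
    }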
@@ -1770,8 +1810,8 @@ static void hierarchy_insert_output_entry(struct rb_root_cached *root,
/* update column width of dynamic entry */
perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
- if (perf_hpp__is_dynamic_entry(fmt))
- fmt->sort(fmt, he, NULL);
+ if (fmt->init)
+ fmt->init(fmt, he);
}
}
@@ -1868,10 +1908,10 @@ static void __hists__insert_output_entry(struct rb_root_cached *entries,
rb_link_node(&he->rb_node, parent, p);
rb_insert_color_cached(&he->rb_node, entries, leftmost);
+ /* update column width of dynamic entries */
perf_hpp_list__for_each_sort_list(&perf_hpp_list, fmt) {
- if (perf_hpp__is_dynamic_entry(fmt) &&
- perf_hpp__defined_dynamic_entry(fmt, he->hists))
- fmt->sort(fmt, he, NULL); /* update column width */
+ if (fmt->init)
+ fmt->init(fmt, he);
}
}
@@ -2088,7 +2128,7 @@ static bool hists__filter_entry_by_dso(struct hists *hists,
struct hist_entry *he)
{
if (hists->dso_filter != NULL &&
- (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
+ (he->ms.map == NULL || map__dso(he->ms.map) != hists->dso_filter)) {
he->filtered |= (1 << HIST_FILTER__DSO);
return true;
}
@@ -2100,7 +2140,7 @@ static bool hists__filter_entry_by_thread(struct hists *hists,
struct hist_entry *he)
{
if (hists->thread_filter != NULL &&
- he->thread != hists->thread_filter) {
+ !RC_CHK_EQUAL(he->thread, hists->thread_filter)) {
he->filtered |= (1 << HIST_FILTER__THREAD);
return true;
}
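
The thread filter comparison uses RC_CHK_EQUAL() because, with reference-count checking enabled, two handles to the same thread can be distinct wrapper objects, and a raw pointer compare would give a false negative. Conceptually (a simplified model, not the real rc_check.h macros):

    /* Under refcount checking each get() may hand out a distinct
     * wrapper around the same underlying object. */
    struct thread;

    struct thread_handle {
            struct thread *orig;  /* the real, shared object */
    };

    /* Equal if the handles match, or if they wrap the same object. */
    #define RC_CHK_EQUAL(a, b) ((a) == (b) || (a)->orig == (b)->orig)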
@@ -2308,18 +2348,28 @@ void events_stats__inc(struct events_stats *stats, u32 type)
++stats->nr_events[type];
}
-void hists__inc_nr_events(struct hists *hists, u32 type)
+static void hists_stats__inc(struct hists_stats *stats)
{
- events_stats__inc(&hists->stats, type);
+ ++stats->nr_samples;
+}
+
+void hists__inc_nr_events(struct hists *hists)
+{
+ hists_stats__inc(&hists->stats);
}
void hists__inc_nr_samples(struct hists *hists, bool filtered)
{
- events_stats__inc(&hists->stats, PERF_RECORD_SAMPLE);
+ hists_stats__inc(&hists->stats);
if (!filtered)
hists->stats.nr_non_filtered_samples++;
}
+void hists__inc_nr_lost_samples(struct hists *hists, u32 lost)
+{
+ hists->stats.nr_lost_samples += lost;
+}
+
static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
struct hist_entry *pair)
{
@@ -2624,8 +2674,6 @@ void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
/* If we have branch cycles always annotate them. */
if (bs && bs->nr && entries[0].flags.cycles) {
- int i;
-
bi = sample__resolve_bstack(sample, al);
if (bi) {
struct addr_map_symbol *prev = NULL;
@@ -2640,7 +2688,7 @@ void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
* Note that perf stores branches reversed from
* program order!
*/
- for (i = bs->nr - 1; i >= 0; i--) {
+ for (int i = bs->nr - 1; i >= 0; i--) {
addr_map_symbol__account_cycles(&bi[i].from,
nonany_branch_mode ? NULL : prev,
bi[i].flags.cycles);
@@ -2649,19 +2697,34 @@ void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
if (total_cycles)
*total_cycles += bi[i].flags.cycles;
}
+ for (unsigned int i = 0; i < bs->nr; i++) {
+ map_symbol__exit(&bi[i].to.ms);
+ map_symbol__exit(&bi[i].from.ms);
+ }
free(bi);
}
}
}
-size_t perf_evlist__fprintf_nr_events(struct evlist *evlist, FILE *fp)
+size_t evlist__fprintf_nr_events(struct evlist *evlist, FILE *fp,
+ bool skip_empty)
{
struct evsel *pos;
size_t ret = 0;
evlist__for_each_entry(evlist, pos) {
+ struct hists *hists = evsel__hists(pos);
+
+ if (skip_empty && !hists->stats.nr_samples && !hists->stats.nr_lost_samples)
+ continue;
+
ret += fprintf(fp, "%s stats:\n", evsel__name(pos));
- ret += events_stats__fprintf(&evsel__hists(pos)->stats, fp);
+ if (hists->stats.nr_samples)
+ ret += fprintf(fp, "%16s events: %10d\n",
+ "SAMPLE", hists->stats.nr_samples);
+ if (hists->stats.nr_lost_samples)
+ ret += fprintf(fp, "%16s events: %10d\n",
+ "LOST_SAMPLES", hists->stats.nr_lost_samples);
}
return ret;
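
evlist__fprintf_nr_events() (renamed from perf_evlist__fprintf_nr_events()) now prints the per-hists counters directly and, with skip_empty, omits evsels that saw no samples at all. The reporting logic in isolation (stats__fprintf and the struct name are made up, mirroring the hunk above):

    #include <stdbool.h>
    #include <stdio.h>

    struct hists_stats_sketch {
            unsigned int nr_samples;
            unsigned int nr_lost_samples;
    };

    /* One line per non-zero counter; with skip_empty, fully empty
     * stats produce no output at all. */
    static size_t stats__fprintf(const struct hists_stats_sketch *st,
                                 FILE *fp, bool skip_empty)
    {
            size_t ret = 0;

            if (skip_empty && !st->nr_samples && !st->nr_lost_samples)
                    return 0;
            if (st->nr_samples)
                    ret += fprintf(fp, "%16s events: %10u\n",
                                   "SAMPLE", st->nr_samples);
            if (st->nr_lost_samples)
                    ret += fprintf(fp, "%16s events: %10u\n",
                                   "LOST_SAMPLES", st->nr_lost_samples);
            return ret;
    }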
@@ -2681,7 +2744,7 @@ int __hists__scnprintf_title(struct hists *hists, char *bf, size_t size, bool sh
const struct dso *dso = hists->dso_filter;
struct thread *thread = hists->thread_filter;
int socket_id = hists->socket_filter;
- unsigned long nr_samples = hists->stats.nr_events[PERF_RECORD_SAMPLE];
+ unsigned long nr_samples = hists->stats.nr_samples;
u64 nr_events = hists->stats.total_period;
struct evsel *evsel = hists_to_evsel(hists);
const char *ev_name = evsel__name(evsel);
@@ -2708,7 +2771,7 @@ int __hists__scnprintf_title(struct hists *hists, char *bf, size_t size, bool sh
nr_samples += pos_hists->stats.nr_non_filtered_samples;
nr_events += pos_hists->stats.total_non_filtered_period;
} else {
- nr_samples += pos_hists->stats.nr_events[PERF_RECORD_SAMPLE];
+ nr_samples += pos_hists->stats.nr_samples;
nr_events += pos_hists->stats.total_period;
}
}
@@ -2735,12 +2798,12 @@ int __hists__scnprintf_title(struct hists *hists, char *bf, size_t size, bool sh
if (hists__has(hists, thread)) {
printed += scnprintf(bf + printed, size - printed,
", Thread: %s(%d)",
- (thread->comm_set ? thread__comm_str(thread) : ""),
- thread->tid);
+ (thread__comm_set(thread) ? thread__comm_str(thread) : ""),
+ thread__tid(thread));
} else {
printed += scnprintf(bf + printed, size - printed,
", Thread: %s",
- (thread->comm_set ? thread__comm_str(thread) : ""));
+ (thread__comm_set(thread) ? thread__comm_str(thread) : ""));
}
}
if (dso)
@@ -2783,7 +2846,7 @@ int __hists__init(struct hists *hists, struct perf_hpp_list *hpp_list)
hists->entries_in = &hists->entries_in_array[0];
hists->entries_collapsed = RB_ROOT_CACHED;
hists->entries = RB_ROOT_CACHED;
- pthread_mutex_init(&hists->lock, NULL);
+ mutex_init(&hists->lock);
hists->socket_filter = -1;
hists->hpp_list = hpp_list;
INIT_LIST_HEAD(&hists->hpp_formats);