path: root/tools/perf/util/hist.c
author		Jiri Olsa <jolsa@kernel.org>	2016-07-05 08:56:03 +0200
committer	Arnaldo Carvalho de Melo <acme@redhat.com>	2016-07-12 00:00:39 -0300
commit		0a269a6bb3f86abb218b8632f13c4ecd9b6b92af (patch)
tree		e924bc0560aaa758b0721b1c30f1d9b0b4384fc2 /tools/perf/util/hist.c
parent		Revert "perf/x86/intel, watchdog: Switch NMI watchdog to ref cycles on x86" (diff)
perf hists: Introduce hist_entry__init function
Move the 'struct hist_entry' initialization code into a separate function.
It will be useful and clearer for the following patches that introduce
allocation callbacks.

The hist_entry object is released in the hist_entry__new function (where
it is allocated) rather than in hist_entry__init.

Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Cc: David Ahern <dsahern@gmail.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1467701765-26194-2-git-send-email-jolsa@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
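For illustration only (this is not code from the patch; 'struct entry', entry__init() and entry__new() are simplified stand-in names), here is a minimal C sketch of the same allocate/init split: the init helper copies the template, duplicates any owned buffer and unwinds its own partial work on failure, while the new helper both allocates the object and, where it allocated it, frees it again if initialization fails.

#include <errno.h>
#include <stdlib.h>
#include <string.h>

/* Simplified stand-in for 'struct hist_entry'; the real structure differs. */
struct entry {
	void	*data;
	size_t	 data_len;
};

/*
 * Copy the template and duplicate the buffer it owns.  On failure undo
 * any partial work and return -ENOMEM; freeing 'e' itself is left to the
 * caller that allocated it.
 */
static int entry__init(struct entry *e, const struct entry *template)
{
	*e = *template;

	if (template->data) {
		e->data = malloc(template->data_len);
		if (e->data == NULL)
			return -ENOMEM;
		memcpy(e->data, template->data, template->data_len);
	}

	return 0;
}

/*
 * Allocate in one place, delegate setup to entry__init(), and release the
 * allocation here (where it was made) when initialization fails.
 */
static struct entry *entry__new(const struct entry *template)
{
	struct entry *e = calloc(1, sizeof(*e));

	if (e && entry__init(e, template)) {
		free(e);
		e = NULL;
	}

	return e;
}

The error-path ownership mirrors the patch: the init helper never frees the entry it was handed, so the new helper can release it unconditionally when initialization reports failure.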
Diffstat (limited to 'tools/perf/util/hist.c')
-rw-r--r--	tools/perf/util/hist.c	139
1 file changed, 73 insertions(+), 66 deletions(-)
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index e1fcc8d7c01a..04f3b52a319c 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -352,86 +352,93 @@ void hists__delete_entries(struct hists *hists)
  * histogram, sorted on item, collects periods
  */
 
-static struct hist_entry *hist_entry__new(struct hist_entry *template,
-					  bool sample_self)
-{
-	size_t callchain_size = 0;
-	struct hist_entry *he;
+static int hist_entry__init(struct hist_entry *he,
+			    struct hist_entry *template,
+			    bool sample_self)
+{
+	*he = *template;
+
+	if (symbol_conf.cumulate_callchain) {
+		he->stat_acc = malloc(sizeof(he->stat));
+		if (he->stat_acc == NULL)
+			return -ENOMEM;
+		memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
+		if (!sample_self)
+			memset(&he->stat, 0, sizeof(he->stat));
+	}
 
-	if (symbol_conf.use_callchain)
-		callchain_size = sizeof(struct callchain_root);
+	map__get(he->ms.map);
 
-	he = zalloc(sizeof(*he) + callchain_size);
+	if (he->branch_info) {
+		/*
+		 * This branch info is (a part of) allocated from
+		 * sample__resolve_bstack() and will be freed after
+		 * adding new entries.  So we need to save a copy.
+		 */
+		he->branch_info = malloc(sizeof(*he->branch_info));
+		if (he->branch_info == NULL) {
+			map__zput(he->ms.map);
+			free(he->stat_acc);
+			return -ENOMEM;
+		}
 
-	if (he != NULL) {
-		*he = *template;
+		memcpy(he->branch_info, template->branch_info,
+		       sizeof(*he->branch_info));
 
-		if (symbol_conf.cumulate_callchain) {
-			he->stat_acc = malloc(sizeof(he->stat));
-			if (he->stat_acc == NULL) {
-				free(he);
-				return NULL;
-			}
-			memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
-			if (!sample_self)
-				memset(&he->stat, 0, sizeof(he->stat));
-		}
+		map__get(he->branch_info->from.map);
+		map__get(he->branch_info->to.map);
+	}
 
-		map__get(he->ms.map);
+	if (he->mem_info) {
+		map__get(he->mem_info->iaddr.map);
+		map__get(he->mem_info->daddr.map);
+	}
 
-		if (he->branch_info) {
-			/*
-			 * This branch info is (a part of) allocated from
-			 * sample__resolve_bstack() and will be freed after
-			 * adding new entries.  So we need to save a copy.
-			 */
-			he->branch_info = malloc(sizeof(*he->branch_info));
-			if (he->branch_info == NULL) {
-				map__zput(he->ms.map);
-				free(he->stat_acc);
-				free(he);
-				return NULL;
-			}
+	if (symbol_conf.use_callchain)
+		callchain_init(he->callchain);
 
-			memcpy(he->branch_info, template->branch_info,
-			       sizeof(*he->branch_info));
+	if (he->raw_data) {
+		he->raw_data = memdup(he->raw_data, he->raw_size);
 
-			map__get(he->branch_info->from.map);
-			map__get(he->branch_info->to.map);
+		if (he->raw_data == NULL) {
+			map__put(he->ms.map);
+			if (he->branch_info) {
+				map__put(he->branch_info->from.map);
+				map__put(he->branch_info->to.map);
+				free(he->branch_info);
+			}
+			if (he->mem_info) {
+				map__put(he->mem_info->iaddr.map);
+				map__put(he->mem_info->daddr.map);
+			}
+			free(he->stat_acc);
+			return -ENOMEM;
 		}
+	}
+	INIT_LIST_HEAD(&he->pairs.node);
+	thread__get(he->thread);
 
-		if (he->mem_info) {
-			map__get(he->mem_info->iaddr.map);
-			map__get(he->mem_info->daddr.map);
-		}
+	if (!symbol_conf.report_hierarchy)
+		he->leaf = true;
 
-		if (symbol_conf.use_callchain)
-			callchain_init(he->callchain);
+	return 0;
+}
 
-		if (he->raw_data) {
-			he->raw_data = memdup(he->raw_data, he->raw_size);
+static struct hist_entry *hist_entry__new(struct hist_entry *template,
+					  bool sample_self)
+{
+	size_t callchain_size = 0;
+	struct hist_entry *he;
+	int err = 0;
 
-			if (he->raw_data == NULL) {
-				map__put(he->ms.map);
-				if (he->branch_info) {
-					map__put(he->branch_info->from.map);
-					map__put(he->branch_info->to.map);
-					free(he->branch_info);
-				}
-				if (he->mem_info) {
-					map__put(he->mem_info->iaddr.map);
-					map__put(he->mem_info->daddr.map);
-				}
-				free(he->stat_acc);
-				free(he);
-				return NULL;
-			}
-		}
-		INIT_LIST_HEAD(&he->pairs.node);
-		thread__get(he->thread);
+	if (symbol_conf.use_callchain)
+		callchain_size = sizeof(struct callchain_root);
 
-		if (!symbol_conf.report_hierarchy)
-			he->leaf = true;
+	he = zalloc(sizeof(*he) + callchain_size);
+	if (he) {
+		err = hist_entry__init(he, template, sample_self);
+		if (err)
+			zfree(&he);
 	}
 
 	return he;