path: root/tools/perf/util/stat-shadow.c
author    Jiri Olsa <jolsa@kernel.org>    2017-01-23 22:42:56 +0100
committer Arnaldo Carvalho de Melo <acme@redhat.com>    2017-10-30 13:40:33 -0300
commit  54830dd0c342525de2ff10f8be7cf0a9f062b896 (patch)
tree    52f57c985373b37014f824cba0d4f4e0efc0f169 /tools/perf/util/stat-shadow.c
parent  perf tools: Add perf_data_file__write function (diff)
perf stat: Move the shadow stats scale computation in perf_stat__update_shadow_stats
Move the shadow stats scale computation to the perf_stat__update_shadow_stats()
function, so it's centralized and we don't forget to do it. It also saves a few
lines of code.

Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Changbin Du <changbin.du@intel.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Jin Yao <yao.jin@linux.intel.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Wang Nan <wangnan0@huawei.com>
Link: http://lkml.kernel.org/n/tip-htg7mmyxv6pcrf57qyo6msid@git.kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
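As a standalone illustration of what the commit message describes (apply the
scale factor once, inside the update function, instead of trusting every call
site to do it), here is a minimal C sketch. All names in it (toy_counter,
toy_update_shadow_stats, and so on) are hypothetical stand-ins for perf's
types; it demonstrates the pattern, not perf's actual implementation.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Toy stand-ins for perf's types; all names here are hypothetical. */
    struct toy_counter {
            double scale;           /* plays the role of counter->scale */
    };

    struct toy_stats {
            double sum;             /* running sum of scaled counts */
            uint64_t n;             /* number of updates */
    };

    static struct toy_stats runtime_stats;

    static void toy_update_stats(struct toy_stats *s, uint64_t val)
    {
            s->sum += val;
            s->n++;
    }

    /*
     * The pattern the patch applies: take the raw count by value and
     * scale it once here, so no call site can forget to do it.
     */
    static void toy_update_shadow_stats(struct toy_counter *counter,
                                        uint64_t count)
    {
            count *= counter->scale; /* mutates only the local copy */
            toy_update_stats(&runtime_stats, count);
    }

    int main(void)
    {
            struct toy_counter c = { .scale = 2.0 };

            /* Callers now pass the unscaled value. */
            toy_update_shadow_stats(&c, 1000);
            printf("sum=%.0f n=%" PRIu64 "\n",
                   runtime_stats.sum, runtime_stats.n);
            return 0;
    }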
Diffstat (limited to 'tools/perf/util/stat-shadow.c')
-rw-r--r--  tools/perf/util/stat-shadow.c  |  48
1 file changed, 25 insertions(+), 23 deletions(-)
diff --git a/tools/perf/util/stat-shadow.c b/tools/perf/util/stat-shadow.c
index a2c12d1ef32a..51ad03a799ec 100644
--- a/tools/perf/util/stat-shadow.c
+++ b/tools/perf/util/stat-shadow.c
@@ -178,58 +178,60 @@ void perf_stat__reset_shadow_stats(void)
* more semantic information such as miss/hit ratios,
* instruction rates, etc:
*/
-void perf_stat__update_shadow_stats(struct perf_evsel *counter, u64 *count,
+void perf_stat__update_shadow_stats(struct perf_evsel *counter, u64 count,
int cpu)
{
int ctx = evsel_context(counter);
+ count *= counter->scale;
+
if (perf_evsel__match(counter, SOFTWARE, SW_TASK_CLOCK) ||
perf_evsel__match(counter, SOFTWARE, SW_CPU_CLOCK))
- update_stats(&runtime_nsecs_stats[cpu], count[0]);
+ update_stats(&runtime_nsecs_stats[cpu], count);
else if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES))
- update_stats(&runtime_cycles_stats[ctx][cpu], count[0]);
+ update_stats(&runtime_cycles_stats[ctx][cpu], count);
else if (perf_stat_evsel__is(counter, CYCLES_IN_TX))
- update_stats(&runtime_cycles_in_tx_stats[ctx][cpu], count[0]);
+ update_stats(&runtime_cycles_in_tx_stats[ctx][cpu], count);
else if (perf_stat_evsel__is(counter, TRANSACTION_START))
- update_stats(&runtime_transaction_stats[ctx][cpu], count[0]);
+ update_stats(&runtime_transaction_stats[ctx][cpu], count);
else if (perf_stat_evsel__is(counter, ELISION_START))
- update_stats(&runtime_elision_stats[ctx][cpu], count[0]);
+ update_stats(&runtime_elision_stats[ctx][cpu], count);
else if (perf_stat_evsel__is(counter, TOPDOWN_TOTAL_SLOTS))
- update_stats(&runtime_topdown_total_slots[ctx][cpu], count[0]);
+ update_stats(&runtime_topdown_total_slots[ctx][cpu], count);
else if (perf_stat_evsel__is(counter, TOPDOWN_SLOTS_ISSUED))
- update_stats(&runtime_topdown_slots_issued[ctx][cpu], count[0]);
+ update_stats(&runtime_topdown_slots_issued[ctx][cpu], count);
else if (perf_stat_evsel__is(counter, TOPDOWN_SLOTS_RETIRED))
- update_stats(&runtime_topdown_slots_retired[ctx][cpu], count[0]);
+ update_stats(&runtime_topdown_slots_retired[ctx][cpu], count);
else if (perf_stat_evsel__is(counter, TOPDOWN_FETCH_BUBBLES))
- update_stats(&runtime_topdown_fetch_bubbles[ctx][cpu],count[0]);
+ update_stats(&runtime_topdown_fetch_bubbles[ctx][cpu], count);
else if (perf_stat_evsel__is(counter, TOPDOWN_RECOVERY_BUBBLES))
- update_stats(&runtime_topdown_recovery_bubbles[ctx][cpu], count[0]);
+ update_stats(&runtime_topdown_recovery_bubbles[ctx][cpu], count);
else if (perf_evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_FRONTEND))
- update_stats(&runtime_stalled_cycles_front_stats[ctx][cpu], count[0]);
+ update_stats(&runtime_stalled_cycles_front_stats[ctx][cpu], count);
else if (perf_evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_BACKEND))
- update_stats(&runtime_stalled_cycles_back_stats[ctx][cpu], count[0]);
+ update_stats(&runtime_stalled_cycles_back_stats[ctx][cpu], count);
else if (perf_evsel__match(counter, HARDWARE, HW_BRANCH_INSTRUCTIONS))
- update_stats(&runtime_branches_stats[ctx][cpu], count[0]);
+ update_stats(&runtime_branches_stats[ctx][cpu], count);
else if (perf_evsel__match(counter, HARDWARE, HW_CACHE_REFERENCES))
- update_stats(&runtime_cacherefs_stats[ctx][cpu], count[0]);
+ update_stats(&runtime_cacherefs_stats[ctx][cpu], count);
else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_L1D))
- update_stats(&runtime_l1_dcache_stats[ctx][cpu], count[0]);
+ update_stats(&runtime_l1_dcache_stats[ctx][cpu], count);
else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_L1I))
- update_stats(&runtime_l1_icache_stats[ctx][cpu], count[0]);
+ update_stats(&runtime_l1_icache_stats[ctx][cpu], count);
else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_LL))
- update_stats(&runtime_ll_cache_stats[ctx][cpu], count[0]);
+ update_stats(&runtime_ll_cache_stats[ctx][cpu], count);
else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_DTLB))
- update_stats(&runtime_dtlb_cache_stats[ctx][cpu], count[0]);
+ update_stats(&runtime_dtlb_cache_stats[ctx][cpu], count);
else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_ITLB))
- update_stats(&runtime_itlb_cache_stats[ctx][cpu], count[0]);
+ update_stats(&runtime_itlb_cache_stats[ctx][cpu], count);
else if (perf_stat_evsel__is(counter, SMI_NUM))
- update_stats(&runtime_smi_num_stats[ctx][cpu], count[0]);
+ update_stats(&runtime_smi_num_stats[ctx][cpu], count);
else if (perf_stat_evsel__is(counter, APERF))
- update_stats(&runtime_aperf_stats[ctx][cpu], count[0]);
+ update_stats(&runtime_aperf_stats[ctx][cpu], count);
if (counter->collect_stat) {
struct saved_value *v = saved_value_lookup(counter, cpu, true);
- update_stats(&v->stats, count[0]);
+ update_stats(&v->stats, count);
}
}
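A note on the signature change visible above: the old prototype took u64
*count and read count[0], i.e. the raw-value slot of struct
perf_counts_values, while the new one takes u64 count by value, so the
in-place count *= counter->scale touches only the local copy and no caller
can observe the scaling as a side effect. A hedged sketch of how a call site
adapts (the real callers live outside this page's diffstat, e.g. in
builtin-stat.c and util/stat.c, so treat this as an assumption):

    /* old: pass a pointer to the values array, scaling done by callers */
    perf_stat__update_shadow_stats(counter, count->values, cpu);

    /* new: pass the raw value; the scaling now happens inside */
    perf_stat__update_shadow_stats(counter, count->val, cpu);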