path: root/include/uapi/linux/perf_event.h
author     Andi Kleen <ak@linux.intel.com>    2015-10-20 11:46:34 -0700
committer  Ingo Molnar <mingo@kernel.org>     2015-11-23 09:58:25 +0100
commit     b16a5b52eb90d92b597257778e51e1fdc6423e64 (patch)
tree       83dca099f0269bd528a1c59174c5d7bd7067c5b5 /include/uapi/linux/perf_event.h
parent     perf/x86: Optimize stack walk user accesses (diff)
download   wireguard-linux-b16a5b52eb90d92b597257778e51e1fdc6423e64.tar.xz
           wireguard-linux-b16a5b52eb90d92b597257778e51e1fdc6423e64.zip
perf/x86: Add option to disable reading branch flags/cycles
With LBRv5, reading the extra LBR flags like mispredict, TSX, and cycles is not free anymore, as it has moved to a separate MSR.

For callstack mode we don't need any of this information, so we can avoid the unnecessary MSR read. Add flags to the perf interface so that perf record can request not collecting this information.

Add branch_sample_type flags for CYCLES and FLAGS. It's a bit unusual for branch_sample_types to be negative (disable), not positive (enable), but since the legacy ABI reported the flags we need some form of explicit disabling to avoid breaking the ABI.

Once we have the flags, the x86 perf code can keep track of whether any users need the flags. If no one needs them, the information is not collected.

This cuts down the cost of LBR callstack on Skylake significantly. Profiling a kernel build with LBR call stack, the average run time of the PMI handler drops by 43%.

Signed-off-by: Andi Kleen <ak@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Cc: acme@kernel.org
Cc: jolsa@kernel.org
Link: http://lkml.kernel.org/r/1445366797-30894-2-git-send-email-andi@firstfloor.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
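A minimal user-space sketch (not part of this commit) of how a profiler might request LBR call stacks while opting out of flag/cycle collection with the new bits. The event type, sample period, and helper name are illustrative, and error handling is omitted:

/*
 * Illustrative only: open a sampling event that collects LBR call stacks
 * but asks the kernel to skip branch flags and cycle counts via the new
 * PERF_SAMPLE_BRANCH_NO_FLAGS / PERF_SAMPLE_BRANCH_NO_CYCLES bits.
 */
#include <linux/perf_event.h>
#include <string.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

static int open_lbr_callstack_event(pid_t pid)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.sample_period = 100000;
	attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_BRANCH_STACK;
	attr.branch_sample_type = PERF_SAMPLE_BRANCH_USER |
				  PERF_SAMPLE_BRANCH_CALL_STACK |
				  PERF_SAMPLE_BRANCH_NO_FLAGS |
				  PERF_SAMPLE_BRANCH_NO_CYCLES;
	attr.exclude_kernel = 1;

	/* perf_event_open() has no glibc wrapper; use syscall() directly. */
	return (int)syscall(__NR_perf_event_open, &attr, pid, -1, -1, 0);
}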
Diffstat (limited to 'include/uapi/linux/perf_event.h')
-rw-r--r--  include/uapi/linux/perf_event.h | 6
1 file changed, 6 insertions, 0 deletions
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index d801bb0d9f6d..1afe9623c1a7 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -171,6 +171,9 @@ enum perf_branch_sample_type_shift {
 	PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT	= 12, /* indirect jumps */
 	PERF_SAMPLE_BRANCH_CALL_SHIFT		= 13, /* direct call */
 
+	PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT	= 14, /* no flags */
+	PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT	= 15, /* no cycles */
+
 	PERF_SAMPLE_BRANCH_MAX_SHIFT		/* non-ABI */
 };
@@ -192,6 +195,9 @@ enum perf_branch_sample_type {
 	PERF_SAMPLE_BRANCH_IND_JUMP	= 1U << PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT,
 	PERF_SAMPLE_BRANCH_CALL		= 1U << PERF_SAMPLE_BRANCH_CALL_SHIFT,
 
+	PERF_SAMPLE_BRANCH_NO_FLAGS	= 1U << PERF_SAMPLE_BRANCH_NO_FLAGS_SHIFT,
+	PERF_SAMPLE_BRANCH_NO_CYCLES	= 1U << PERF_SAMPLE_BRANCH_NO_CYCLES_SHIFT,
+
 	PERF_SAMPLE_BRANCH_MAX		= 1U << PERF_SAMPLE_BRANCH_MAX_SHIFT,
 };