Diffstat (limited to 'arch/x86/events/core.c')
-rw-r--r--  arch/x86/events/core.c | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index f315425d8468..ceb712b0a1c6 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -561,14 +561,14 @@ int x86_pmu_hw_config(struct perf_event *event)
 	}
 
 	/* sample_regs_user never support XMM registers */
-	if (unlikely(event->attr.sample_regs_user & PEBS_XMM_REGS))
+	if (unlikely(event->attr.sample_regs_user & PERF_REG_EXTENDED_MASK))
 		return -EINVAL;
 	/*
 	 * Besides the general purpose registers, XMM registers may
 	 * be collected in PEBS on some platforms, e.g. Icelake
 	 */
-	if (unlikely(event->attr.sample_regs_intr & PEBS_XMM_REGS)) {
-		if (x86_pmu.pebs_no_xmm_regs)
+	if (unlikely(event->attr.sample_regs_intr & PERF_REG_EXTENDED_MASK)) {
+		if (!(event->pmu->capabilities & PERF_PMU_CAP_EXTENDED_REGS))
 			return -EINVAL;
 
 		if (!event->attr.precise_ip)
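A note on the hunk above: sampling XMM registers is now gated on the
generic PERF_PMU_CAP_EXTENDED_REGS capability instead of the
x86-internal pebs_no_xmm_regs flag, with the register bits taken from
the generic PERF_REG_EXTENDED_MASK. A minimal userspace sketch of a
request that reaches these checks; it assumes PERF_REG_EXTENDED_MASK
is the XMM mask exported by <asm/perf_regs.h>, and the helper name and
event choice are illustrative only, not part of this patch:

/*
 * Sketch: open a PEBS event that samples the extended (XMM) registers.
 * Returns a perf fd, or -1 with errno set; the kernel rejects this
 * with EINVAL unless the PMU advertises PERF_PMU_CAP_EXTENDED_REGS
 * and precise_ip is set, per the checks above.
 */
#include <linux/perf_event.h>
#include <asm/perf_regs.h>
#include <sys/syscall.h>
#include <string.h>
#include <unistd.h>

static int open_xmm_sampling_event(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.sample_period = 100000;
	attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_REGS_INTR;
	attr.sample_regs_intr = PERF_REG_EXTENDED_MASK;	/* XMM0-XMM15 */
	attr.precise_ip = 2;		/* PEBS, required by the check above */

	return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
}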
@@ -2179,7 +2179,7 @@ static void x86_pmu_event_mapped(struct perf_event *event, struct mm_struct *mm)
 	 * For now, this can't happen because all callers hold mmap_sem
 	 * for write. If this changes, we'll need a different solution.
 	 */
-	lockdep_assert_held_exclusive(&mm->mmap_sem);
+	lockdep_assert_held_write(&mm->mmap_sem);
 
 	if (atomic_inc_return(&mm->context.perf_rdpmc_allowed) == 1)
 		on_each_cpu_mask(mm_cpumask(mm), refresh_pce, NULL, 1);
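A note on the hunk above: this follows the lockdep API rename from
lockdep_assert_held_exclusive() to lockdep_assert_held_write(); the
semantics are unchanged, asserting that the rwsem (here mmap_sem) is
held for write by the current context. A hypothetical sketch of the
same idiom; my_mm_helper() is illustrative, not kernel code:

#include <linux/lockdep.h>
#include <linux/mm_types.h>

/*
 * Any function that relies on its caller write-holding mmap_sem can
 * document and, under CONFIG_LOCKDEP, enforce that contract the same
 * way the hunk above does.
 */
static void my_mm_helper(struct mm_struct *mm)
{
	/* Splats if the current task does not hold mmap_sem for write. */
	lockdep_assert_held_write(&mm->mmap_sem);

	/* ... modify state protected by mmap_sem ... */
}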
@@ -2402,13 +2402,13 @@ perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
 		return;
 	}
 
-	if (perf_hw_regs(regs)) {
-		if (perf_callchain_store(entry, regs->ip))
-			return;
+	if (perf_callchain_store(entry, regs->ip))
+		return;
+
+	if (perf_hw_regs(regs))
 		unwind_start(&state, current, regs, NULL);
-	} else {
+	else
 		unwind_start(&state, current, NULL, (void *)regs->sp);
-	}
 
 	for (; !unwind_done(&state); unwind_next_frame(&state)) {
 		addr = unwind_get_return_address(&state);
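A note on the hunk above: regs->ip is now stored unconditionally, so
callchains that have to start the unwinder from a saved stack pointer
(the !perf_hw_regs() case) also begin with the sampled instruction
pointer instead of dropping it. A condensed sketch of the resulting
flow in perf_callchain_kernel(), with declarations and the preceding
guest check elided:

/* Record the sampled ip first, for both kinds of regs. */
if (perf_callchain_store(entry, regs->ip))
	return;			/* callchain entry already full */

if (perf_hw_regs(regs))		/* full pt_regs captured at the sample */
	unwind_start(&state, current, regs, NULL);
else				/* only a saved stack pointer available */
	unwind_start(&state, current, NULL, (void *)regs->sp);

for (; !unwind_done(&state); unwind_next_frame(&state)) {
	addr = unwind_get_return_address(&state);
	if (!addr || perf_callchain_store(entry, addr))
		return;
}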