Diffstat (limited to 'arch/x86/events/intel/pt.c')
-rw-r--r-- | arch/x86/events/intel/pt.c | 163 |
1 file changed, 119 insertions, 44 deletions
diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c
index 8e2a12235e62..e8cf29d2b10c 100644
--- a/arch/x86/events/intel/pt.c
+++ b/arch/x86/events/intel/pt.c
@@ -18,11 +18,13 @@
 #include <linux/slab.h>
 #include <linux/device.h>

+#include <asm/cpuid/api.h>
 #include <asm/perf_event.h>
 #include <asm/insn.h>
 #include <asm/io.h>
 #include <asm/intel_pt.h>
-#include <asm/intel-family.h>
+#include <asm/cpu_device_id.h>
+#include <asm/msr.h>

 #include "../perf_event.h"
 #include "pt.h"
@@ -193,7 +195,7 @@ static int __init pt_pmu_hw_init(void)
 	int ret;
 	long i;

-	rdmsrl(MSR_PLATFORM_INFO, reg);
+	rdmsrq(MSR_PLATFORM_INFO, reg);
 	pt_pmu.max_nonturbo_ratio = (reg & 0xff00) >> 8;

 	/*
@@ -201,21 +203,21 @@ static int __init pt_pmu_hw_init(void)
 	 * otherwise, zero for numerator stands for "not enumerated"
 	 * as per SDM
 	 */
-	if (boot_cpu_data.cpuid_level >= CPUID_TSC_LEAF) {
+	if (boot_cpu_data.cpuid_level >= CPUID_LEAF_TSC) {
 		u32 eax, ebx, ecx, edx;

-		cpuid(CPUID_TSC_LEAF, &eax, &ebx, &ecx, &edx);
+		cpuid(CPUID_LEAF_TSC, &eax, &ebx, &ecx, &edx);

 		pt_pmu.tsc_art_num = ebx;
 		pt_pmu.tsc_art_den = eax;
 	}

 	/* model-specific quirks */
-	switch (boot_cpu_data.x86_model) {
-	case INTEL_FAM6_BROADWELL:
-	case INTEL_FAM6_BROADWELL_D:
-	case INTEL_FAM6_BROADWELL_G:
-	case INTEL_FAM6_BROADWELL_X:
+	switch (boot_cpu_data.x86_vfm) {
+	case INTEL_BROADWELL:
+	case INTEL_BROADWELL_D:
+	case INTEL_BROADWELL_G:
+	case INTEL_BROADWELL_X:
 		/* not setting BRANCH_EN will #GP, erratum BDM106 */
 		pt_pmu.branch_en_always_on = true;
 		break;
@@ -229,7 +231,7 @@ static int __init pt_pmu_hw_init(void)
 		 * "IA32_VMX_MISC[bit 14]" being 1 means PT can trace
 		 * post-VMXON.
 		 */
-		rdmsrl(MSR_IA32_VMX_MISC, reg);
+		rdmsrq(MSR_IA32_VMX_MISC, reg);
 		if (reg & BIT(14))
 			pt_pmu.vmx = true;
 	}
@@ -416,15 +418,18 @@ static bool pt_event_valid(struct perf_event *event)
 static void pt_config_start(struct perf_event *event)
 {
 	struct pt *pt = this_cpu_ptr(&pt_ctx);
-	u64 ctl = event->hw.config;
+	u64 ctl = event->hw.aux_config;
+
+	if (READ_ONCE(event->hw.aux_paused))
+		return;

 	ctl |= RTIT_CTL_TRACEEN;
 	if (READ_ONCE(pt->vmx_on))
 		perf_aux_output_flag(&pt->handle, PERF_AUX_FLAG_PARTIAL);
 	else
-		wrmsrl(MSR_IA32_RTIT_CTL, ctl);
+		wrmsrq(MSR_IA32_RTIT_CTL, ctl);

-	WRITE_ONCE(event->hw.config, ctl);
+	WRITE_ONCE(event->hw.aux_config, ctl);
 }

 /* Address ranges and their corresponding msr configuration registers */
@@ -481,12 +486,12 @@ static u64 pt_config_filters(struct perf_event *event)

 		/* avoid redundant msr writes */
 		if (pt->filters.filter[range].msr_a != filter->msr_a) {
-			wrmsrl(pt_address_ranges[range].msr_a, filter->msr_a);
+			wrmsrq(pt_address_ranges[range].msr_a, filter->msr_a);
 			pt->filters.filter[range].msr_a = filter->msr_a;
 		}

 		if (pt->filters.filter[range].msr_b != filter->msr_b) {
-			wrmsrl(pt_address_ranges[range].msr_b, filter->msr_b);
+			wrmsrq(pt_address_ranges[range].msr_b, filter->msr_b);
 			pt->filters.filter[range].msr_b = filter->msr_b;
 		}

@@ -503,9 +508,9 @@ static void pt_config(struct perf_event *event)
 	u64 reg;

 	/* First round: clear STATUS, in particular the PSB byte counter. */
-	if (!event->hw.config) {
+	if (!event->hw.aux_config) {
 		perf_event_itrace_started(event);
-		wrmsrl(MSR_IA32_RTIT_STATUS, 0);
+		wrmsrq(MSR_IA32_RTIT_STATUS, 0);
 	}

 	reg = pt_config_filters(event);
@@ -533,14 +538,31 @@ static void pt_config(struct perf_event *event)

 	reg |= (event->attr.config & PT_CONFIG_MASK);

-	event->hw.config = reg;
+	event->hw.aux_config = reg;
+
+	/*
+	 * Allow resume before starting so as not to overwrite a value set by a
+	 * PMI.
+	 */
+	barrier();
+	WRITE_ONCE(pt->resume_allowed, 1);
+	/* Configuration is complete, it is now OK to handle an NMI */
+	barrier();
+	WRITE_ONCE(pt->handle_nmi, 1);
+	barrier();
 	pt_config_start(event);
+	barrier();
+	/*
+	 * Allow pause after starting so its pt_config_stop() doesn't race with
+	 * pt_config_start().
+	 */
+	WRITE_ONCE(pt->pause_allowed, 1);
 }

 static void pt_config_stop(struct perf_event *event)
 {
 	struct pt *pt = this_cpu_ptr(&pt_ctx);
-	u64 ctl = READ_ONCE(event->hw.config);
+	u64 ctl = READ_ONCE(event->hw.aux_config);

 	/* may be already stopped by a PMI */
 	if (!(ctl & RTIT_CTL_TRACEEN))
@@ -548,9 +570,9 @@ static void pt_config_stop(struct perf_event *event)

 	ctl &= ~RTIT_CTL_TRACEEN;
 	if (!READ_ONCE(pt->vmx_on))
-		wrmsrl(MSR_IA32_RTIT_CTL, ctl);
+		wrmsrq(MSR_IA32_RTIT_CTL, ctl);

-	WRITE_ONCE(event->hw.config, ctl);
+	WRITE_ONCE(event->hw.aux_config, ctl);

 	/*
 	 * A wrmsr that disables trace generation serializes other PT
@@ -637,13 +659,13 @@ static void pt_config_buffer(struct pt_buffer *buf)
 	reg = virt_to_phys(base);
 	if (pt->output_base != reg) {
 		pt->output_base = reg;
-		wrmsrl(MSR_IA32_RTIT_OUTPUT_BASE, reg);
+		wrmsrq(MSR_IA32_RTIT_OUTPUT_BASE, reg);
 	}

 	reg = 0x7f | (mask << 7) | ((u64)buf->output_off << 32);
 	if (pt->output_mask != reg) {
 		pt->output_mask = reg;
-		wrmsrl(MSR_IA32_RTIT_OUTPUT_MASK, reg);
+		wrmsrq(MSR_IA32_RTIT_OUTPUT_MASK, reg);
 	}
 }

@@ -828,11 +850,13 @@ static void pt_buffer_advance(struct pt_buffer *buf)
 	buf->cur_idx++;

 	if (buf->cur_idx == buf->cur->last) {
-		if (buf->cur == buf->last)
+		if (buf->cur == buf->last) {
 			buf->cur = buf->first;
-		else
+			buf->wrapped = true;
+		} else {
 			buf->cur = list_entry(buf->cur->list.next, struct topa,
 					      list);
+		}
 		buf->cur_idx = 0;
 	}
 }
@@ -846,8 +870,11 @@ static void pt_buffer_advance(struct pt_buffer *buf)
 static void pt_update_head(struct pt *pt)
 {
 	struct pt_buffer *buf = perf_get_aux(&pt->handle);
+	bool wrapped = buf->wrapped;
 	u64 topa_idx, base, old;

+	buf->wrapped = false;
+
 	if (buf->single) {
 		local_set(&buf->data_size, buf->output_off);
 		return;
@@ -865,7 +892,7 @@ static void pt_update_head(struct pt *pt)
 	} else {
 		old = (local64_xchg(&buf->head, base) &
 		       ((buf->nr_pages << PAGE_SHIFT) - 1));
-		if (base < old)
+		if (base < old || (base == old && wrapped))
 			base += buf->nr_pages << PAGE_SHIFT;

 		local_add(base - old, &buf->data_size);
@@ -878,7 +905,7 @@ static void pt_update_head(struct pt *pt)
  */
 static void *pt_buffer_region(struct pt_buffer *buf)
 {
-	return phys_to_virt(TOPA_ENTRY(buf->cur, buf->cur_idx)->base << TOPA_SHIFT);
+	return phys_to_virt((phys_addr_t)TOPA_ENTRY(buf->cur, buf->cur_idx)->base << TOPA_SHIFT);
 }

 /**
@@ -900,7 +927,7 @@ static void pt_handle_status(struct pt *pt)
 	int advance = 0;
 	u64 status;

-	rdmsrl(MSR_IA32_RTIT_STATUS, status);
+	rdmsrq(MSR_IA32_RTIT_STATUS, status);

 	if (status & RTIT_STATUS_ERROR) {
 		pr_err_ratelimited("ToPA ERROR encountered, trying to recover\n");
@@ -944,7 +971,7 @@ static void pt_handle_status(struct pt *pt)
 	if (advance)
 		pt_buffer_advance(buf);

-	wrmsrl(MSR_IA32_RTIT_STATUS, status);
+	wrmsrq(MSR_IA32_RTIT_STATUS, status);
 }

 /**
@@ -959,12 +986,12 @@ static void pt_read_offset(struct pt_buffer *buf)
 	struct topa_page *tp;

 	if (!buf->single) {
-		rdmsrl(MSR_IA32_RTIT_OUTPUT_BASE, pt->output_base);
+		rdmsrq(MSR_IA32_RTIT_OUTPUT_BASE, pt->output_base);
 		tp = phys_to_virt(pt->output_base);
 		buf->cur = &tp->topa;
 	}

-	rdmsrl(MSR_IA32_RTIT_OUTPUT_MASK, pt->output_mask);
+	rdmsrq(MSR_IA32_RTIT_OUTPUT_MASK, pt->output_mask);
 	/* offset within current output region */
 	buf->output_off = pt->output_mask >> 32;
 	/* index of current output region within this table */
@@ -990,7 +1017,7 @@ pt_topa_entry_for_page(struct pt_buffer *buf, unsigned int pg)
 	 * order allocations, there shouldn't be many of these.
 	 */
 	list_for_each_entry(topa, &buf->tables, list) {
-		if (topa->offset + topa->size > pg << PAGE_SHIFT)
+		if (topa->offset + topa->size > (unsigned long)pg << PAGE_SHIFT)
 			goto found;
 	}

@@ -1511,6 +1538,7 @@ void intel_pt_interrupt(void)
 	buf = perf_aux_output_begin(&pt->handle, event);
 	if (!buf) {
 		event->hw.state = PERF_HES_STOPPED;
+		WRITE_ONCE(pt->resume_allowed, 0);
 		return;
 	}

@@ -1519,6 +1547,7 @@ void intel_pt_interrupt(void)
 		ret = pt_buffer_reset_markers(buf, &pt->handle);
 		if (ret) {
 			perf_aux_output_end(&pt->handle, 0);
+			WRITE_ONCE(pt->resume_allowed, 0);
 			return;
 		}

@@ -1557,7 +1586,7 @@ void intel_pt_handle_vmx(int on)

 	/* Turn PTs back on */
 	if (!on && event)
-		wrmsrl(MSR_IA32_RTIT_CTL, event->hw.config);
+		wrmsrq(MSR_IA32_RTIT_CTL, event->hw.aux_config);

 	local_irq_restore(flags);
 }
@@ -1573,6 +1602,26 @@ static void pt_event_start(struct perf_event *event, int mode)
 	struct pt *pt = this_cpu_ptr(&pt_ctx);
 	struct pt_buffer *buf;

+	if (mode & PERF_EF_RESUME) {
+		if (READ_ONCE(pt->resume_allowed)) {
+			u64 status;
+
+			/*
+			 * Only if the trace is not active and the error and
+			 * stopped bits are clear, is it safe to start, but a
+			 * PMI might have just cleared these, so resume_allowed
+			 * must be checked again also.
+			 */
+			rdmsrq(MSR_IA32_RTIT_STATUS, status);
+			if (!(status & (RTIT_STATUS_TRIGGEREN |
+					RTIT_STATUS_ERROR |
+					RTIT_STATUS_STOPPED)) &&
+			    READ_ONCE(pt->resume_allowed))
+				pt_config_start(event);
+		}
+		return;
+	}
+
 	buf = perf_aux_output_begin(&pt->handle, event);
 	if (!buf)
 		goto fail_stop;
@@ -1583,7 +1632,6 @@ static void pt_event_start(struct perf_event *event, int mode)
 			goto fail_end_stop;
 	}

-	WRITE_ONCE(pt->handle_nmi, 1);
 	hwc->state = 0;

 	pt_config_buffer(buf);
@@ -1601,11 +1649,27 @@ static void pt_event_stop(struct perf_event *event, int mode)
 {
 	struct pt *pt = this_cpu_ptr(&pt_ctx);

+	if (mode & PERF_EF_PAUSE) {
+		if (READ_ONCE(pt->pause_allowed))
+			pt_config_stop(event);
+		return;
+	}
+
 	/*
 	 * Protect against the PMI racing with disabling wrmsr,
 	 * see comment in intel_pt_interrupt().
 	 */
 	WRITE_ONCE(pt->handle_nmi, 0);
+	barrier();
+
+	/*
+	 * Prevent a resume from attempting to restart tracing, or a pause
+	 * during a subsequent start. Do this after clearing handle_nmi so that
+	 * pt_event_snapshot_aux() will not re-allow them.
+	 */
+	WRITE_ONCE(pt->pause_allowed, 0);
+	WRITE_ONCE(pt->resume_allowed, 0);
+	barrier();

 	pt_config_stop(event);

@@ -1656,12 +1720,15 @@ static long pt_event_snapshot_aux(struct perf_event *event,
 	if (WARN_ON_ONCE(!buf->snapshot))
 		return 0;

+	/* Prevent pause/resume from attempting to start/stop tracing */
+	WRITE_ONCE(pt->pause_allowed, 0);
+	WRITE_ONCE(pt->resume_allowed, 0);
+	barrier();
 	/*
-	 * Here, handle_nmi tells us if the tracing is on
+	 * There is no PT interrupt in this mode, so stop the trace and it will
+	 * remain stopped while the buffer is copied.
 	 */
-	if (READ_ONCE(pt->handle_nmi))
-		pt_config_stop(event);
-
+	pt_config_stop(event);
 	pt_read_offset(buf);
 	pt_update_head(pt);

@@ -1673,12 +1740,16 @@ static long pt_event_snapshot_aux(struct perf_event *event,
 	ret = perf_output_copy_aux(&pt->handle, handle, from, to);

 	/*
-	 * If the tracing was on when we turned up, restart it.
-	 * Compiler barrier not needed as we couldn't have been
-	 * preempted by anything that touches pt->handle_nmi.
+	 * Here, handle_nmi tells us if the tracing was on.
+	 * If the tracing was on, restart it.
 	 */
-	if (pt->handle_nmi)
+	if (READ_ONCE(pt->handle_nmi)) {
+		WRITE_ONCE(pt->resume_allowed, 1);
+		barrier();
 		pt_config_start(event);
+		barrier();
+		WRITE_ONCE(pt->pause_allowed, 1);
+	}

 	return ret;
 }
@@ -1769,7 +1840,7 @@ static __init int pt_init(void)
 	for_each_online_cpu(cpu) {
 		u64 ctl;

-		ret = rdmsrl_safe_on_cpu(cpu, MSR_IA32_RTIT_CTL, &ctl);
+		ret = rdmsrq_safe_on_cpu(cpu, MSR_IA32_RTIT_CTL, &ctl);
 		if (!ret && (ctl & RTIT_CTL_TRACEEN))
 			prior_warn++;
 	}
@@ -1793,8 +1864,12 @@ static __init int pt_init(void)

 	if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries))
 		pt_pmu.pmu.capabilities = PERF_PMU_CAP_AUX_NO_SG;
+	else
+		pt_pmu.pmu.capabilities = PERF_PMU_CAP_AUX_PREFER_LARGE;

-	pt_pmu.pmu.capabilities |= PERF_PMU_CAP_EXCLUSIVE | PERF_PMU_CAP_ITRACE;
+	pt_pmu.pmu.capabilities |= PERF_PMU_CAP_EXCLUSIVE |
+				   PERF_PMU_CAP_ITRACE |
+				   PERF_PMU_CAP_AUX_PAUSE;
 	pt_pmu.pmu.attr_groups = pt_attr_groups;
 	pt_pmu.pmu.task_ctx_nr = perf_sw_context;
 	pt_pmu.pmu.event_init = pt_event_init;
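For context on what the new PERF_PMU_CAP_AUX_PAUSE capability is used for, below is a minimal user-space sketch (not part of this patch) of starting Intel PT with tracing paused and letting a grouped "trigger" event resume it on overflow. It assumes the perf_event_attr aux_start_paused and aux_resume bits added by the companion perf core change in the same series; the helper name and the pt_pmu_type parameter (read by the caller from /sys/bus/event_source/devices/intel_pt/type) are illustrative only, and mapping the AUX area is omitted.

/*
 * Illustrative sketch only: open an Intel PT event that starts paused and a
 * grouped hardware event whose overflow resumes the AUX trace.
 */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <string.h>
#include <unistd.h>

static int perf_event_open(struct perf_event_attr *attr, pid_t pid, int cpu,
			   int group_fd, unsigned long flags)
{
	return syscall(SYS_perf_event_open, attr, pid, cpu, group_fd, flags);
}

/* Returns the PT (group leader) fd on success, -1 on failure. */
int open_paused_pt_with_resume_trigger(int pt_pmu_type)
{
	struct perf_event_attr pt, trigger;
	int pt_fd, trig_fd;

	memset(&pt, 0, sizeof(pt));
	pt.size = sizeof(pt);
	pt.type = pt_pmu_type;		/* Intel PT PMU */
	pt.disabled = 1;
	pt.aux_start_paused = 1;	/* tracing begins in the paused state */

	pt_fd = perf_event_open(&pt, 0, -1, -1, 0);
	if (pt_fd < 0)
		return -1;

	memset(&trigger, 0, sizeof(trigger));
	trigger.size = sizeof(trigger);
	trigger.type = PERF_TYPE_HARDWARE;
	trigger.config = PERF_COUNT_HW_INSTRUCTIONS;
	trigger.sample_period = 1000000;
	trigger.aux_resume = 1;		/* each overflow resumes the AUX trace */

	/* Group the trigger under the PT event so it controls that AUX area. */
	trig_fd = perf_event_open(&trigger, 0, -1, pt_fd, 0);
	if (trig_fd < 0) {
		close(pt_fd);
		return -1;
	}

	return pt_fd;
}

On the kernel side shown in this diff, the trigger's overflow reaches pt_event_stop()/pt_event_start() with PERF_EF_PAUSE/PERF_EF_RESUME, which is why those paths only toggle TraceEn and rely on the pause_allowed/resume_allowed flags rather than touching the AUX buffer state.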