path: root/arch/x86/xen/pmu.c
author    Boris Ostrovsky <boris.ostrovsky@oracle.com>  2015-08-10 16:34:36 -0400
committer David Vrabel <david.vrabel@citrix.com>        2015-08-20 12:25:25 +0100
commit    6b08cd6328c58a2ae190c5ee03a2ffcab5ef828e (patch)
tree      90fd9fee020e844c50f2b0c9bd40b8ccffdfe14b /arch/x86/xen/pmu.c
parent    xen/PMU: Describe vendor-specific PMU registers (diff)
xen/PMU: Intercept PMU-related MSR and APIC accesses
Provide interfaces for recognizing accesses to PMU-related MSRs and to the APIC LVTPC register, and process these accesses in the Xen PMU code.

(The interrupt handler performs XENPMU_flush right away at the beginning, since no PMU emulation is available yet; it will be added by a later patch.)

Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Reviewed-by: David Vrabel <david.vrabel@citrix.com>
Signed-off-by: David Vrabel <david.vrabel@citrix.com>
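The diff below is limited to pmu.c, so the call sites of the new helpers are not shown here. As a rough sketch of how they could be consumed, the safe-MSR accessors on the Xen PV side would try the PMU path first and fall back to the native safe accessors; the caller names below (xen_read_msr_safe/xen_write_msr_safe) are assumptions for illustration only, not part of this diff.

/*
 * Sketch only: caller names are assumed for illustration; this commit's
 * diffstat covers pmu.c alone, so the real call sites live elsewhere.
 */
static u64 xen_read_msr_safe(unsigned int msr, int *err)
{
	uint64_t val;

	/* PMU MSRs are read natively for now (no PMU emulation yet). */
	if (pmu_msr_read(msr, &val, err))
		return val;

	return native_read_msr_safe(msr, err);
}

static int xen_write_msr_safe(unsigned int msr, unsigned int low,
			      unsigned int high)
{
	int ret = 0;

	/* Let the PMU code claim the write; otherwise write it natively. */
	if (!pmu_msr_write(msr, low, high, &ret))
		ret = native_write_msr_safe(msr, low, high);

	return ret;
}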
Diffstat (limited to 'arch/x86/xen/pmu.c')
-rw-r--r--  arch/x86/xen/pmu.c  95
1 file changed, 94 insertions(+), 1 deletion(-)
diff --git a/arch/x86/xen/pmu.c b/arch/x86/xen/pmu.c
index a4a6e4f04f37..f92b908e005f 100644
--- a/arch/x86/xen/pmu.c
+++ b/arch/x86/xen/pmu.c
@@ -51,6 +51,8 @@ static __read_mostly int amd_num_counters;
/* Alias registers (0x4c1) for full-width writes to PMCs */
#define MSR_PMC_ALIAS_MASK (~(MSR_IA32_PERFCTR0 ^ MSR_IA32_PMC0))
+#define INTEL_PMC_TYPE_SHIFT 30
+
static __read_mostly int intel_num_arch_counters, intel_num_fixed_counters;
@@ -167,6 +169,91 @@ static int is_intel_pmu_msr(u32 msr_index, int *type, int *index)
}
}
+bool pmu_msr_read(unsigned int msr, uint64_t *val, int *err)
+{
+
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
+ if (is_amd_pmu_msr(msr)) {
+ *val = native_read_msr_safe(msr, err);
+ return true;
+ }
+ } else {
+ int type, index;
+
+ if (is_intel_pmu_msr(msr, &type, &index)) {
+ *val = native_read_msr_safe(msr, err);
+ return true;
+ }
+ }
+
+ return false;
+}
+
+bool pmu_msr_write(unsigned int msr, uint32_t low, uint32_t high, int *err)
+{
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
+ if (is_amd_pmu_msr(msr)) {
+ *err = native_write_msr_safe(msr, low, high);
+ return true;
+ }
+ } else {
+ int type, index;
+
+ if (is_intel_pmu_msr(msr, &type, &index)) {
+ *err = native_write_msr_safe(msr, low, high);
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static unsigned long long xen_amd_read_pmc(int counter)
+{
+ uint32_t msr;
+ int err;
+
+ msr = amd_counters_base + (counter * amd_msr_step);
+ return native_read_msr_safe(msr, &err);
+}
+
+static unsigned long long xen_intel_read_pmc(int counter)
+{
+ int err;
+ uint32_t msr;
+
+ if (counter & (1<<INTEL_PMC_TYPE_SHIFT))
+ msr = MSR_CORE_PERF_FIXED_CTR0 + (counter & 0xffff);
+ else
+ msr = MSR_IA32_PERFCTR0 + counter;
+
+ return native_read_msr_safe(msr, &err);
+}
+
+unsigned long long xen_read_pmc(int counter)
+{
+ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+ return xen_amd_read_pmc(counter);
+ else
+ return xen_intel_read_pmc(counter);
+}
+
+int pmu_apic_update(uint32_t val)
+{
+ int ret;
+ struct xen_pmu_data *xenpmu_data = get_xenpmu_data();
+
+ if (!xenpmu_data) {
+ pr_warn_once("%s: pmudata not initialized\n", __func__);
+ return -EINVAL;
+ }
+
+ xenpmu_data->pmu.l.lapic_lvtpc = val;
+ ret = HYPERVISOR_xenpmu_op(XENPMU_lvtpc_set, NULL);
+
+ return ret;
+}
+
/* perf callbacks */
static int xen_is_in_guest(void)
{
@@ -239,7 +326,7 @@ static void xen_convert_regs(const struct xen_pmu_regs *xen_regs,
irqreturn_t xen_pmu_irq_handler(int irq, void *dev_id)
{
- int ret = IRQ_NONE;
+ int err, ret = IRQ_NONE;
struct pt_regs regs;
const struct xen_pmu_data *xenpmu_data = get_xenpmu_data();
@@ -248,6 +335,12 @@ irqreturn_t xen_pmu_irq_handler(int irq, void *dev_id)
return ret;
}
+ err = HYPERVISOR_xenpmu_op(XENPMU_flush, NULL);
+ if (err) {
+ pr_warn_once("%s: failed hypercall, err: %d\n", __func__, err);
+ return ret;
+ }
+
xen_convert_regs(&xenpmu_data->pmu.r.regs, &regs,
xenpmu_data->pmu.pmu_flags);
if (x86_pmu.handle_irq(&regs))
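
The APIC side of the interception is also outside this file. A minimal sketch, assuming the PV APIC write hook lives in arch/x86/xen/apic.c (the hook name here is an assumption, not part of this diff): writes to the LVTPC register are forwarded to Xen through pmu_apic_update() rather than being applied to a hardware APIC the PV guest does not have.

/* Sketch only: hook name assumed; the APIC changes are not in this diff. */
static void xen_apic_write(u32 reg, u32 val)
{
	if (reg == APIC_LVTPC) {
		/* Hand the new LVTPC value to Xen via XENPMU_lvtpc_set. */
		(void)pmu_apic_update(val);
		return;
	}

	/* ... existing handling for all other APIC registers ... */
}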