author		Robert Richter <robert.richter@amd.com>	2009-04-29 12:47:05 +0200
committer	Ingo Molnar <mingo@elte.hu>	2009-04-29 14:51:04 +0200
commit		39d81eab2374d71b2d9c82f66258a1a4f57ddd2e (patch)
tree		94900f94b500eb18bb5963a258740c660a33c3e3 /arch/x86/kernel/cpu/perf_counter.c
parent		perf_counter, x86: rename struct pmc_x86_ops into struct x86_pmu (diff)
perf_counter, x86: make interrupt handler model specific
This separates the perfcounter interrupt handler for AMD and Intel cpus.
The AMD interrupt handler implementation is a follow-on patch.

[ Impact: refactor and clean up code ]

Signed-off-by: Robert Richter <robert.richter@amd.com>
Cc: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1241002046-8832-9-git-send-email-robert.richter@amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel/cpu/perf_counter.c')
-rw-r--r--	arch/x86/kernel/cpu/perf_counter.c | 16
1 file changed, 13 insertions(+), 3 deletions(-)
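The core of this patch is the new handle_irq callback in struct x86_pmu: the generic APIC and NMI entry points dispatch through the per-vendor ops table instead of calling the Intel-specific handler directly. The following stand-alone C sketch illustrates that dispatch pattern in user space; it is an illustration only, and the names (struct pmu_ops, struct fake_regs, pick_pmu) are hypothetical stand-ins that do not appear in the kernel.

#include <stdio.h>

struct fake_regs { unsigned long ip; };		/* stand-in for struct pt_regs */

/* Per-vendor ops table, mirroring the handle_irq member added to struct x86_pmu. */
struct pmu_ops {
	int (*handle_irq)(struct fake_regs *regs, int nmi);
};

static int intel_handle_irq(struct fake_regs *regs, int nmi)
{
	printf("intel handler: ip=%#lx, nmi=%d\n", regs->ip, nmi);
	return 1;			/* pretend a counter overflow was handled */
}

/* AMD stub, mirroring the patch: the real handler arrives in a follow-on change. */
static int amd_handle_irq(struct fake_regs *regs, int nmi)
{
	(void)regs;
	(void)nmi;
	return 0;
}

static const struct pmu_ops intel_ops = { .handle_irq = intel_handle_irq };
static const struct pmu_ops amd_ops   = { .handle_irq = amd_handle_irq };

/* Vendor detection selects the ops table once; callers never check the vendor again. */
static const struct pmu_ops *pick_pmu(int is_amd)
{
	return is_amd ? &amd_ops : &intel_ops;
}

int main(void)
{
	struct fake_regs regs = { .ip = 0x401000UL };
	const struct pmu_ops *pmu = pick_pmu(0);

	/* Generic entry point: dispatch through the callback, as the patched
	   smp_perf_counter_interrupt() and the NMI notifier now do. */
	int handled = pmu->handle_irq(&regs, 0);

	printf("handled=%d\n", handled);
	return 0;
}

With this shape, adding a vendor means filling in another ops structure rather than touching the generic interrupt paths, which is what the follow-on AMD patch does for the real handler.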
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 808a1a113463..9d90de0bd0b0 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -4,6 +4,7 @@
  * Copyright(C) 2008 Thomas Gleixner <tglx@linutronix.de>
  * Copyright(C) 2008 Red Hat, Inc., Ingo Molnar
  * Copyright(C) 2009 Jaswinder Singh Rajput
+ * Copyright(C) 2009 Advanced Micro Devices, Inc., Robert Richter
  *
  * For licencing details see kernel-base/COPYING
  */
@@ -47,6 +48,7 @@ struct cpu_hw_counters {
  * struct x86_pmu - generic x86 pmu
  */
 struct x86_pmu {
+	int		(*handle_irq)(struct pt_regs *, int);
 	u64		(*save_disable_all)(void);
 	void		(*restore_all)(u64);
 	u64		(*get_status)(u64);
@@ -241,6 +243,10 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
 	struct hw_perf_counter *hwc = &counter->hw;
 	int err;
 
+	/* disable temporarily */
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+		return -ENOSYS;
+
 	if (unlikely(!perf_counters_initialized))
 		return -EINVAL;
@@ -780,7 +786,7 @@ static void perf_save_and_restart(struct perf_counter *counter)
  * This handler is triggered by the local APIC, so the APIC IRQ handling
  * rules apply:
  */
-static int __smp_perf_counter_interrupt(struct pt_regs *regs, int nmi)
+static int intel_pmu_handle_irq(struct pt_regs *regs, int nmi)
 {
 	int bit, cpu = smp_processor_id();
 	u64 ack, status;
@@ -827,6 +833,8 @@ out:
 	return ret;
 }
 
+static int amd_pmu_handle_irq(struct pt_regs *regs, int nmi) { return 0; }
+
 void perf_counter_unthrottle(void)
 {
 	struct cpu_hw_counters *cpuc;
@@ -851,7 +859,7 @@ void smp_perf_counter_interrupt(struct pt_regs *regs)
 	irq_enter();
 	apic_write(APIC_LVTPC, LOCAL_PERF_VECTOR);
 	ack_APIC_irq();
-	__smp_perf_counter_interrupt(regs, 0);
+	x86_pmu->handle_irq(regs, 0);
 	irq_exit();
 }
@@ -908,7 +916,7 @@ perf_counter_nmi_handler(struct notifier_block *self,
 	regs = args->regs;
 	apic_write(APIC_LVTPC, APIC_DM_NMI);
 
-	ret = __smp_perf_counter_interrupt(regs, 1);
+	ret = x86_pmu->handle_irq(regs, 1);
 
 	return ret ? NOTIFY_STOP : NOTIFY_OK;
 }
@@ -920,6 +928,7 @@ static __read_mostly struct notifier_block perf_counter_nmi_notifier = {
 };
 
 static struct x86_pmu intel_pmu = {
+	.handle_irq		= intel_pmu_handle_irq,
 	.save_disable_all	= intel_pmu_save_disable_all,
 	.restore_all		= intel_pmu_restore_all,
 	.get_status		= intel_pmu_get_status,
@@ -934,6 +943,7 @@ static struct x86_pmu intel_pmu = {
 };
 
 static struct x86_pmu amd_pmu = {
+	.handle_irq		= amd_pmu_handle_irq,
 	.save_disable_all	= amd_pmu_save_disable_all,
 	.restore_all		= amd_pmu_restore_all,
 	.get_status		= amd_pmu_get_status,