author     Will Deacon <will.deacon@arm.com>  2010-12-02 18:01:49 +0100
committer  Russell King <rmk+kernel@arm.linux.org.uk>  2010-12-04 11:18:08 +0000
commit     961ec6daa7b14f376c30d447a830fa4783a2112c (patch)
tree       fd4952bd65fa9e991de7687eaef4b2e5af7a0e70 /arch/arm/kernel/perf_event_xscale.c
parent     ARM: 6512/1: perf: fix warnings generated by sparse (diff)
ARM: 6521/1: perf: use raw_spinlock_t for pmu_lock
For kernels built with PREEMPT_RT, critical sections protected by standard spinlocks are preemptible. This is not acceptable for perf because (a) we may be scheduled onto a different CPU whilst reading/writing banked PMU registers, and (b) the latency of reading the PMU registers becomes unpredictable. This patch upgrades the pmu_lock spinlock to a raw_spinlock.

Reported-by: Jamie Iles <jamie@jamieiles.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
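To illustrate the pattern the patch applies at every call site, here is a minimal sketch in kernel C, assuming the XScale v1 PMNC helpers that appear in the diff below; the DEFINE_RAW_SPINLOCK declaration and the example_enable_pmu() wrapper are illustrative stand-ins, not code from this patch:

#include <linux/spinlock.h>

/*
 * Illustrative stand-in for the driver's pmu_lock, which this series
 * upgrades from spinlock_t to raw_spinlock_t (the real definition
 * lives outside this file).
 */
static DEFINE_RAW_SPINLOCK(pmu_lock);

static void example_enable_pmu(void)
{
	unsigned long flags, val;

	/*
	 * A raw spinlock stays a true spinning lock even on PREEMPT_RT,
	 * so this read-modify-write of the banked PMNC register cannot
	 * be preempted or migrated to another CPU mid-sequence.
	 */
	raw_spin_lock_irqsave(&pmu_lock, flags);
	val = xscale1pmu_read_pmnc();	/* read current control bits */
	val |= XSCALE_PMU_ENABLE;	/* set the global enable bit */
	xscale1pmu_write_pmnc(val);	/* write back under the lock */
	raw_spin_unlock_irqrestore(&pmu_lock, flags);
}

On PREEMPT_RT, a plain spinlock_t is converted into a sleeping, preemptible lock, which is why every spin_lock_irqsave()/spin_unlock_irqrestore() pair in the diff is replaced outright rather than merely annotated.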
Diffstat (limited to 'arch/arm/kernel/perf_event_xscale.c')
 arch/arm/kernel/perf_event_xscale.c | 32 ++++++++++++++++----------------
 1 file changed, 16 insertions(+), 16 deletions(-)
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c
index f14fbb6c345b..28cd3b025bc3 100644
--- a/arch/arm/kernel/perf_event_xscale.c
+++ b/arch/arm/kernel/perf_event_xscale.c
@@ -291,12 +291,12 @@ xscale1pmu_enable_event(struct hw_perf_event *hwc, int idx)
 		return;
 	}
 
-	spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&pmu_lock, flags);
 	val = xscale1pmu_read_pmnc();
 	val &= ~mask;
 	val |= evt;
 	xscale1pmu_write_pmnc(val);
-	spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }
 
 static void
@@ -322,12 +322,12 @@ xscale1pmu_disable_event(struct hw_perf_event *hwc, int idx)
 		return;
 	}
 
-	spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&pmu_lock, flags);
 	val = xscale1pmu_read_pmnc();
 	val &= ~mask;
 	val |= evt;
 	xscale1pmu_write_pmnc(val);
-	spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }
 
 static int
@@ -355,11 +355,11 @@ xscale1pmu_start(void)
 {
 	unsigned long flags, val;
 
-	spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&pmu_lock, flags);
 	val = xscale1pmu_read_pmnc();
 	val |= XSCALE_PMU_ENABLE;
 	xscale1pmu_write_pmnc(val);
-	spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }
 
 static void
@@ -367,11 +367,11 @@ xscale1pmu_stop(void)
 {
 	unsigned long flags, val;
 
-	spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&pmu_lock, flags);
 	val = xscale1pmu_read_pmnc();
 	val &= ~XSCALE_PMU_ENABLE;
 	xscale1pmu_write_pmnc(val);
-	spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }
 
 static inline u32
@@ -635,10 +635,10 @@ xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx)
 		return;
 	}
 
-	spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&pmu_lock, flags);
 	xscale2pmu_write_event_select(evtsel);
 	xscale2pmu_write_int_enable(ien);
-	spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }
 
 static void
@@ -678,10 +678,10 @@ xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
 		return;
 	}
 
-	spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&pmu_lock, flags);
 	xscale2pmu_write_event_select(evtsel);
 	xscale2pmu_write_int_enable(ien);
-	spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }
 
 static int
@@ -705,11 +705,11 @@ xscale2pmu_start(void)
 {
 	unsigned long flags, val;
 
-	spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&pmu_lock, flags);
 	val = xscale2pmu_read_pmnc() & ~XSCALE_PMU_CNT64;
 	val |= XSCALE_PMU_ENABLE;
 	xscale2pmu_write_pmnc(val);
-	spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }
 
 static void
@@ -717,11 +717,11 @@ xscale2pmu_stop(void)
 {
 	unsigned long flags, val;
 
-	spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&pmu_lock, flags);
 	val = xscale2pmu_read_pmnc();
 	val &= ~XSCALE_PMU_ENABLE;
 	xscale2pmu_write_pmnc(val);
-	spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&pmu_lock, flags);
 }
 
 static inline u32