Diffstat (limited to 'arch/powerpc/perf/core-book3s.c')
-rw-r--r--  arch/powerpc/perf/core-book3s.c  22
1 file changed, 11 insertions(+), 11 deletions(-)
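
The hunks below replace the old __get_cpu_var() per-CPU accessor with this_cpu_ptr(); both return a pointer to the current CPU's copy of the per-CPU variable, but __get_cpu_var() has since been removed from the kernel. A minimal standalone sketch of the pattern, with a trimmed stand-in for the cpu_hw_events structure and a hypothetical function name (the real struct and callers live in core-book3s.c):

#include <linux/percpu.h>

/* trimmed stand-in; the real struct is defined in core-book3s.c */
struct cpu_hw_events {
	int n_events;
};

static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);

static void percpu_accessor_example(void)
{
	struct cpu_hw_events *cpuhw;

	/* old form, now removed from the kernel: */
	/* cpuhw = &__get_cpu_var(cpu_hw_events); */

	/* preferred form used throughout this patch: */
	cpuhw = this_cpu_ptr(&cpu_hw_events);
	(void)cpuhw;
}

As in the patch, the callers are expected to run with interrupts or preemption disabled, so the pointer stays valid for the current CPU.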
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index a6995d4e93d4..7c4f6690533a 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -339,7 +339,7 @@ static void power_pmu_bhrb_reset(void)
static void power_pmu_bhrb_enable(struct perf_event *event)
{
- struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
if (!ppmu->bhrb_nr)
return;
@@ -354,7 +354,7 @@ static void power_pmu_bhrb_enable(struct perf_event *event)
static void power_pmu_bhrb_disable(struct perf_event *event)
{
- struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
if (!ppmu->bhrb_nr)
return;
@@ -1144,7 +1144,7 @@ static void power_pmu_disable(struct pmu *pmu)
if (!ppmu)
return;
local_irq_save(flags);
- cpuhw = &__get_cpu_var(cpu_hw_events);
+ cpuhw = this_cpu_ptr(&cpu_hw_events);
if (!cpuhw->disabled) {
/*
@@ -1211,7 +1211,7 @@ static void power_pmu_enable(struct pmu *pmu)
return;
local_irq_save(flags);
- cpuhw = &__get_cpu_var(cpu_hw_events);
+ cpuhw = this_cpu_ptr(&cpu_hw_events);
if (!cpuhw->disabled)
goto out;
@@ -1403,7 +1403,7 @@ static int power_pmu_add(struct perf_event *event, int ef_flags)
* Add the event to the list (if there is room)
* and check whether the total set is still feasible.
*/
- cpuhw = &__get_cpu_var(cpu_hw_events);
+ cpuhw = this_cpu_ptr(&cpu_hw_events);
n0 = cpuhw->n_events;
if (n0 >= ppmu->n_counter)
goto out;
@@ -1469,7 +1469,7 @@ static void power_pmu_del(struct perf_event *event, int ef_flags)
power_pmu_read(event);
- cpuhw = &__get_cpu_var(cpu_hw_events);
+ cpuhw = this_cpu_ptr(&cpu_hw_events);
for (i = 0; i < cpuhw->n_events; ++i) {
if (event == cpuhw->event[i]) {
while (++i < cpuhw->n_events) {
@@ -1575,7 +1575,7 @@ static void power_pmu_stop(struct perf_event *event, int ef_flags)
*/
static void power_pmu_start_txn(struct pmu *pmu)
{
- struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
perf_pmu_disable(pmu);
cpuhw->group_flag |= PERF_EVENT_TXN;
@@ -1589,7 +1589,7 @@ static void power_pmu_start_txn(struct pmu *pmu)
*/
static void power_pmu_cancel_txn(struct pmu *pmu)
{
- struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
cpuhw->group_flag &= ~PERF_EVENT_TXN;
perf_pmu_enable(pmu);
@@ -1607,7 +1607,7 @@ static int power_pmu_commit_txn(struct pmu *pmu)
if (!ppmu)
return -EAGAIN;
- cpuhw = &__get_cpu_var(cpu_hw_events);
+ cpuhw = this_cpu_ptr(&cpu_hw_events);
n = cpuhw->n_events;
if (check_excludes(cpuhw->event, cpuhw->flags, 0, n))
return -EAGAIN;
@@ -1964,7 +1964,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
if (event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK) {
struct cpu_hw_events *cpuhw;
- cpuhw = &__get_cpu_var(cpu_hw_events);
+ cpuhw = this_cpu_ptr(&cpu_hw_events);
power_pmu_bhrb_read(cpuhw);
data.br_stack = &cpuhw->bhrb_stack;
}
@@ -2037,7 +2037,7 @@ static bool pmc_overflow(unsigned long val)
static void perf_event_interrupt(struct pt_regs *regs)
{
int i, j;
- struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
struct perf_event *event;
unsigned long val[8];
int found, active;