author		Linus Torvalds <torvalds@linux-foundation.org>	2018-08-17 11:32:50 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-08-17 11:32:50 -0700
commit		5e2d059b52e397d9ac42f4c4d9d9a841887b5818 (patch)
tree		c8cd8fd7187113be33e29fcc75f45a8bbc27e6b2 /arch/powerpc/perf
parent		Merge tag 'modules-for-v4.19' of git://git.kernel.org/pub/scm/linux/kernel/git/jeyu/linux (diff)
parent		powerpc/mm/book3s/radix: Add mapping statistics (diff)
Merge tag 'powerpc-4.19-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux
Pull powerpc updates from Michael Ellerman:
 "Notable changes:

  - A fix for a bug in our page table fragment allocator, where a page table page could be freed and reallocated for something else while still in use, leading to memory corruption etc. The fix reuses pt_mm in struct page (x86 only) for a powerpc only refcount.

  - Fixes to our pkey support. Several are user-visible changes, but bring us in to line with x86 behaviour and/or fix outright bugs. Thanks to Florian Weimer for reporting many of these.

  - A series to improve the hvc driver & related OPAL console code, which have been seen to cause hardlockups at times. The hvc driver changes in particular have been in linux-next for ~1 month.

  - Increase our MAX_PHYSMEM_BITS to 128TB when SPARSEMEM_VMEMMAP=y.

  - Remove Power8 DD1 and Power9 DD1 support, neither chip should be in use anywhere other than as a paper weight.

  - An optimised memcmp implementation using Power7-or-later VMX instructions.

  - Support for barrier_nospec on some NXP CPUs.

  - Support for flushing the count cache on context switch on some IBM CPUs (controlled by firmware), as a Spectre v2 mitigation.

  - A series to enhance the information we print on unhandled signals to bring it into line with other arches, including showing the offending VMA and dumping the instructions around the fault.

  Thanks to: Aaro Koskinen, Akshay Adiga, Alastair D'Silva, Alexey Kardashevskiy, Alexey Spirkov, Alistair Popple, Andrew Donnellan, Aneesh Kumar K.V, Anju T Sudhakar, Arnd Bergmann, Bartosz Golaszewski, Benjamin Herrenschmidt, Bharat Bhushan, Bjoern Noetel, Boqun Feng, Breno Leitao, Bryant G. Ly, Camelia Groza, Christophe Leroy, Christoph Hellwig, Cyril Bur, Dan Carpenter, Daniel Klamt, Darren Stevens, Dave Young, David Gibson, Diana Craciun, Finn Thain, Florian Weimer, Frederic Barrat, Gautham R. Shenoy, Geert Uytterhoeven, Geoff Levand, Guenter Roeck, Gustavo Romero, Haren Myneni, Hari Bathini, Joel Stanley, Jonathan Neuschäfer, Kees Cook, Madhavan Srinivasan, Mahesh Salgaonkar, Markus Elfring, Mathieu Malaterre, Mauro S. M. Rodrigues, Michael Hanselmann, Michael Neuling, Michael Schmitz, Mukesh Ojha, Murilo Opsfelder Araujo, Nicholas Piggin, Parth Y Shah, Paul Mackerras, Paul Menzel, Ram Pai, Randy Dunlap, Rashmica Gupta, Reza Arbab, Rodrigo R. Galvao, Russell Currey, Sam Bobroff, Scott Wood, Shilpasri G Bhat, Simon Guo, Souptick Joarder, Stan Johnson, Thiago Jung Bauermann, Tyrel Datwyler, Vaibhav Jain, Vasant Hegde, Venkat Rao, zhong jiang"

* tag 'powerpc-4.19-1' of git://git.kernel.org/pub/scm/linux/kernel/git/powerpc/linux: (234 commits)
  powerpc/mm/book3s/radix: Add mapping statistics
  powerpc/uaccess: Enable get_user(u64, *p) on 32-bit
  powerpc/mm/hash: Remove unnecessary do { } while(0) loop
  powerpc/64s: move machine check SLB flushing to mm/slb.c
  powerpc/powernv/idle: Fix build error
  powerpc/mm/tlbflush: update the mmu_gather page size while iterating address range
  powerpc/mm: remove warning about ‘type’ being set
  powerpc/32: Include setup.h header file to fix warnings
  powerpc: Move `path` variable inside DEBUG_PROM
  powerpc/powermac: Make some functions static
  powerpc/powermac: Remove variable x that's never read
  cxl: remove a dead branch
  powerpc/powermac: Add missing include of header pmac.h
  powerpc/kexec: Use common error handling code in setup_new_fdt()
  powerpc/xmon: Add address lookup for percpu symbols
  powerpc/mm: remove huge_pte_offset_and_shift() prototype
  powerpc/lib: Use patch_site to patch copy_32 functions once cache is enabled
  powerpc/pseries: Fix endianness while restoring of r3 in MCE handler.
  powerpc/fadump: merge adjacent memory ranges to reduce PT_LOAD segements
  powerpc/fadump: handle crash memory ranges array index overflow
  ...
Diffstat (limited to 'arch/powerpc/perf')
-rw-r--r--	arch/powerpc/perf/core-book3s.c		34
-rw-r--r--	arch/powerpc/perf/imc-pmu.c		108
-rw-r--r--	arch/powerpc/perf/isa207-common.c	12
-rw-r--r--	arch/powerpc/perf/isa207-common.h	5
-rw-r--r--	arch/powerpc/perf/power9-pmu.c		54
-rw-r--r--	arch/powerpc/perf/req-gen/_begin.h	2
-rw-r--r--	arch/powerpc/perf/req-gen/perf.h	1
7 files changed, 60 insertions, 156 deletions
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 19d8ab49d1bd..81f8a0c838ae 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -128,10 +128,6 @@ static inline void power_pmu_bhrb_disable(struct perf_event *event) {}
static void power_pmu_sched_task(struct perf_event_context *ctx, bool sched_in) {}
static inline void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw) {}
static void pmao_restore_workaround(bool ebb) { }
-static bool use_ic(u64 event)
-{
- return false;
-}
#endif /* CONFIG_PPC32 */
static bool regs_use_siar(struct pt_regs *regs)
@@ -714,14 +710,6 @@ static void pmao_restore_workaround(bool ebb)
mtspr(SPRN_PMC6, pmcs[5]);
}
-static bool use_ic(u64 event)
-{
- if (cpu_has_feature(CPU_FTR_POWER9_DD1) &&
- (event == 0x200f2 || event == 0x300f2))
- return true;
-
- return false;
-}
#endif /* CONFIG_PPC64 */
static void perf_event_interrupt(struct pt_regs *regs);
@@ -1046,7 +1034,6 @@ static u64 check_and_compute_delta(u64 prev, u64 val)
static void power_pmu_read(struct perf_event *event)
{
s64 val, delta, prev;
- struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
if (event->hw.state & PERF_HES_STOPPED)
return;
@@ -1056,13 +1043,6 @@ static void power_pmu_read(struct perf_event *event)
if (is_ebb_event(event)) {
val = read_pmc(event->hw.idx);
- if (use_ic(event->attr.config)) {
- val = mfspr(SPRN_IC);
- if (val > cpuhw->ic_init)
- val = val - cpuhw->ic_init;
- else
- val = val + (0 - cpuhw->ic_init);
- }
local64_set(&event->hw.prev_count, val);
return;
}
@@ -1076,13 +1056,6 @@ static void power_pmu_read(struct perf_event *event)
prev = local64_read(&event->hw.prev_count);
barrier();
val = read_pmc(event->hw.idx);
- if (use_ic(event->attr.config)) {
- val = mfspr(SPRN_IC);
- if (val > cpuhw->ic_init)
- val = val - cpuhw->ic_init;
- else
- val = val + (0 - cpuhw->ic_init);
- }
delta = check_and_compute_delta(prev, val);
if (!delta)
return;
@@ -1535,13 +1508,6 @@ nocheck:
event->attr.branch_sample_type);
}
- /*
- * Workaround for POWER9 DD1 to use the Instruction Counter
- * register value for instruction counting
- */
- if (use_ic(event->attr.config))
- cpuhw->ic_init = mfspr(SPRN_IC);
-
perf_pmu_enable(event->pmu);
local_irq_restore(flags);
return ret;
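
The core-book3s.c hunks above remove the POWER9 DD1 instruction-counting workaround, which snapshotted the SPRN_IC register when an event was added and reported deltas against that baseline on each read. A minimal user-space sketch of that snapshot-and-delta pattern follows; fake_counter and read_counter are hypothetical stand-ins for the hardware register access, not kernel APIs.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for a free-running 64-bit hardware counter (hypothetical). */
static uint64_t fake_counter;
static uint64_t read_counter(void) { return fake_counter; }

struct event_state {
	uint64_t baseline;	/* counter value captured when counting starts */
};

static void event_start(struct event_state *s)
{
	s->baseline = read_counter();
}

static uint64_t event_delta(const struct event_state *s)
{
	/*
	 * Unsigned subtraction is modulo 2^64, so a single subtraction
	 * handles wraparound; the removed code's explicit if/else
	 * branches amount to the same modular subtraction.
	 */
	return read_counter() - s->baseline;
}

int main(void)
{
	struct event_state ev;

	fake_counter = UINT64_MAX - 5;	/* start near the wrap point */
	event_start(&ev);
	fake_counter += 10;		/* counter wraps past zero */
	printf("delta = %llu\n", (unsigned long long)event_delta(&ev)); /* 10 */
	return 0;
}
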
diff --git a/arch/powerpc/perf/imc-pmu.c b/arch/powerpc/perf/imc-pmu.c
index d1977b61f827..1fafc32b12a0 100644
--- a/arch/powerpc/perf/imc-pmu.c
+++ b/arch/powerpc/perf/imc-pmu.c
@@ -867,59 +867,6 @@ static int thread_imc_cpu_init(void)
ppc_thread_imc_cpu_offline);
}
-void thread_imc_pmu_sched_task(struct perf_event_context *ctx,
- bool sched_in)
-{
- int core_id;
- struct imc_pmu_ref *ref;
-
- if (!is_core_imc_mem_inited(smp_processor_id()))
- return;
-
- core_id = smp_processor_id() / threads_per_core;
- /*
- * imc pmus are enabled only when it is used.
- * See if this is triggered for the first time.
- * If yes, take the mutex lock and enable the counters.
- * If not, just increment the count in ref count struct.
- */
- ref = &core_imc_refc[core_id];
- if (!ref)
- return;
-
- if (sched_in) {
- mutex_lock(&ref->lock);
- if (ref->refc == 0) {
- if (opal_imc_counters_start(OPAL_IMC_COUNTERS_CORE,
- get_hard_smp_processor_id(smp_processor_id()))) {
- mutex_unlock(&ref->lock);
- pr_err("thread-imc: Unable to start the counter\
- for core %d\n", core_id);
- return;
- }
- }
- ++ref->refc;
- mutex_unlock(&ref->lock);
- } else {
- mutex_lock(&ref->lock);
- ref->refc--;
- if (ref->refc == 0) {
- if (opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
- get_hard_smp_processor_id(smp_processor_id()))) {
- mutex_unlock(&ref->lock);
- pr_err("thread-imc: Unable to stop the counters\
- for core %d\n", core_id);
- return;
- }
- } else if (ref->refc < 0) {
- ref->refc = 0;
- }
- mutex_unlock(&ref->lock);
- }
-
- return;
-}
-
static int thread_imc_event_init(struct perf_event *event)
{
u32 config = event->attr.config;
@@ -1046,22 +993,70 @@ static int imc_event_add(struct perf_event *event, int flags)
static int thread_imc_event_add(struct perf_event *event, int flags)
{
+ int core_id;
+ struct imc_pmu_ref *ref;
+
if (flags & PERF_EF_START)
imc_event_start(event, flags);
- /* Enable the sched_task to start the engine */
- perf_sched_cb_inc(event->ctx->pmu);
+ if (!is_core_imc_mem_inited(smp_processor_id()))
+ return -EINVAL;
+
+ core_id = smp_processor_id() / threads_per_core;
+ /*
+ * imc pmus are enabled only when it is used.
+ * See if this is triggered for the first time.
+ * If yes, take the mutex lock and enable the counters.
+ * If not, just increment the count in ref count struct.
+ */
+ ref = &core_imc_refc[core_id];
+ if (!ref)
+ return -EINVAL;
+
+ mutex_lock(&ref->lock);
+ if (ref->refc == 0) {
+ if (opal_imc_counters_start(OPAL_IMC_COUNTERS_CORE,
+ get_hard_smp_processor_id(smp_processor_id()))) {
+ mutex_unlock(&ref->lock);
+ pr_err("thread-imc: Unable to start the counter\
+ for core %d\n", core_id);
+ return -EINVAL;
+ }
+ }
+ ++ref->refc;
+ mutex_unlock(&ref->lock);
return 0;
}
static void thread_imc_event_del(struct perf_event *event, int flags)
{
+
+ int core_id;
+ struct imc_pmu_ref *ref;
+
/*
* Take a snapshot and calculate the delta and update
* the event counter values.
*/
imc_event_update(event);
- perf_sched_cb_dec(event->ctx->pmu);
+
+ core_id = smp_processor_id() / threads_per_core;
+ ref = &core_imc_refc[core_id];
+
+ mutex_lock(&ref->lock);
+ ref->refc--;
+ if (ref->refc == 0) {
+ if (opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
+ get_hard_smp_processor_id(smp_processor_id()))) {
+ mutex_unlock(&ref->lock);
+ pr_err("thread-imc: Unable to stop the counters\
+ for core %d\n", core_id);
+ return;
+ }
+ } else if (ref->refc < 0) {
+ ref->refc = 0;
+ }
+ mutex_unlock(&ref->lock);
}
/* update_pmu_ops : Populate the appropriate operations for "pmu" */
@@ -1087,7 +1082,6 @@ static int update_pmu_ops(struct imc_pmu *pmu)
break;
case IMC_DOMAIN_THREAD:
pmu->pmu.event_init = thread_imc_event_init;
- pmu->pmu.sched_task = thread_imc_pmu_sched_task;
pmu->pmu.add = thread_imc_event_add;
pmu->pmu.del = thread_imc_event_del;
pmu->pmu.start_txn = thread_imc_pmu_start_txn;
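
The imc-pmu.c change above moves the core-IMC counter start/stop out of the sched_task callback and into the event add/del paths: a per-core, mutex-protected reference count starts the OPAL counters for the first active event and stops them when the last one is removed. Below is a self-contained sketch of that enable-on-first-use / disable-on-last-use pattern, using pthreads in place of kernel mutexes and stub engine_start/engine_stop functions in place of the OPAL calls (all names here are illustrative).

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int refc;

static int engine_start(void) { puts("counters started"); return 0; }
static int engine_stop(void)  { puts("counters stopped"); return 0; }

static int event_add(void)
{
	int err = 0;

	pthread_mutex_lock(&lock);
	if (refc == 0)
		err = engine_start();	/* first user powers on the engine */
	if (!err)
		refc++;
	pthread_mutex_unlock(&lock);
	return err;
}

static void event_del(void)
{
	pthread_mutex_lock(&lock);
	if (--refc == 0)
		engine_stop();		/* last user powers it off */
	if (refc < 0)			/* defensive clamp, as in the kernel code */
		refc = 0;
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	event_add();	/* starts the engine */
	event_add();	/* only bumps the count */
	event_del();
	event_del();	/* stops the engine */
	return 0;
}

If engine_start() fails for the first user, the count is left untouched and the error propagates, mirroring the -EINVAL return in thread_imc_event_add().
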
diff --git a/arch/powerpc/perf/isa207-common.c b/arch/powerpc/perf/isa207-common.c
index 2efee3f196f5..177de814286f 100644
--- a/arch/powerpc/perf/isa207-common.c
+++ b/arch/powerpc/perf/isa207-common.c
@@ -59,7 +59,7 @@ static bool is_event_valid(u64 event)
{
u64 valid_mask = EVENT_VALID_MASK;
- if (cpu_has_feature(CPU_FTR_ARCH_300) && !cpu_has_feature(CPU_FTR_POWER9_DD1))
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
valid_mask = p9_EVENT_VALID_MASK;
return !(event & ~valid_mask);
@@ -86,8 +86,6 @@ static void mmcra_sdar_mode(u64 event, unsigned long *mmcra)
* Incase of Power9:
* Marked event: MMCRA[SDAR_MODE] will be set to 0b00 ('No Updates'),
* or if group already have any marked events.
- * Non-Marked events (for DD1):
- * MMCRA[SDAR_MODE] will be set to 0b01
* For rest
* MMCRA[SDAR_MODE] will be set from event code.
* If sdar_mode from event is zero, default to 0b01. Hardware
@@ -96,7 +94,7 @@ static void mmcra_sdar_mode(u64 event, unsigned long *mmcra)
if (cpu_has_feature(CPU_FTR_ARCH_300)) {
if (is_event_marked(event) || (*mmcra & MMCRA_SAMPLE_ENABLE))
*mmcra &= MMCRA_SDAR_MODE_NO_UPDATES;
- else if (!cpu_has_feature(CPU_FTR_POWER9_DD1) && p9_SDAR_MODE(event))
+ else if (p9_SDAR_MODE(event))
*mmcra |= p9_SDAR_MODE(event) << MMCRA_SDAR_MODE_SHIFT;
else
*mmcra |= MMCRA_SDAR_MODE_DCACHE;
@@ -106,7 +104,7 @@ static void mmcra_sdar_mode(u64 event, unsigned long *mmcra)
static u64 thresh_cmp_val(u64 value)
{
- if (cpu_has_feature(CPU_FTR_ARCH_300) && !cpu_has_feature(CPU_FTR_POWER9_DD1))
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
return value << p9_MMCRA_THR_CMP_SHIFT;
return value << MMCRA_THR_CMP_SHIFT;
@@ -114,7 +112,7 @@ static u64 thresh_cmp_val(u64 value)
static unsigned long combine_from_event(u64 event)
{
- if (cpu_has_feature(CPU_FTR_ARCH_300) && !cpu_has_feature(CPU_FTR_POWER9_DD1))
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
return p9_EVENT_COMBINE(event);
return EVENT_COMBINE(event);
@@ -122,7 +120,7 @@ static unsigned long combine_from_event(u64 event)
static unsigned long combine_shift(unsigned long pmc)
{
- if (cpu_has_feature(CPU_FTR_ARCH_300) && !cpu_has_feature(CPU_FTR_POWER9_DD1))
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
return p9_MMCR1_COMBINE_SHIFT(pmc);
return MMCR1_COMBINE_SHIFT(pmc);
diff --git a/arch/powerpc/perf/isa207-common.h b/arch/powerpc/perf/isa207-common.h
index 6a0b586c935a..0028f4b9490d 100644
--- a/arch/powerpc/perf/isa207-common.h
+++ b/arch/powerpc/perf/isa207-common.h
@@ -158,11 +158,6 @@
CNST_PMC_VAL(1) | CNST_PMC_VAL(2) | CNST_PMC_VAL(3) | \
CNST_PMC_VAL(4) | CNST_PMC_VAL(5) | CNST_PMC_VAL(6) | CNST_NC_VAL
-/*
- * Lets restrict use of PMC5 for instruction counting.
- */
-#define P9_DD1_TEST_ADDER (ISA207_TEST_ADDER | CNST_PMC_VAL(5))
-
/* Bits in MMCR1 for PowerISA v2.07 */
#define MMCR1_UNIT_SHIFT(pmc) (60 - (4 * ((pmc) - 1)))
#define MMCR1_COMBINE_SHIFT(pmc) (35 - ((pmc) - 1))
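
For reference, the MMCR1_UNIT_SHIFT macro kept in the context above packs a 4-bit unit field per PMC downward from bit 60 of MMCR1. A small illustrative check of where each field lands, assuming only the macro as quoted:

#include <stdio.h>

/* As in the hunk above: 4-bit unit field per PMC, packed down from bit 60. */
#define MMCR1_UNIT_SHIFT(pmc)	(60 - (4 * ((pmc) - 1)))

int main(void)
{
	for (int pmc = 1; pmc <= 4; pmc++)
		printf("PMC%d unit field: bits %d-%d\n",
		       pmc, MMCR1_UNIT_SHIFT(pmc), MMCR1_UNIT_SHIFT(pmc) + 3);
	return 0;	/* shifts come out as 60, 56, 52, 48 */
}
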
diff --git a/arch/powerpc/perf/power9-pmu.c b/arch/powerpc/perf/power9-pmu.c
index 2ca0b33b4efb..e012b1030a5b 100644
--- a/arch/powerpc/perf/power9-pmu.c
+++ b/arch/powerpc/perf/power9-pmu.c
@@ -219,12 +219,6 @@ static struct attribute_group power9_pmu_events_group = {
.attrs = power9_events_attr,
};
-static const struct attribute_group *power9_isa207_pmu_attr_groups[] = {
- &isa207_pmu_format_group,
- &power9_pmu_events_group,
- NULL,
-};
-
PMU_FORMAT_ATTR(event, "config:0-51");
PMU_FORMAT_ATTR(pmcxsel, "config:0-7");
PMU_FORMAT_ATTR(mark, "config:8");
@@ -267,17 +261,6 @@ static const struct attribute_group *power9_pmu_attr_groups[] = {
NULL,
};
-static int power9_generic_events_dd1[] = {
- [PERF_COUNT_HW_CPU_CYCLES] = PM_CYC,
- [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = PM_ICT_NOSLOT_CYC,
- [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = PM_CMPLU_STALL,
- [PERF_COUNT_HW_INSTRUCTIONS] = PM_INST_DISP,
- [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = PM_BR_CMPL_ALT,
- [PERF_COUNT_HW_BRANCH_MISSES] = PM_BR_MPRED_CMPL,
- [PERF_COUNT_HW_CACHE_REFERENCES] = PM_LD_REF_L1,
- [PERF_COUNT_HW_CACHE_MISSES] = PM_LD_MISS_L1_FIN,
-};
-
static int power9_generic_events[] = {
[PERF_COUNT_HW_CPU_CYCLES] = PM_CYC,
[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = PM_ICT_NOSLOT_CYC,
@@ -439,25 +422,6 @@ static int power9_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
#undef C
-static struct power_pmu power9_isa207_pmu = {
- .name = "POWER9",
- .n_counter = MAX_PMU_COUNTERS,
- .add_fields = ISA207_ADD_FIELDS,
- .test_adder = P9_DD1_TEST_ADDER,
- .compute_mmcr = isa207_compute_mmcr,
- .config_bhrb = power9_config_bhrb,
- .bhrb_filter_map = power9_bhrb_filter_map,
- .get_constraint = isa207_get_constraint,
- .get_alternatives = power9_get_alternatives,
- .disable_pmc = isa207_disable_pmc,
- .flags = PPMU_NO_SIAR | PPMU_ARCH_207S,
- .n_generic = ARRAY_SIZE(power9_generic_events_dd1),
- .generic_events = power9_generic_events_dd1,
- .cache_events = &power9_cache_events,
- .attr_groups = power9_isa207_pmu_attr_groups,
- .bhrb_nr = 32,
-};
-
static struct power_pmu power9_pmu = {
.name = "POWER9",
.n_counter = MAX_PMU_COUNTERS,
@@ -500,23 +464,7 @@ static int __init init_power9_pmu(void)
}
}
- if (cpu_has_feature(CPU_FTR_POWER9_DD1)) {
- /*
- * Since PM_INST_CMPL may not provide right counts in all
- * sampling scenarios in power9 DD1, instead use PM_INST_DISP.
- */
- EVENT_VAR(PM_INST_CMPL, _g).id = PM_INST_DISP;
- /*
- * Power9 DD1 should use PM_BR_CMPL_ALT event code for
- * "branches" to provide correct counter value.
- */
- EVENT_VAR(PM_BR_CMPL, _g).id = PM_BR_CMPL_ALT;
- EVENT_VAR(PM_BR_CMPL, _c).id = PM_BR_CMPL_ALT;
- rc = register_power_pmu(&power9_isa207_pmu);
- } else {
- rc = register_power_pmu(&power9_pmu);
- }
-
+ rc = register_power_pmu(&power9_pmu);
if (rc)
return rc;
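
With the DD1 tables gone, only the regular power9_generic_events array remains, mapping the generic PERF_COUNT_HW_* indices to raw PMU event codes via designated initializers. A toy version of that table-lookup pattern is sketched below; the enum names and event codes are made up for illustration.

#include <stdio.h>

enum { HW_CPU_CYCLES, HW_INSTRUCTIONS, HW_MAX };	/* stand-ins for PERF_COUNT_HW_* */

/* Raw event codes indexed by generic event id (values are illustrative). */
static const unsigned int generic_events[HW_MAX] = {
	[HW_CPU_CYCLES]   = 0x1001e,
	[HW_INSTRUCTIONS] = 0x00002,
};

int main(void)
{
	printf("cycles       -> raw 0x%05x\n", generic_events[HW_CPU_CYCLES]);
	printf("instructions -> raw 0x%05x\n", generic_events[HW_INSTRUCTIONS]);
	return 0;
}
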
diff --git a/arch/powerpc/perf/req-gen/_begin.h b/arch/powerpc/perf/req-gen/_begin.h
index 549f8782c52d..a200b86eba3b 100644
--- a/arch/powerpc/perf/req-gen/_begin.h
+++ b/arch/powerpc/perf/req-gen/_begin.h
@@ -3,6 +3,8 @@
#ifndef POWERPC_PERF_REQ_GEN_H_
#define POWERPC_PERF_REQ_GEN_H_
+#include <linux/stringify.h>
+
#define CAT2_STR_(t, s) __stringify(t/s)
#define CAT2_STR(t, s) CAT2_STR_(t, s)
#define I(...) __VA_ARGS__
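
The <linux/stringify.h> include added above supplies __stringify(), a two-level stringification macro: the outer level lets macro arguments expand before the inner # operator turns them into a string literal (a bare #x would stringify the macro name unexpanded), which is how CAT2_STR builds "dir/file" include paths. A standalone illustration follows; STRINGIFY mirrors __stringify, and the REQUEST_DIR/REQUEST_FILE values are hypothetical.

#include <stdio.h>

#define STRINGIFY_(x)	#x
#define STRINGIFY(x)	STRINGIFY_(x)	/* arguments expand first, then stringify */

#define CAT2_STR_(t, s)	STRINGIFY(t/s)
#define CAT2_STR(t, s)	CAT2_STR_(t, s)

#define REQUEST_DIR	hv-24x7		/* hypothetical, for illustration */
#define REQUEST_FILE	requests.h

int main(void)
{
	/* STRINGIFY_(REQUEST_DIR) alone would print "REQUEST_DIR";
	 * the extra level forces expansion first. */
	puts(STRINGIFY(REQUEST_DIR));			/* hv-24x7 */
	puts(CAT2_STR(REQUEST_DIR, REQUEST_FILE));	/* hv-24x7/requests.h */
	return 0;
}
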
diff --git a/arch/powerpc/perf/req-gen/perf.h b/arch/powerpc/perf/req-gen/perf.h
index 871a9a1766c2..fa9bc804e67a 100644
--- a/arch/powerpc/perf/req-gen/perf.h
+++ b/arch/powerpc/perf/req-gen/perf.h
@@ -3,6 +3,7 @@
#define LINUX_POWERPC_PERF_REQ_GEN_PERF_H_
#include <linux/perf_event.h>
+#include <linux/stringify.h>
#ifndef REQUEST_FILE
#error "REQUEST_FILE must be defined before including"