Diffstat (limited to 'arch/s390/kernel/perf_cpum_cf.c')
-rw-r--r--	arch/s390/kernel/perf_cpum_cf.c	224
1 file changed, 27 insertions(+), 197 deletions(-)
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
index d5523adeddbf..e1c54d28713a 100644
--- a/arch/s390/kernel/perf_cpum_cf.c
+++ b/arch/s390/kernel/perf_cpum_cf.c
@@ -10,73 +10,11 @@
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
-#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/export.h>
-#include <asm/ctl_reg.h>
-#include <asm/irq.h>
-#include <asm/cpu_mf.h>
-
-enum cpumf_ctr_set {
- CPUMF_CTR_SET_BASIC = 0, /* Basic Counter Set */
- CPUMF_CTR_SET_USER = 1, /* Problem-State Counter Set */
- CPUMF_CTR_SET_CRYPTO = 2, /* Crypto-Activity Counter Set */
- CPUMF_CTR_SET_EXT = 3, /* Extended Counter Set */
- CPUMF_CTR_SET_MT_DIAG = 4, /* MT-diagnostic Counter Set */
-
- /* Maximum number of counter sets */
- CPUMF_CTR_SET_MAX,
-};
-
-#define CPUMF_LCCTL_ENABLE_SHIFT 16
-#define CPUMF_LCCTL_ACTCTL_SHIFT 0
-static const u64 cpumf_state_ctl[CPUMF_CTR_SET_MAX] = {
- [CPUMF_CTR_SET_BASIC] = 0x02,
- [CPUMF_CTR_SET_USER] = 0x04,
- [CPUMF_CTR_SET_CRYPTO] = 0x08,
- [CPUMF_CTR_SET_EXT] = 0x01,
- [CPUMF_CTR_SET_MT_DIAG] = 0x20,
-};
-
-static void ctr_set_enable(u64 *state, int ctr_set)
-{
- *state |= cpumf_state_ctl[ctr_set] << CPUMF_LCCTL_ENABLE_SHIFT;
-}
-static void ctr_set_disable(u64 *state, int ctr_set)
-{
- *state &= ~(cpumf_state_ctl[ctr_set] << CPUMF_LCCTL_ENABLE_SHIFT);
-}
-static void ctr_set_start(u64 *state, int ctr_set)
-{
- *state |= cpumf_state_ctl[ctr_set] << CPUMF_LCCTL_ACTCTL_SHIFT;
-}
-static void ctr_set_stop(u64 *state, int ctr_set)
-{
- *state &= ~(cpumf_state_ctl[ctr_set] << CPUMF_LCCTL_ACTCTL_SHIFT);
-}
-
-/* Local CPUMF event structure */
-struct cpu_hw_events {
- struct cpumf_ctr_info info;
- atomic_t ctr_set[CPUMF_CTR_SET_MAX];
- u64 state, tx_state;
- unsigned int flags;
- unsigned int txn_flags;
-};
-static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
- .ctr_set = {
- [CPUMF_CTR_SET_BASIC] = ATOMIC_INIT(0),
- [CPUMF_CTR_SET_USER] = ATOMIC_INIT(0),
- [CPUMF_CTR_SET_CRYPTO] = ATOMIC_INIT(0),
- [CPUMF_CTR_SET_EXT] = ATOMIC_INIT(0),
- [CPUMF_CTR_SET_MT_DIAG] = ATOMIC_INIT(0),
- },
- .state = 0,
- .flags = 0,
- .txn_flags = 0,
-};
+#include <asm/cpu_mcf.h>
static enum cpumf_ctr_set get_counter_set(u64 event)
{
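For orientation: the deleted enum, the cpumf_state_ctl[] table (renamed cpumf_ctr_ctl[], see the hunks below), and the ctr_set_*() helpers presumably move into the newly included asm/cpu_mcf.h. A standalone sketch of the bit layout they implement; the combined word is what the kernel later hands to lcctl():

/*
 * Minimal userspace sketch (not part of this patch) of the counter-set
 * control layout: each set owns one control bit; shifted left by 16 it
 * enables the set, unshifted it activates it.
 */
#include <stdint.h>
#include <stdio.h>

enum ctr_set { SET_BASIC, SET_USER, SET_CRYPTO, SET_EXT, SET_MT_DIAG, SET_MAX };

static const uint64_t ctr_ctl[SET_MAX] = {
	[SET_BASIC]   = 0x02,
	[SET_USER]    = 0x04,
	[SET_CRYPTO]  = 0x08,
	[SET_EXT]     = 0x01,
	[SET_MT_DIAG] = 0x20,
};

#define ENABLE_SHIFT	16	/* enable controls live 16 bits up */
#define ACTCTL_SHIFT	0	/* activation controls in the low bits */

int main(void)
{
	uint64_t state = 0;

	/* enable and activate the basic counter set, as
	 * ctr_set_enable() + ctr_set_start() would */
	state |= ctr_ctl[SET_BASIC] << ENABLE_SHIFT;
	state |= ctr_ctl[SET_BASIC] << ACTCTL_SHIFT;

	/* the kernel would now pass this word to lcctl() */
	printf("lcctl operand: 0x%llx\n", (unsigned long long)state); /* 0x20002 */
	return 0;
}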
@@ -98,11 +36,11 @@ static enum cpumf_ctr_set get_counter_set(u64 event)
static int validate_ctr_version(const struct hw_perf_event *hwc)
{
- struct cpu_hw_events *cpuhw;
+ struct cpu_cf_events *cpuhw;
int err = 0;
u16 mtdiag_ctl;
- cpuhw = &get_cpu_var(cpu_hw_events);
+ cpuhw = &get_cpu_var(cpu_cf_events);
/* check required version for counter sets */
switch (hwc->config_base) {
@@ -135,7 +73,7 @@ static int validate_ctr_version(const struct hw_perf_event *hwc)
* Thus, the counters can only be used if SMT is on and the
* counter set is enabled and active.
*/
- mtdiag_ctl = cpumf_state_ctl[CPUMF_CTR_SET_MT_DIAG];
+ mtdiag_ctl = cpumf_ctr_ctl[CPUMF_CTR_SET_MT_DIAG];
if (!((cpuhw->info.auth_ctl & mtdiag_ctl) &&
(cpuhw->info.enable_ctl & mtdiag_ctl) &&
(cpuhw->info.act_ctl & mtdiag_ctl)))
@@ -143,28 +81,28 @@ static int validate_ctr_version(const struct hw_perf_event *hwc)
break;
}
- put_cpu_var(cpu_hw_events);
+ put_cpu_var(cpu_cf_events);
return err;
}
static int validate_ctr_auth(const struct hw_perf_event *hwc)
{
- struct cpu_hw_events *cpuhw;
+ struct cpu_cf_events *cpuhw;
u64 ctrs_state;
int err = 0;
- cpuhw = &get_cpu_var(cpu_hw_events);
+ cpuhw = &get_cpu_var(cpu_cf_events);
/* Check authorization for cpu counter sets.
* If the particular CPU counter set is not authorized,
* return with -ENOENT in order to fall back to other
* PMUs that might suffice the event request.
*/
- ctrs_state = cpumf_state_ctl[hwc->config_base];
+ ctrs_state = cpumf_ctr_ctl[hwc->config_base];
if (!(ctrs_state & cpuhw->info.auth_ctl))
err = -ENOENT;
- put_cpu_var(cpu_hw_events);
+ put_cpu_var(cpu_cf_events);
return err;
}
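Both validation helpers boil down to bitmask tests against the state that qctri() reported into cpuhw->info: ordinary sets only need their bit in auth_ctl, while the MT-diagnostic set must also be enabled and active, which only happens with SMT on. A minimal standalone sketch (local names, not the kernel's):

#include <stdbool.h>
#include <stdint.h>

/* A plain counter set is usable if its control bit is authorized;
 * otherwise the event falls back to other PMUs (-ENOENT). */
static bool set_authorized(uint64_t auth_ctl, uint64_t ctl_bit)
{
	return (auth_ctl & ctl_bit) != 0;
}

/* The MT-diagnostic set additionally has to be enabled and active,
 * or the event is rejected. */
static bool mt_diag_usable(uint64_t auth_ctl, uint64_t enable_ctl,
			   uint64_t act_ctl, uint64_t mtdiag_bit)
{
	return (auth_ctl & mtdiag_bit) && (enable_ctl & mtdiag_bit) &&
	       (act_ctl & mtdiag_bit);
}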
@@ -175,7 +113,7 @@ static int validate_ctr_auth(const struct hw_perf_event *hwc)
*/
static void cpumf_pmu_enable(struct pmu *pmu)
{
- struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
+ struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
int err;
if (cpuhw->flags & PMU_F_ENABLED)
@@ -198,7 +136,7 @@ static void cpumf_pmu_enable(struct pmu *pmu)
*/
static void cpumf_pmu_disable(struct pmu *pmu)
{
- struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
+ struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
int err;
u64 inactive;
@@ -222,86 +160,13 @@ static atomic_t num_events = ATOMIC_INIT(0);
/* Used to avoid races in calling reserve/release_cpumf_hardware */
static DEFINE_MUTEX(pmc_reserve_mutex);
-/* CPU-measurement alerts for the counter facility */
-static void cpumf_measurement_alert(struct ext_code ext_code,
- unsigned int alert, unsigned long unused)
-{
- struct cpu_hw_events *cpuhw;
-
- if (!(alert & CPU_MF_INT_CF_MASK))
- return;
-
- inc_irq_stat(IRQEXT_CMC);
- cpuhw = this_cpu_ptr(&cpu_hw_events);
-
- /* Measurement alerts are shared and might happen when the PMU
- * is not reserved. Ignore these alerts in this case. */
- if (!(cpuhw->flags & PMU_F_RESERVED))
- return;
-
- /* counter authorization change alert */
- if (alert & CPU_MF_INT_CF_CACA)
- qctri(&cpuhw->info);
-
- /* loss of counter data alert */
- if (alert & CPU_MF_INT_CF_LCDA)
- pr_err("CPU[%i] Counter data was lost\n", smp_processor_id());
-
- /* loss of MT counter data alert */
- if (alert & CPU_MF_INT_CF_MTDA)
- pr_warn("CPU[%i] MT counter data was lost\n",
- smp_processor_id());
-}
-
-#define PMC_INIT 0
-#define PMC_RELEASE 1
-static void setup_pmc_cpu(void *flags)
-{
- struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
-
- switch (*((int *) flags)) {
- case PMC_INIT:
- memset(&cpuhw->info, 0, sizeof(cpuhw->info));
- qctri(&cpuhw->info);
- cpuhw->flags |= PMU_F_RESERVED;
- break;
-
- case PMC_RELEASE:
- cpuhw->flags &= ~PMU_F_RESERVED;
- break;
- }
-
- /* Disable CPU counter sets */
- lcctl(0);
-}
-
-/* Initialize the CPU-measurement facility */
-static int reserve_pmc_hardware(void)
-{
- int flags = PMC_INIT;
-
- on_each_cpu(setup_pmc_cpu, &flags, 1);
- irq_subclass_register(IRQ_SUBCLASS_MEASUREMENT_ALERT);
-
- return 0;
-}
-
-/* Release the CPU-measurement facility */
-static void release_pmc_hardware(void)
-{
- int flags = PMC_RELEASE;
-
- on_each_cpu(setup_pmc_cpu, &flags, 1);
- irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT);
-}
-
/* Release the PMU if event is the last perf event */
static void hw_perf_event_destroy(struct perf_event *event)
{
if (!atomic_add_unless(&num_events, -1, 1)) {
mutex_lock(&pmc_reserve_mutex);
if (atomic_dec_return(&num_events) == 0)
- release_pmc_hardware();
+ __kernel_cpumcf_end();
mutex_unlock(&pmc_reserve_mutex);
}
}
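The measurement-alert handler and the per-CPU reserve/release logic removed above presumably move into the common cpum_cf core behind __kernel_cpumcf_begin()/__kernel_cpumcf_end(). A compact standalone model (stand-in names, not the kernel's) of the PMC_INIT/PMC_RELEASE steps that the removed setup_pmc_cpu() performed:

#include <string.h>

#define PMC_INIT	0
#define PMC_RELEASE	1
#define PMU_F_RESERVED	0x01

struct cf_info { unsigned long auth_ctl; /* ... */ };

struct cf_cpu {
	struct cf_info info;
	unsigned int flags;
};

static void query_counter_info(struct cf_info *info) { (void)info; } /* qctri() stand-in */
static void load_counter_controls(unsigned long state) { (void)state; } /* lcctl() stand-in */

static void setup_pmc_cpu_model(struct cf_cpu *cpu, int action)
{
	switch (action) {
	case PMC_INIT:
		/* re-read the counter info and claim the PMU */
		memset(&cpu->info, 0, sizeof(cpu->info));
		query_counter_info(&cpu->info);
		cpu->flags |= PMU_F_RESERVED;
		break;
	case PMC_RELEASE:
		cpu->flags &= ~PMU_F_RESERVED;
		break;
	}
	load_counter_controls(0);	/* disable all counter sets */
}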
@@ -332,7 +197,7 @@ static int __hw_perf_event_init(struct perf_event *event)
struct perf_event_attr *attr = &event->attr;
struct hw_perf_event *hwc = &event->hw;
enum cpumf_ctr_set set;
- int err;
+ int err = 0;
u64 ev;
switch (attr->type) {
@@ -402,12 +267,14 @@ static int __hw_perf_event_init(struct perf_event *event)
/* Initialize for using the CPU-measurement counter facility */
if (!atomic_inc_not_zero(&num_events)) {
mutex_lock(&pmc_reserve_mutex);
- if (atomic_read(&num_events) == 0 && reserve_pmc_hardware())
+ if (atomic_read(&num_events) == 0 && __kernel_cpumcf_begin())
err = -EBUSY;
else
atomic_inc(&num_events);
mutex_unlock(&pmc_reserve_mutex);
}
+ if (err)
+ return err;
event->destroy = hw_perf_event_destroy;
/* Finally, validate version and authorization of the counter set */
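The reservation refcount itself stays in this file. Below is a standalone model of the first/last-event handover, using a plain mutex where the kernel adds lock-free fast paths (atomic_inc_not_zero()/atomic_add_unless()), and including the patch's new early return so a failed __kernel_cpumcf_begin() bails out before the destroy callback is installed:

#include <pthread.h>

static int num_events;
static pthread_mutex_t reserve_mutex = PTHREAD_MUTEX_INITIALIZER;

static int cpumcf_begin_stub(void) { return 0; }	/* __kernel_cpumcf_begin() stand-in */
static void cpumcf_end_stub(void) { }			/* __kernel_cpumcf_end() stand-in */

static int event_init_model(void)
{
	int err = 0;

	pthread_mutex_lock(&reserve_mutex);
	if (num_events == 0 && cpumcf_begin_stub())
		err = -16;			/* -EBUSY */
	else
		num_events++;
	pthread_mutex_unlock(&reserve_mutex);
	if (err)
		return err;	/* new: bail before installing event->destroy */
	/* event->destroy = hw_perf_event_destroy; validation follows */
	return 0;
}

static void event_destroy_model(void)
{
	pthread_mutex_lock(&reserve_mutex);
	if (--num_events == 0)
		cpumcf_end_stub();	/* last event releases the facility */
	pthread_mutex_unlock(&reserve_mutex);
}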
@@ -488,7 +355,7 @@ static void cpumf_pmu_read(struct perf_event *event)
static void cpumf_pmu_start(struct perf_event *event, int flags)
{
- struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
+ struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
struct hw_perf_event *hwc = &event->hw;
if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
@@ -519,7 +386,7 @@ static void cpumf_pmu_start(struct perf_event *event, int flags)
static void cpumf_pmu_stop(struct perf_event *event, int flags)
{
- struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
+ struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
struct hw_perf_event *hwc = &event->hw;
if (!(hwc->state & PERF_HES_STOPPED)) {
@@ -540,7 +407,7 @@ static void cpumf_pmu_stop(struct perf_event *event, int flags)
static int cpumf_pmu_add(struct perf_event *event, int flags)
{
- struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
+ struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
/* Check authorization for the counter set to which this
* counter belongs.
@@ -564,7 +431,7 @@ static int cpumf_pmu_add(struct perf_event *event, int flags)
static void cpumf_pmu_del(struct perf_event *event, int flags)
{
- struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
+ struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
cpumf_pmu_stop(event, PERF_EF_UPDATE);
@@ -592,7 +459,7 @@ static void cpumf_pmu_del(struct perf_event *event, int flags)
*/
static void cpumf_pmu_start_txn(struct pmu *pmu, unsigned int txn_flags)
{
- struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
+ struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
WARN_ON_ONCE(cpuhw->txn_flags); /* txn already in flight */
@@ -612,7 +479,7 @@ static void cpumf_pmu_start_txn(struct pmu *pmu, unsigned int txn_flags)
static void cpumf_pmu_cancel_txn(struct pmu *pmu)
{
unsigned int txn_flags;
- struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
+ struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
WARN_ON_ONCE(!cpuhw->txn_flags); /* no txn in flight */
@@ -633,7 +500,7 @@ static void cpumf_pmu_cancel_txn(struct pmu *pmu)
*/
static int cpumf_pmu_commit_txn(struct pmu *pmu)
{
- struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
+ struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
u64 state;
WARN_ON_ONCE(!cpuhw->txn_flags); /* no txn in flight */
@@ -671,54 +538,17 @@ static struct pmu cpumf_pmu = {
.cancel_txn = cpumf_pmu_cancel_txn,
};
-static int cpumf_pmf_setup(unsigned int cpu, int flags)
-{
- local_irq_disable();
- setup_pmc_cpu(&flags);
- local_irq_enable();
- return 0;
-}
-
-static int s390_pmu_online_cpu(unsigned int cpu)
-{
- return cpumf_pmf_setup(cpu, PMC_INIT);
-}
-
-static int s390_pmu_offline_cpu(unsigned int cpu)
-{
- return cpumf_pmf_setup(cpu, PMC_RELEASE);
-}
-
static int __init cpumf_pmu_init(void)
{
int rc;
- if (!cpum_cf_avail())
+ if (!kernel_cpumcf_avail())
return -ENODEV;
- /* clear bit 15 of cr0 to unauthorize problem-state to
- * extract measurement counters */
- ctl_clear_bit(0, 48);
-
- /* register handler for measurement-alert interruptions */
- rc = register_external_irq(EXT_IRQ_MEASURE_ALERT,
- cpumf_measurement_alert);
- if (rc) {
- pr_err("Registering for CPU-measurement alerts "
- "failed with rc=%i\n", rc);
- return rc;
- }
-
cpumf_pmu.attr_groups = cpumf_cf_event_group();
rc = perf_pmu_register(&cpumf_pmu, "cpum_cf", PERF_TYPE_RAW);
- if (rc) {
+ if (rc)
pr_err("Registering the cpum_cf PMU failed with rc=%i\n", rc);
- unregister_external_irq(EXT_IRQ_MEASURE_ALERT,
- cpumf_measurement_alert);
- return rc;
- }
- return cpuhp_setup_state(CPUHP_AP_PERF_S390_CF_ONLINE,
- "perf/s390/cf:online",
- s390_pmu_online_cpu, s390_pmu_offline_cpu);
+ return rc;
}
-early_initcall(cpumf_pmu_init);
+subsys_initcall(cpumf_pmu_init);
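With the alert IRQ, the CR0 authorization bit, and the CPU-hotplug callbacks gone to the common code, this file now only registers the PMU, which presumably is why the later subsys_initcall suffices in place of early_initcall. For the usage side, a small perf_event_open(2) sketch against the registered raw PMU (assumption: raw config 0 is the basic-set CPU_CYCLES counter on s390):

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <string.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_RAW;	/* cpum_cf registers as the raw PMU */
	attr.config = 0;		/* assumption: basic-set CPU_CYCLES */
	attr.disabled = 1;

	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... workload under measurement ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("raw event 0: %llu\n", (unsigned long long)count);
	close(fd);
	return 0;
}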