author    Hendrik Brueckner <brueckner@linux.ibm.com>    2018-08-06 17:43:07 +0200
committer Martin Schwidefsky <schwidefsky@de.ibm.com>    2019-02-22 09:19:47 +0100
commit    3d33345aa3d9ab2ee9b5a5bf2a8842c43603d537 (patch)
tree      72ec596cfd321286b9c714d488fc9b18123feb9a
parent    s390/cpum_cf: move counter set controls to a new header file (diff)
s390/cpum_cf: prepare for in-kernel counter measurements
Prepare the counter facility support to be used by other in-kernel users. The first step introduces the __kernel_cpumcf_begin() and __kernel_cpumcf_end() functions to reserve the counter facility for doing measurements and to release it after the measurements are done.

Signed-off-by: Hendrik Brueckner <brueckner@linux.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
-rw-r--r--	arch/s390/include/asm/cpu_mcf.h	3
-rw-r--r--	arch/s390/kernel/perf_cpum_cf.c	32
2 files changed, 29 insertions(+), 6 deletions(-)
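
For orientation, here is a minimal sketch of how a later in-kernel user might drive the new interface. Only __kernel_cpumcf_begin() and __kernel_cpumcf_end() come from this patch; the measurement helper and function names below are purely illustrative assumptions.

/*
 * Hypothetical usage sketch: reserve the counter facility, run a
 * measurement, then release it again.  do_my_measurement() is a
 * placeholder for whatever counter reads the caller performs while
 * holding the reservation; it is not a real kernel helper.
 */
#include <asm/cpu_mcf.h>

static int my_measurement_run(void)
{
	int err;

	/* Fails with -EBUSY if perf or another user already owns it */
	err = __kernel_cpumcf_begin();
	if (err)
		return err;

	do_my_measurement();	/* placeholder for the actual counter reads */

	/* Hand the facility back so other users can reserve it again */
	__kernel_cpumcf_end();
	return 0;
}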
diff --git a/arch/s390/include/asm/cpu_mcf.h b/arch/s390/include/asm/cpu_mcf.h
index b6e73fbf87d0..63fa74115cba 100644
--- a/arch/s390/include/asm/cpu_mcf.h
+++ b/arch/s390/include/asm/cpu_mcf.h
@@ -49,4 +49,7 @@ static inline void ctr_set_stop(u64 *state, int ctr_set)
	*state &= ~(cpumf_ctr_ctl[ctr_set] << CPUMF_LCCTL_ACTCTL_SHIFT);
}

+int __kernel_cpumcf_begin(void);
+void __kernel_cpumcf_end(void);
+
#endif /* _ASM_S390_CPU_MCF_H */
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
index 66d945d5589b..b2e46b8b881a 100644
--- a/arch/s390/kernel/perf_cpum_cf.c
+++ b/arch/s390/kernel/perf_cpum_cf.c
@@ -236,25 +236,45 @@ static void setup_pmc_cpu(void *flags)
	lcctl(0);
}

-/* Initialize the CPU-measurement facility */
-static int reserve_pmc_hardware(void)
+/* Reserve/release functions for sharing perf hardware */
+static DEFINE_SPINLOCK(cpumcf_owner_lock);
+static void *cpumcf_owner;
+
+/* Initialize the CPU-measurement counter facility */
+int __kernel_cpumcf_begin(void)
{
	int flags = PMC_INIT;
+	int err = 0;
+
+	spin_lock(&cpumcf_owner_lock);
+	if (cpumcf_owner)
+		err = -EBUSY;
+	else
+		cpumcf_owner = __builtin_return_address(0);
+	spin_unlock(&cpumcf_owner_lock);
+	if (err)
+		return err;

	on_each_cpu(setup_pmc_cpu, &flags, 1);
	irq_subclass_register(IRQ_SUBCLASS_MEASUREMENT_ALERT);

	return 0;
}
+EXPORT_SYMBOL(__kernel_cpumcf_begin);

-/* Release the CPU-measurement facility */
-static void release_pmc_hardware(void)
+/* Release the CPU-measurement counter facility */
+void __kernel_cpumcf_end(void)
{
	int flags = PMC_RELEASE;

	on_each_cpu(setup_pmc_cpu, &flags, 1);
	irq_subclass_unregister(IRQ_SUBCLASS_MEASUREMENT_ALERT);
+
+	spin_lock(&cpumcf_owner_lock);
+	cpumcf_owner = NULL;
+	spin_unlock(&cpumcf_owner_lock);
}
+EXPORT_SYMBOL(__kernel_cpumcf_end);

/* Release the PMU if event is the last perf event */
static void hw_perf_event_destroy(struct perf_event *event)
@@ -262,7 +282,7 @@ static void hw_perf_event_destroy(struct perf_event *event)
	if (!atomic_add_unless(&num_events, -1, 1)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_dec_return(&num_events) == 0)
-			release_pmc_hardware();
+			__kernel_cpumcf_end();
		mutex_unlock(&pmc_reserve_mutex);
	}
}
@@ -363,7 +383,7 @@ static int __hw_perf_event_init(struct perf_event *event)
	/* Initialize for using the CPU-measurement counter facility */
	if (!atomic_inc_not_zero(&num_events)) {
		mutex_lock(&pmc_reserve_mutex);
-		if (atomic_read(&num_events) == 0 && reserve_pmc_hardware())
+		if (atomic_read(&num_events) == 0 && __kernel_cpumcf_begin())
			err = -EBUSY;
		else
			atomic_inc(&num_events);