author	Shannon Zhao <shannon.zhao@linaro.org>	2015-10-28 12:10:30 +0800
committer	Marc Zyngier <marc.zyngier@arm.com>	2016-02-29 18:34:21 +0000
commit	76993739cd6f5b42e881fe3332b9f8eb98cd6907 (patch)
tree	c2a3e47cacfff585748d69b685c6c8a2e5a390e4
parent	arm64: KVM: Add access handler for PMSWINC register (diff)
arm64: KVM: Add helper to handle PMCR register bits
According to the ARMv8 spec, writing 1 to PMCR.E enables all counters that are set in PMCNTENSET, while writing 0 to PMCR.E disables all counters. Writing 1 to PMCR.P resets all event counters (but not PMCCNTR) to zero, and writing 1 to PMCR.C resets PMCCNTR to zero.

Signed-off-by: Shannon Zhao <shannon.zhao@linaro.org>
Reviewed-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
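To make these semantics concrete, here is a minimal guest-side sketch (illustrative only, not part of this patch; guest_pmu_reset_and_start is a hypothetical helper, and the bit definitions match the header change below): setting P and C zeroes the event counters and PMCCNTR, and setting E starts whichever counters PMCNTENSET has enabled.

#define ARMV8_PMU_PMCR_E	(1 << 0)	/* Enable all counters */
#define ARMV8_PMU_PMCR_P	(1 << 1)	/* Reset all event counters */
#define ARMV8_PMU_PMCR_C	(1 << 2)	/* Reset PMCCNTR to zero */

/* Hypothetical guest-side helper: zero every counter, then start the PMU */
static inline void guest_pmu_reset_and_start(void)
{
	unsigned long pmcr;

	asm volatile("mrs %0, pmcr_el0" : "=r" (pmcr));
	pmcr |= ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C | ARMV8_PMU_PMCR_E;
	asm volatile("msr pmcr_el0, %0" : : "r" (pmcr));
	asm volatile("isb");	/* synchronize the PMU state change */
}

When such a write traps, it lands in access_pmcr() below, which in turn calls the new kvm_pmu_handle_pmcr() helper.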
-rw-r--r--	arch/arm64/include/asm/kvm_perf_event.h	4
-rw-r--r--	arch/arm64/kvm/sys_regs.c	1
-rw-r--r--	include/kvm/arm_pmu.h	2
-rw-r--r--	virt/kvm/arm/pmu.c	34
4 files changed, 40 insertions(+), 1 deletion(-)
diff --git a/arch/arm64/include/asm/kvm_perf_event.h b/arch/arm64/include/asm/kvm_perf_event.h
index 62fa60fbc0b3..6d080c07873b 100644
--- a/arch/arm64/include/asm/kvm_perf_event.h
+++ b/arch/arm64/include/asm/kvm_perf_event.h
@@ -29,9 +29,11 @@
#define ARMV8_PMU_PMCR_D (1 << 3) /* CCNT counts every 64th cpu cycle */
#define ARMV8_PMU_PMCR_X (1 << 4) /* Export to ETM */
#define ARMV8_PMU_PMCR_DP (1 << 5) /* Disable CCNT if non-invasive debug*/
+/* Determines which bit of PMCCNTR_EL0 generates an overflow */
+#define ARMV8_PMU_PMCR_LC (1 << 6)
#define ARMV8_PMU_PMCR_N_SHIFT 11 /* Number of counters supported */
#define ARMV8_PMU_PMCR_N_MASK 0x1f
-#define ARMV8_PMU_PMCR_MASK 0x3f /* Mask for writable bits */
+#define ARMV8_PMU_PMCR_MASK 0x7f /* Mask for writable bits */
/*
* PMOVSR: counters overflow flag status reg
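The new LC bit controls whether PMCCNTR_EL0 overflows out of bit 31 or bit 63. A minimal sketch of that selection, mirroring the pmc->bitmask update in pmu.c below (kvm_pmu_cycle_counter_bitmask is a hypothetical name, not from the patch):

/* Hypothetical helper: pick the cycle counter's wrap-around mask */
static u64 kvm_pmu_cycle_counter_bitmask(u64 pmcr)
{
	if (pmcr & ARMV8_PMU_PMCR_LC)
		return 0xffffffffffffffffUL;	/* overflow at bit 63 */
	return 0xffffffffUL;			/* overflow at bit 31 */
}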
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 10e53796926c..12f36ef8caa0 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -467,6 +467,7 @@ static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
val &= ~ARMV8_PMU_PMCR_MASK;
val |= p->regval & ARMV8_PMU_PMCR_MASK;
vcpu_sys_reg(vcpu, PMCR_EL0) = val;
+ kvm_pmu_handle_pmcr(vcpu, val);
} else {
/* PMCR.P & PMCR.C are RAZ */
val = vcpu_sys_reg(vcpu, PMCR_EL0)
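The diff context ends mid-statement here; on the read side, PMCR.P and PMCR.C are write-only command bits and must read as zero. A hedged sketch of the masking that the truncated statement performs:

	val &= ~(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C);
	p->regval = val;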
diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
index 348c4c9d763a..8bc92d119713 100644
--- a/include/kvm/arm_pmu.h
+++ b/include/kvm/arm_pmu.h
@@ -45,6 +45,7 @@ void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val);
+void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
u64 select_idx);
#else
@@ -67,6 +68,7 @@ static inline void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val) {}
+static inline void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu,
u64 data, u64 select_idx) {}
#endif
diff --git a/virt/kvm/arm/pmu.c b/virt/kvm/arm/pmu.c
index 9fc775ef03ec..cda869c609dd 100644
--- a/virt/kvm/arm/pmu.c
+++ b/virt/kvm/arm/pmu.c
@@ -210,6 +210,40 @@ void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
}
}
+/**
+ * kvm_pmu_handle_pmcr - handle writes to the PMCR register
+ * @vcpu: The vcpu pointer
+ * @val: the value the guest writes to the PMCR register
+ */
+void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
+{
+ struct kvm_pmu *pmu = &vcpu->arch.pmu;
+ struct kvm_pmc *pmc;
+ u64 mask;
+ int i;
+
+ mask = kvm_pmu_valid_counter_mask(vcpu);
+ if (val & ARMV8_PMU_PMCR_E) {
+ kvm_pmu_enable_counter(vcpu,
+ vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask);
+ } else {
+ kvm_pmu_disable_counter(vcpu, mask);
+ }
+
+ if (val & ARMV8_PMU_PMCR_C)
+ kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0);
+
+ if (val & ARMV8_PMU_PMCR_P) {
+ for (i = 0; i < ARMV8_PMU_CYCLE_IDX; i++)
+ kvm_pmu_set_counter_value(vcpu, i, 0);
+ }
+
+ if (val & ARMV8_PMU_PMCR_LC) {
+ pmc = &pmu->pmc[ARMV8_PMU_CYCLE_IDX];
+ pmc->bitmask = 0xffffffffffffffffUL;
+ }
+}
+
static bool kvm_pmu_counter_is_enabled(struct kvm_vcpu *vcpu, u64 select_idx)
{
return (vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) &&
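Taken together, kvm_pmu_handle_pmcr() means the set of counters that actually count after a PMCR write is PMCNTENSET_EL0 intersected with the valid-counter mask, gated by PMCR.E. An illustrative reduction of that rule (the function name is hypothetical, not from the patch):

/* Hypothetical illustration: which counters run after a PMCR write? */
static u64 counters_running_after_pmcr_write(u64 pmcr, u64 pmcntenset,
					     u64 valid_mask)
{
	if (!(pmcr & ARMV8_PMU_PMCR_E))
		return 0;			/* E clear: nothing counts */
	return pmcntenset & valid_mask;		/* E set: the enabled subset */
}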