author    Michael Ellerman <mpe@ellerman.id.au>  2022-08-01 22:20:46 +1000
committer Michael Ellerman <mpe@ellerman.id.au>  2022-08-01 22:20:46 +1000
commit    96d50a1d874ca28bd779983ac9c2b791c2b406c0
tree      51d0ffa0f1bac9871edb231f63dd38b6cf6fcef2 /arch/powerpc
parent    powerpc: add support for syscall stack randomization
parent    powerpc/kvm: Remove comment related to moving PMU code to perf subsystem
Merge branch 'topic/ppc-kvm' into next
Bring in a few more commits we are keeping in our KVM topic branch.
Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s.h  |   3 -
-rw-r--r--  arch/powerpc/kvm/Makefile              |   1 +
-rw-r--r--  arch/powerpc/kvm/book3s_hv_builtin.c   |  18 -
-rw-r--r--  arch/powerpc/kvm/book3s_hv_p9_entry.c  | 221 -
-rw-r--r--  arch/powerpc/kvm/book3s_hv_p9_perf.c   | 219 +
5 files changed, 220 insertions(+), 242 deletions(-)
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 91c9f937edcd..bbf5e2c5fe09 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -280,9 +280,6 @@ extern void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu);
extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu);
long kvmppc_read_intr(void);
-void kvmppc_bad_interrupt(struct pt_regs *regs);
-void kvmhv_p9_set_lpcr(struct kvm_split_mode *sip);
-void kvmhv_p9_restore_lpcr(struct kvm_split_mode *sip);
void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr);
void kvmppc_inject_interrupt_hv(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags);
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
index 0cd23ce07d68..5319d889b184 100644
--- a/arch/powerpc/kvm/Makefile
+++ b/arch/powerpc/kvm/Makefile
@@ -86,6 +86,7 @@ kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) += \
 	book3s_hv_rm_mmu.o \
 	book3s_hv_ras.o \
 	book3s_hv_builtin.o \
+	book3s_hv_p9_perf.o \
 	$(kvm-book3s_64-builtin-tm-objs-y) \
 	$(kvm-book3s_64-builtin-xics-objs-y)
 endif
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index 3abaef5f9ac2..da85f046377a 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -490,24 +490,6 @@ static long kvmppc_read_one_intr(bool *again)
return kvmppc_check_passthru(xisr, xirr, again);
}
-void kvmppc_bad_interrupt(struct pt_regs *regs)
-{
- /*
- * 100 could happen at any time, 200 can happen due to invalid real
- * address access for example (or any time due to a hardware problem).
- */
- if (TRAP(regs) == 0x100) {
- get_paca()->in_nmi++;
- system_reset_exception(regs);
- get_paca()->in_nmi--;
- } else if (TRAP(regs) == 0x200) {
- machine_check_exception(regs);
- } else {
- die("Bad interrupt in KVM entry/exit code", regs, SIGABRT);
- }
- panic("Bad KVM trap");
-}
-
static void kvmppc_end_cede(struct kvm_vcpu *vcpu)
{
vcpu->arch.ceded = 0;
diff --git a/arch/powerpc/kvm/book3s_hv_p9_entry.c b/arch/powerpc/kvm/book3s_hv_p9_entry.c
index e740eca45862..34f1db212824 100644
--- a/arch/powerpc/kvm/book3s_hv_p9_entry.c
+++ b/arch/powerpc/kvm/book3s_hv_p9_entry.c
@@ -3,231 +3,10 @@
#include <linux/kvm_host.h>
#include <asm/asm-prototypes.h>
#include <asm/dbell.h>
-#include <asm/kvm_ppc.h>
-#include <asm/pmc.h>
#include <asm/ppc-opcode.h>
#include "book3s_hv.h"
-static void freeze_pmu(unsigned long mmcr0, unsigned long mmcra)
-{
- if (!(mmcr0 & MMCR0_FC))
- goto do_freeze;
- if (mmcra & MMCRA_SAMPLE_ENABLE)
- goto do_freeze;
- if (cpu_has_feature(CPU_FTR_ARCH_31)) {
- if (!(mmcr0 & MMCR0_PMCCEXT))
- goto do_freeze;
- if (!(mmcra & MMCRA_BHRB_DISABLE))
- goto do_freeze;
- }
- return;
-
-do_freeze:
- mmcr0 = MMCR0_FC;
- mmcra = 0;
- if (cpu_has_feature(CPU_FTR_ARCH_31)) {
- mmcr0 |= MMCR0_PMCCEXT;
- mmcra = MMCRA_BHRB_DISABLE;
- }
-
- mtspr(SPRN_MMCR0, mmcr0);
- mtspr(SPRN_MMCRA, mmcra);
- isync();
-}
-
-void switch_pmu_to_guest(struct kvm_vcpu *vcpu,
- struct p9_host_os_sprs *host_os_sprs)
-{
- struct lppaca *lp;
- int load_pmu = 1;
-
- lp = vcpu->arch.vpa.pinned_addr;
- if (lp)
- load_pmu = lp->pmcregs_in_use;
-
- /* Save host */
- if (ppc_get_pmu_inuse()) {
- /*
- * It might be better to put PMU handling (at least for the
- * host) in the perf subsystem because it knows more about what
- * is being used.
- */
-
- /* POWER9, POWER10 do not implement HPMC or SPMC */
-
- host_os_sprs->mmcr0 = mfspr(SPRN_MMCR0);
- host_os_sprs->mmcra = mfspr(SPRN_MMCRA);
-
- freeze_pmu(host_os_sprs->mmcr0, host_os_sprs->mmcra);
-
- host_os_sprs->pmc1 = mfspr(SPRN_PMC1);
- host_os_sprs->pmc2 = mfspr(SPRN_PMC2);
- host_os_sprs->pmc3 = mfspr(SPRN_PMC3);
- host_os_sprs->pmc4 = mfspr(SPRN_PMC4);
- host_os_sprs->pmc5 = mfspr(SPRN_PMC5);
- host_os_sprs->pmc6 = mfspr(SPRN_PMC6);
- host_os_sprs->mmcr1 = mfspr(SPRN_MMCR1);
- host_os_sprs->mmcr2 = mfspr(SPRN_MMCR2);
- host_os_sprs->sdar = mfspr(SPRN_SDAR);
- host_os_sprs->siar = mfspr(SPRN_SIAR);
- host_os_sprs->sier1 = mfspr(SPRN_SIER);
-
- if (cpu_has_feature(CPU_FTR_ARCH_31)) {
- host_os_sprs->mmcr3 = mfspr(SPRN_MMCR3);
- host_os_sprs->sier2 = mfspr(SPRN_SIER2);
- host_os_sprs->sier3 = mfspr(SPRN_SIER3);
- }
- }
-
-#ifdef CONFIG_PPC_PSERIES
- /* After saving PMU, before loading guest PMU, flip pmcregs_in_use */
- if (kvmhv_on_pseries()) {
- barrier();
- get_lppaca()->pmcregs_in_use = load_pmu;
- barrier();
- }
-#endif
-
- /*
- * Load guest. If the VPA said the PMCs are not in use but the guest
- * tried to access them anyway, HFSCR[PM] will be set by the HFAC
- * fault so we can make forward progress.
- */
- if (load_pmu || (vcpu->arch.hfscr & HFSCR_PM)) {
- mtspr(SPRN_PMC1, vcpu->arch.pmc[0]);
- mtspr(SPRN_PMC2, vcpu->arch.pmc[1]);
- mtspr(SPRN_PMC3, vcpu->arch.pmc[2]);
- mtspr(SPRN_PMC4, vcpu->arch.pmc[3]);
- mtspr(SPRN_PMC5, vcpu->arch.pmc[4]);
- mtspr(SPRN_PMC6, vcpu->arch.pmc[5]);
- mtspr(SPRN_MMCR1, vcpu->arch.mmcr[1]);
- mtspr(SPRN_MMCR2, vcpu->arch.mmcr[2]);
- mtspr(SPRN_SDAR, vcpu->arch.sdar);
- mtspr(SPRN_SIAR, vcpu->arch.siar);
- mtspr(SPRN_SIER, vcpu->arch.sier[0]);
-
- if (cpu_has_feature(CPU_FTR_ARCH_31)) {
- mtspr(SPRN_MMCR3, vcpu->arch.mmcr[3]);
- mtspr(SPRN_SIER2, vcpu->arch.sier[1]);
- mtspr(SPRN_SIER3, vcpu->arch.sier[2]);
- }
-
- /* Set MMCRA then MMCR0 last */
- mtspr(SPRN_MMCRA, vcpu->arch.mmcra);
- mtspr(SPRN_MMCR0, vcpu->arch.mmcr[0]);
- /* No isync necessary because we're starting counters */
-
- if (!vcpu->arch.nested &&
- (vcpu->arch.hfscr_permitted & HFSCR_PM))
- vcpu->arch.hfscr |= HFSCR_PM;
- }
-}
-EXPORT_SYMBOL_GPL(switch_pmu_to_guest);
-
-void switch_pmu_to_host(struct kvm_vcpu *vcpu,
- struct p9_host_os_sprs *host_os_sprs)
-{
- struct lppaca *lp;
- int save_pmu = 1;
-
- lp = vcpu->arch.vpa.pinned_addr;
- if (lp)
- save_pmu = lp->pmcregs_in_use;
- if (IS_ENABLED(CONFIG_KVM_BOOK3S_HV_NESTED_PMU_WORKAROUND)) {
- /*
- * Save pmu if this guest is capable of running nested guests.
- * This option is for old L1s that do not set their
- * lppaca->pmcregs_in_use properly when entering their L2.
- */
- save_pmu |= nesting_enabled(vcpu->kvm);
- }
-
- if (save_pmu) {
- vcpu->arch.mmcr[0] = mfspr(SPRN_MMCR0);
- vcpu->arch.mmcra = mfspr(SPRN_MMCRA);
-
- freeze_pmu(vcpu->arch.mmcr[0], vcpu->arch.mmcra);
-
- vcpu->arch.pmc[0] = mfspr(SPRN_PMC1);
- vcpu->arch.pmc[1] = mfspr(SPRN_PMC2);
- vcpu->arch.pmc[2] = mfspr(SPRN_PMC3);
- vcpu->arch.pmc[3] = mfspr(SPRN_PMC4);
- vcpu->arch.pmc[4] = mfspr(SPRN_PMC5);
- vcpu->arch.pmc[5] = mfspr(SPRN_PMC6);
- vcpu->arch.mmcr[1] = mfspr(SPRN_MMCR1);
- vcpu->arch.mmcr[2] = mfspr(SPRN_MMCR2);
- vcpu->arch.sdar = mfspr(SPRN_SDAR);
- vcpu->arch.siar = mfspr(SPRN_SIAR);
- vcpu->arch.sier[0] = mfspr(SPRN_SIER);
-
- if (cpu_has_feature(CPU_FTR_ARCH_31)) {
- vcpu->arch.mmcr[3] = mfspr(SPRN_MMCR3);
- vcpu->arch.sier[1] = mfspr(SPRN_SIER2);
- vcpu->arch.sier[2] = mfspr(SPRN_SIER3);
- }
-
- } else if (vcpu->arch.hfscr & HFSCR_PM) {
- /*
- * The guest accessed PMC SPRs without specifying they should
- * be preserved, or it cleared pmcregs_in_use after the last
- * access. Just ensure they are frozen.
- */
- freeze_pmu(mfspr(SPRN_MMCR0), mfspr(SPRN_MMCRA));
-
- /*
- * Demand-fault PMU register access in the guest.
- *
- * This is used to grab the guest's VPA pmcregs_in_use value
- * and reflect it into the host's VPA in the case of a nested
- * hypervisor.
- *
- * It also avoids having to zero-out SPRs after each guest
- * exit to avoid side-channels.
- *
- * This is cleared here when we exit the guest, so later HFSCR
- * interrupt handling can add it back to run the guest with
- * PM enabled next time.
- */
- if (!vcpu->arch.nested)
- vcpu->arch.hfscr &= ~HFSCR_PM;
- } /* otherwise the PMU should still be frozen */
-
-#ifdef CONFIG_PPC_PSERIES
- if (kvmhv_on_pseries()) {
- barrier();
- get_lppaca()->pmcregs_in_use = ppc_get_pmu_inuse();
- barrier();
- }
-#endif
-
- if (ppc_get_pmu_inuse()) {
- mtspr(SPRN_PMC1, host_os_sprs->pmc1);
- mtspr(SPRN_PMC2, host_os_sprs->pmc2);
- mtspr(SPRN_PMC3, host_os_sprs->pmc3);
- mtspr(SPRN_PMC4, host_os_sprs->pmc4);
- mtspr(SPRN_PMC5, host_os_sprs->pmc5);
- mtspr(SPRN_PMC6, host_os_sprs->pmc6);
- mtspr(SPRN_MMCR1, host_os_sprs->mmcr1);
- mtspr(SPRN_MMCR2, host_os_sprs->mmcr2);
- mtspr(SPRN_SDAR, host_os_sprs->sdar);
- mtspr(SPRN_SIAR, host_os_sprs->siar);
- mtspr(SPRN_SIER, host_os_sprs->sier1);
-
- if (cpu_has_feature(CPU_FTR_ARCH_31)) {
- mtspr(SPRN_MMCR3, host_os_sprs->mmcr3);
- mtspr(SPRN_SIER2, host_os_sprs->sier2);
- mtspr(SPRN_SIER3, host_os_sprs->sier3);
- }
-
- /* Set MMCRA then MMCR0 last */
- mtspr(SPRN_MMCRA, host_os_sprs->mmcra);
- mtspr(SPRN_MMCR0, host_os_sprs->mmcr0);
- isync();
- }
-}
-EXPORT_SYMBOL_GPL(switch_pmu_to_host);
-
static void load_spr_state(struct kvm_vcpu *vcpu,
struct p9_host_os_sprs *host_os_sprs)
{
diff --git a/arch/powerpc/kvm/book3s_hv_p9_perf.c b/arch/powerpc/kvm/book3s_hv_p9_perf.c
new file mode 100644
index 000000000000..44d24cca3df1
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_hv_p9_perf.c
@@ -0,0 +1,219 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <asm/kvm_ppc.h>
+#include <asm/pmc.h>
+
+#include "book3s_hv.h"
+
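+/*
+ * Freeze the PMU unless it is already fully frozen: counters frozen
+ * (MMCR0[FC]), sampling disabled, and on ISA v3.1 also MMCR0[PMCCEXT]
+ * set and BHRB disabled. Otherwise write a frozen configuration and
+ * isync() so later SPR reads see stable counter state.
+ */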
+static void freeze_pmu(unsigned long mmcr0, unsigned long mmcra)
+{
+ if (!(mmcr0 & MMCR0_FC))
+ goto do_freeze;
+ if (mmcra & MMCRA_SAMPLE_ENABLE)
+ goto do_freeze;
+ if (cpu_has_feature(CPU_FTR_ARCH_31)) {
+ if (!(mmcr0 & MMCR0_PMCCEXT))
+ goto do_freeze;
+ if (!(mmcra & MMCRA_BHRB_DISABLE))
+ goto do_freeze;
+ }
+ return;
+
+do_freeze:
+ mmcr0 = MMCR0_FC;
+ mmcra = 0;
+ if (cpu_has_feature(CPU_FTR_ARCH_31)) {
+ mmcr0 |= MMCR0_PMCCEXT;
+ mmcra = MMCRA_BHRB_DISABLE;
+ }
+
+ mtspr(SPRN_MMCR0, mmcr0);
+ mtspr(SPRN_MMCRA, mmcra);
+ isync();
+}
+
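+/*
+ * Switch the PMU from host to guest context: save and freeze the host
+ * PMU state if the host has the PMU in use, flip pmcregs_in_use when
+ * running under a hypervisor, then load the guest PMU state if the
+ * guest's VPA says its PMCs are in use or it has demand-faulted PMU
+ * access (HFSCR[PM]).
+ */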
+void switch_pmu_to_guest(struct kvm_vcpu *vcpu,
+ struct p9_host_os_sprs *host_os_sprs)
+{
+ struct lppaca *lp;
+ int load_pmu = 1;
+
+ lp = vcpu->arch.vpa.pinned_addr;
+ if (lp)
+ load_pmu = lp->pmcregs_in_use;
+
+ /* Save host */
+ if (ppc_get_pmu_inuse()) {
+ /* POWER9, POWER10 do not implement HPMC or SPMC */
+
+ host_os_sprs->mmcr0 = mfspr(SPRN_MMCR0);
+ host_os_sprs->mmcra = mfspr(SPRN_MMCRA);
+
+ freeze_pmu(host_os_sprs->mmcr0, host_os_sprs->mmcra);
+
+ host_os_sprs->pmc1 = mfspr(SPRN_PMC1);
+ host_os_sprs->pmc2 = mfspr(SPRN_PMC2);
+ host_os_sprs->pmc3 = mfspr(SPRN_PMC3);
+ host_os_sprs->pmc4 = mfspr(SPRN_PMC4);
+ host_os_sprs->pmc5 = mfspr(SPRN_PMC5);
+ host_os_sprs->pmc6 = mfspr(SPRN_PMC6);
+ host_os_sprs->mmcr1 = mfspr(SPRN_MMCR1);
+ host_os_sprs->mmcr2 = mfspr(SPRN_MMCR2);
+ host_os_sprs->sdar = mfspr(SPRN_SDAR);
+ host_os_sprs->siar = mfspr(SPRN_SIAR);
+ host_os_sprs->sier1 = mfspr(SPRN_SIER);
+
+ if (cpu_has_feature(CPU_FTR_ARCH_31)) {
+ host_os_sprs->mmcr3 = mfspr(SPRN_MMCR3);
+ host_os_sprs->sier2 = mfspr(SPRN_SIER2);
+ host_os_sprs->sier3 = mfspr(SPRN_SIER3);
+ }
+ }
+
+#ifdef CONFIG_PPC_PSERIES
+ /* After saving PMU, before loading guest PMU, flip pmcregs_in_use */
+ if (kvmhv_on_pseries()) {
+ barrier();
+ get_lppaca()->pmcregs_in_use = load_pmu;
+ barrier();
+ }
+#endif
+
+ /*
+ * Load guest. If the VPA said the PMCs are not in use but the guest
+ * tried to access them anyway, HFSCR[PM] will be set by the HFAC
+ * fault so we can make forward progress.
+ */
+ if (load_pmu || (vcpu->arch.hfscr & HFSCR_PM)) {
+ mtspr(SPRN_PMC1, vcpu->arch.pmc[0]);
+ mtspr(SPRN_PMC2, vcpu->arch.pmc[1]);
+ mtspr(SPRN_PMC3, vcpu->arch.pmc[2]);
+ mtspr(SPRN_PMC4, vcpu->arch.pmc[3]);
+ mtspr(SPRN_PMC5, vcpu->arch.pmc[4]);
+ mtspr(SPRN_PMC6, vcpu->arch.pmc[5]);
+ mtspr(SPRN_MMCR1, vcpu->arch.mmcr[1]);
+ mtspr(SPRN_MMCR2, vcpu->arch.mmcr[2]);
+ mtspr(SPRN_SDAR, vcpu->arch.sdar);
+ mtspr(SPRN_SIAR, vcpu->arch.siar);
+ mtspr(SPRN_SIER, vcpu->arch.sier[0]);
+
+ if (cpu_has_feature(CPU_FTR_ARCH_31)) {
+ mtspr(SPRN_MMCR3, vcpu->arch.mmcr[3]);
+ mtspr(SPRN_SIER2, vcpu->arch.sier[1]);
+ mtspr(SPRN_SIER3, vcpu->arch.sier[2]);
+ }
+
+ /* Set MMCRA then MMCR0 last */
+ mtspr(SPRN_MMCRA, vcpu->arch.mmcra);
+ mtspr(SPRN_MMCR0, vcpu->arch.mmcr[0]);
+ /* No isync necessary because we're starting counters */
+
+ if (!vcpu->arch.nested &&
+ (vcpu->arch.hfscr_permitted & HFSCR_PM))
+ vcpu->arch.hfscr |= HFSCR_PM;
+ }
+}
+EXPORT_SYMBOL_GPL(switch_pmu_to_guest);
+
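+/*
+ * Switch the PMU back from guest to host context: save and freeze the
+ * guest PMU state if the guest had the PMU in use (or the nested PMU
+ * workaround requires it), otherwise just ensure the PMU stays frozen,
+ * then restore the host PMU state if the host has the PMU in use.
+ */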
+void switch_pmu_to_host(struct kvm_vcpu *vcpu,
+ struct p9_host_os_sprs *host_os_sprs)
+{
+ struct lppaca *lp;
+ int save_pmu = 1;
+
+ lp = vcpu->arch.vpa.pinned_addr;
+ if (lp)
+ save_pmu = lp->pmcregs_in_use;
+ if (IS_ENABLED(CONFIG_KVM_BOOK3S_HV_NESTED_PMU_WORKAROUND)) {
+ /*
+ * Save pmu if this guest is capable of running nested guests.
+ * This option is for old L1s that do not set their
+ * lppaca->pmcregs_in_use properly when entering their L2.
+ */
+ save_pmu |= nesting_enabled(vcpu->kvm);
+ }
+
+ if (save_pmu) {
+ vcpu->arch.mmcr[0] = mfspr(SPRN_MMCR0);
+ vcpu->arch.mmcra = mfspr(SPRN_MMCRA);
+
+ freeze_pmu(vcpu->arch.mmcr[0], vcpu->arch.mmcra);
+
+ vcpu->arch.pmc[0] = mfspr(SPRN_PMC1);
+ vcpu->arch.pmc[1] = mfspr(SPRN_PMC2);
+ vcpu->arch.pmc[2] = mfspr(SPRN_PMC3);
+ vcpu->arch.pmc[3] = mfspr(SPRN_PMC4);
+ vcpu->arch.pmc[4] = mfspr(SPRN_PMC5);
+ vcpu->arch.pmc[5] = mfspr(SPRN_PMC6);
+ vcpu->arch.mmcr[1] = mfspr(SPRN_MMCR1);
+ vcpu->arch.mmcr[2] = mfspr(SPRN_MMCR2);
+ vcpu->arch.sdar = mfspr(SPRN_SDAR);
+ vcpu->arch.siar = mfspr(SPRN_SIAR);
+ vcpu->arch.sier[0] = mfspr(SPRN_SIER);
+
+ if (cpu_has_feature(CPU_FTR_ARCH_31)) {
+ vcpu->arch.mmcr[3] = mfspr(SPRN_MMCR3);
+ vcpu->arch.sier[1] = mfspr(SPRN_SIER2);
+ vcpu->arch.sier[2] = mfspr(SPRN_SIER3);
+ }
+
+ } else if (vcpu->arch.hfscr & HFSCR_PM) {
+ /*
+ * The guest accessed PMC SPRs without specifying they should
+ * be preserved, or it cleared pmcregs_in_use after the last
+ * access. Just ensure they are frozen.
+ */
+ freeze_pmu(mfspr(SPRN_MMCR0), mfspr(SPRN_MMCRA));
+
+ /*
+ * Demand-fault PMU register access in the guest.
+ *
+ * This is used to grab the guest's VPA pmcregs_in_use value
+ * and reflect it into the host's VPA in the case of a nested
+ * hypervisor.
+ *
+ * It also avoids having to zero-out SPRs after each guest
+ * exit to avoid side-channels.
+ *
+ * This is cleared here when we exit the guest, so later HFSCR
+ * interrupt handling can add it back to run the guest with
+ * PM enabled next time.
+ */
+ if (!vcpu->arch.nested)
+ vcpu->arch.hfscr &= ~HFSCR_PM;
+ } /* otherwise the PMU should still be frozen */
+
+#ifdef CONFIG_PPC_PSERIES
+ if (kvmhv_on_pseries()) {
+ barrier();
+ get_lppaca()->pmcregs_in_use = ppc_get_pmu_inuse();
+ barrier();
+ }
+#endif
+
+ if (ppc_get_pmu_inuse()) {
+ mtspr(SPRN_PMC1, host_os_sprs->pmc1);
+ mtspr(SPRN_PMC2, host_os_sprs->pmc2);
+ mtspr(SPRN_PMC3, host_os_sprs->pmc3);
+ mtspr(SPRN_PMC4, host_os_sprs->pmc4);
+ mtspr(SPRN_PMC5, host_os_sprs->pmc5);
+ mtspr(SPRN_PMC6, host_os_sprs->pmc6);
+ mtspr(SPRN_MMCR1, host_os_sprs->mmcr1);
+ mtspr(SPRN_MMCR2, host_os_sprs->mmcr2);
+ mtspr(SPRN_SDAR, host_os_sprs->sdar);
+ mtspr(SPRN_SIAR, host_os_sprs->siar);
+ mtspr(SPRN_SIER, host_os_sprs->sier1);
+
+ if (cpu_has_feature(CPU_FTR_ARCH_31)) {
+ mtspr(SPRN_MMCR3, host_os_sprs->mmcr3);
+ mtspr(SPRN_SIER2, host_os_sprs->sier2);
+ mtspr(SPRN_SIER3, host_os_sprs->sier3);
+ }
+
+ /* Set MMCRA then MMCR0 last */
+ mtspr(SPRN_MMCRA, host_os_sprs->mmcra);
+ mtspr(SPRN_MMCR0, host_os_sprs->mmcr0);
+ isync();
+ }
+}
+EXPORT_SYMBOL_GPL(switch_pmu_to_host);
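For orientation, a minimal sketch of how the two exported helpers above are intended to bracket guest entry on the P9 path. The caller below is illustrative only, modelled on the existing call sites in book3s_hv.c; the function name and the kvmhv_vcpu_entry_p9() placeholder are assumptions, not part of this merge.

/* Illustrative caller only; not part of this patch. */
static int example_p9_guest_run(struct kvm_vcpu *vcpu)
{
	struct p9_host_os_sprs host_os_sprs;
	int trap;

	/* Save and freeze the host PMU, load guest PMU state if in use. */
	switch_pmu_to_guest(vcpu, &host_os_sprs);

	/* ... run the guest here, e.g. via kvmhv_vcpu_entry_p9() ... */
	trap = 0;

	/* Save guest PMU state, then restore the host PMU state. */
	switch_pmu_to_host(vcpu, &host_os_sprs);

	return trap;
}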