author     Linus Torvalds <torvalds@linux-foundation.org>  2012-05-24 16:17:30 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-05-24 16:17:30 -0700
commit     07acfc2a9349a8ce45b236c2624dad452001966b (patch)
tree       c40f3eaac18a8320e65af220979223b5cd632b1b /arch/powerpc/kvm/book3s_hv_rmhandlers.S
parent     Merge tag 'stable/for-linus-3.5-rc0-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/xen (diff)
parent     KVM: make asm-generic/kvm_para.h have an ifdef __KERNEL__ block (diff)
Merge branch 'next' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull KVM changes from Avi Kivity:
 "Changes include additional instruction emulation, page-crossing MMIO, faster dirty logging, preventing the watchdog from killing a stopped guest, module autoload, a new MSI ABI, and some minor optimizations and fixes. Outside x86 we have a small s390 and a very large ppc update.

  Regarding the new (for kvm) rebaseless workflow, some of the patches that were merged before we switch trees had to be rebased, while others are true pulls. In either case the signoffs should be correct now."

Fix up trivial conflicts in Documentation/feature-removal-schedule.txt arch/powerpc/kvm/book3s_segment.S and arch/x86/include/asm/kvm_para.h.

I suspect the kvm_para.h resolution ends up doing the "do I have cpuid" check effectively twice (it was done differently in two different commits), but better safe than sorry ;)

* 'next' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (125 commits)
  KVM: make asm-generic/kvm_para.h have an ifdef __KERNEL__ block
  KVM: s390: onereg for timer related registers
  KVM: s390: epoch difference and TOD programmable field
  KVM: s390: KVM_GET/SET_ONEREG for s390
  KVM: s390: add capability indicating COW support
  KVM: Fix mmu_reload() clash with nested vmx event injection
  KVM: MMU: Don't use RCU for lockless shadow walking
  KVM: VMX: Optimize %ds, %es reload
  KVM: VMX: Fix %ds/%es clobber
  KVM: x86 emulator: convert bsf/bsr instructions to emulate_2op_SrcV_nobyte()
  KVM: VMX: unlike vmcs on fail path
  KVM: PPC: Emulator: clean up SPR reads and writes
  KVM: PPC: Emulator: clean up instruction parsing
  kvm/powerpc: Add new ioctl to retreive server MMU infos
  kvm/book3s: Make kernel emulated H_PUT_TCE available for "PR" KVM
  KVM: PPC: bookehv: Fix r8/r13 storing in level exception handler
  KVM: PPC: Book3S: Enable IRQs during exit handling
  KVM: PPC: Fix PR KVM on POWER7 bare metal
  KVM: PPC: Fix stbux emulation
  KVM: PPC: bookehv: Use lwz/stw instead of PPC_LL/PPC_STL for 32-bit fields
  ...
Diffstat (limited to 'arch/powerpc/kvm/book3s_hv_rmhandlers.S')
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rmhandlers.S  185
1 file changed, 108 insertions, 77 deletions
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index b70bf22a3ff3..a84aafce2a12 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -26,6 +26,7 @@
#include <asm/hvcall.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
+#include <asm/kvm_book3s_asm.h>
/*****************************************************************************
* *
@@ -82,6 +83,7 @@ _GLOBAL(kvmppc_hv_entry_trampoline)
#define XICS_XIRR 4
#define XICS_QIRR 0xc
+#define XICS_IPI 2 /* interrupt source # for IPIs */
/*
* We come in here when wakened from nap mode on a secondary hw thread.
@@ -94,26 +96,54 @@ kvm_start_guest:
subi r1,r1,STACK_FRAME_OVERHEAD
ld r2,PACATOC(r13)
- /* were we napping due to cede? */
- lbz r0,HSTATE_NAPPING(r13)
- cmpwi r0,0
- bne kvm_end_cede
+ li r0,KVM_HWTHREAD_IN_KVM
+ stb r0,HSTATE_HWTHREAD_STATE(r13)
- /* get vcpu pointer */
- ld r4, HSTATE_KVM_VCPU(r13)
+ /* NV GPR values from power7_idle() will no longer be valid */
+ li r0,1
+ stb r0,PACA_NAPSTATELOST(r13)
- /* We got here with an IPI; clear it */
- ld r5, HSTATE_XICS_PHYS(r13)
- li r0, 0xff
- li r6, XICS_QIRR
- li r7, XICS_XIRR
- lwzcix r8, r5, r7 /* ack the interrupt */
+ /* get vcpu pointer, NULL if we have no vcpu to run */
+ ld r4,HSTATE_KVM_VCPU(r13)
+ cmpdi cr1,r4,0
+
+ /* Check the wake reason in SRR1 to see why we got here */
+ mfspr r3,SPRN_SRR1
+ rlwinm r3,r3,44-31,0x7 /* extract wake reason field */
+ cmpwi r3,4 /* was it an external interrupt? */
+ bne 27f
+
+ /*
+ * External interrupt - for now assume it is an IPI, since we
+ * should never get any other interrupts sent to offline threads.
+ * Only do this for secondary threads.
+ */
+ beq cr1,25f
+ lwz r3,VCPU_PTID(r4)
+ cmpwi r3,0
+ beq 27f
+25: ld r5,HSTATE_XICS_PHYS(r13)
+ li r0,0xff
+ li r6,XICS_QIRR
+ li r7,XICS_XIRR
+ lwzcix r8,r5,r7 /* get and ack the interrupt */
sync
- stbcix r0, r5, r6 /* clear it */
- stwcix r8, r5, r7 /* EOI it */
+ clrldi. r9,r8,40 /* get interrupt source ID. */
+ beq 27f /* none there? */
+ cmpwi r9,XICS_IPI
+ bne 26f
+ stbcix r0,r5,r6 /* clear IPI */
+26: stwcix r8,r5,r7 /* EOI the interrupt */
- /* NV GPR values from power7_idle() will no longer be valid */
- stb r0, PACA_NAPSTATELOST(r13)
+27: /* XXX should handle hypervisor maintenance interrupts etc. here */
+
+ /* if we have no vcpu to run, go back to sleep */
+ beq cr1,kvm_no_guest
+
+ /* were we napping due to cede? */
+ lbz r0,HSTATE_NAPPING(r13)
+ cmpwi r0,0
+ bne kvm_end_cede
.global kvmppc_hv_entry
kvmppc_hv_entry:
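
The rewritten kvm_start_guest path above no longer assumes it was woken to run a guest: it records KVM_HWTHREAD_IN_KVM, reads the wake reason out of SRR1, and only treats an external interrupt as an IPI to be acked and EOIed through the XICS XIRR/QIRR registers before checking whether a vcpu is actually queued. As a rough illustration of the bit manipulation done by the rlwinm/cmpwi pair, here is a small stand-alone C sketch (not kernel code; the helper and constant names are invented for this sketch):

#include <stdint.h>

/* After waking from nap, SRR1 bits 42:44 (IBM numbering, i.e. bits 21:19
 * counting from the least significant end) hold the wake reason.  The
 * asm's rlwinm r3,r3,44-31,0x7 rotates that field down to the low three
 * bits, which is equivalent to a shift-and-mask.
 */
static inline unsigned int srr1_wake_reason(uint64_t srr1)
{
	return (srr1 >> 19) & 0x7;
}

/* Hypothetical constant for this sketch: wake reason 4 is an external
 * interrupt, the only case the code above expects on an offline thread,
 * so it is treated as an IPI and acked via XICS before the vcpu check.
 * This mirrors the asm's cmpwi r3,4.
 */
#define WAKE_REASON_EXTERNAL	4u

static inline int woke_for_external_irq(uint64_t srr1)
{
	return srr1_wake_reason(srr1) == WAKE_REASON_EXTERNAL;
}
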
@@ -129,24 +159,15 @@ kvmppc_hv_entry:
mflr r0
std r0, HSTATE_VMHANDLER(r13)
- ld r14, VCPU_GPR(r14)(r4)
- ld r15, VCPU_GPR(r15)(r4)
- ld r16, VCPU_GPR(r16)(r4)
- ld r17, VCPU_GPR(r17)(r4)
- ld r18, VCPU_GPR(r18)(r4)
- ld r19, VCPU_GPR(r19)(r4)
- ld r20, VCPU_GPR(r20)(r4)
- ld r21, VCPU_GPR(r21)(r4)
- ld r22, VCPU_GPR(r22)(r4)
- ld r23, VCPU_GPR(r23)(r4)
- ld r24, VCPU_GPR(r24)(r4)
- ld r25, VCPU_GPR(r25)(r4)
- ld r26, VCPU_GPR(r26)(r4)
- ld r27, VCPU_GPR(r27)(r4)
- ld r28, VCPU_GPR(r28)(r4)
- ld r29, VCPU_GPR(r29)(r4)
- ld r30, VCPU_GPR(r30)(r4)
- ld r31, VCPU_GPR(r31)(r4)
+ /* Set partition DABR */
+ /* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
+ li r5,3
+ ld r6,VCPU_DABR(r4)
+ mtspr SPRN_DABRX,r5
+ mtspr SPRN_DABR,r6
+BEGIN_FTR_SECTION
+ isync
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
/* Load guest PMU registers */
/* R4 is live here (vcpu pointer) */
@@ -185,6 +206,25 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
/* Load up FP, VMX and VSX registers */
bl kvmppc_load_fp
+ ld r14, VCPU_GPR(r14)(r4)
+ ld r15, VCPU_GPR(r15)(r4)
+ ld r16, VCPU_GPR(r16)(r4)
+ ld r17, VCPU_GPR(r17)(r4)
+ ld r18, VCPU_GPR(r18)(r4)
+ ld r19, VCPU_GPR(r19)(r4)
+ ld r20, VCPU_GPR(r20)(r4)
+ ld r21, VCPU_GPR(r21)(r4)
+ ld r22, VCPU_GPR(r22)(r4)
+ ld r23, VCPU_GPR(r23)(r4)
+ ld r24, VCPU_GPR(r24)(r4)
+ ld r25, VCPU_GPR(r25)(r4)
+ ld r26, VCPU_GPR(r26)(r4)
+ ld r27, VCPU_GPR(r27)(r4)
+ ld r28, VCPU_GPR(r28)(r4)
+ ld r29, VCPU_GPR(r29)(r4)
+ ld r30, VCPU_GPR(r30)(r4)
+ ld r31, VCPU_GPR(r31)(r4)
+
BEGIN_FTR_SECTION
/* Switch DSCR to guest value */
ld r5, VCPU_DSCR(r4)
@@ -226,12 +266,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
mtspr SPRN_DAR, r5
mtspr SPRN_DSISR, r6
- /* Set partition DABR */
- li r5,3
- ld r6,VCPU_DABR(r4)
- mtspr SPRN_DABRX,r5
- mtspr SPRN_DABR,r6
-
BEGIN_FTR_SECTION
/* Restore AMR and UAMOR, set AMOR to all 1s */
ld r5,VCPU_AMR(r4)
@@ -925,12 +959,6 @@ BEGIN_FTR_SECTION
mtspr SPRN_AMR,r6
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
- /* Restore host DABR and DABRX */
- ld r5,HSTATE_DABR(r13)
- li r6,7
- mtspr SPRN_DABR,r5
- mtspr SPRN_DABRX,r6
-
/* Switch DSCR back to host value */
BEGIN_FTR_SECTION
mfspr r8, SPRN_DSCR
@@ -969,6 +997,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
std r5, VCPU_SPRG2(r9)
std r6, VCPU_SPRG3(r9)
+ /* save FP state */
+ mr r3, r9
+ bl .kvmppc_save_fp
+
/* Increment yield count if they have a VPA */
ld r8, VCPU_VPA(r9) /* do they have a VPA? */
cmpdi r8, 0
@@ -983,6 +1015,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */
mfspr r4, SPRN_MMCR0 /* save MMCR0 */
mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */
+ mfspr r6, SPRN_MMCRA
+BEGIN_FTR_SECTION
+ /* On P7, clear MMCRA in order to disable SDAR updates */
+ li r7, 0
+ mtspr SPRN_MMCRA, r7
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
isync
beq 21f /* if no VPA, save PMU stuff anyway */
lbz r7, LPPACA_PMCINUSE(r8)
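
In the PMU save sequence above, MMCR0 is frozen first, MMCRA is read, and on POWER7 (the CPU_FTR_ARCH_206 feature section) MMCRA is then cleared so SDAR updates stop while the guest's counters are read out. A minimal C sketch of that ordering (illustration only; the read_*/write_* helpers and is_power7() are hypothetical stand-ins for the privileged mfspr/mtspr accesses and the feature section):

/* Hypothetical SPR accessors for this sketch only. */
extern unsigned long read_mmcr0(void);
extern void write_mmcr0(unsigned long val);
extern unsigned long read_mmcra(void);
extern void write_mmcra(unsigned long val);
extern int is_power7(void);			/* stand-in for CPU_FTR_ARCH_206 */

#define MMCR0_FC	(1ul << 31)		/* freeze counters; matches sldi r3,r3,31 */

static void freeze_guest_pmu(unsigned long *saved_mmcr0, unsigned long *saved_mmcra)
{
	*saved_mmcr0 = read_mmcr0();		/* mfspr r4, SPRN_MMCR0 */
	write_mmcr0(MMCR0_FC);			/* freeze all counters, disable PMU ints */
	*saved_mmcra = read_mmcra();		/* mfspr r6, SPRN_MMCRA */
	if (is_power7())
		write_mmcra(0);			/* disable SDAR updates while saving */
	/* the asm issues an isync here before reading the individual PMCs */
}
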
@@ -991,7 +1029,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
std r3, VCPU_MMCR(r9) /* if not, set saved MMCR0 to FC */
b 22f
21: mfspr r5, SPRN_MMCR1
- mfspr r6, SPRN_MMCRA
std r4, VCPU_MMCR(r9)
std r5, VCPU_MMCR + 8(r9)
std r6, VCPU_MMCR + 16(r9)
@@ -1016,17 +1053,20 @@ BEGIN_FTR_SECTION
stw r11, VCPU_PMC + 28(r9)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
22:
- /* save FP state */
- mr r3, r9
- bl .kvmppc_save_fp
/* Secondary threads go off to take a nap on POWER7 */
BEGIN_FTR_SECTION
- lwz r0,VCPU_PTID(r3)
+ lwz r0,VCPU_PTID(r9)
cmpwi r0,0
bne secondary_nap
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
+ /* Restore host DABR and DABRX */
+ ld r5,HSTATE_DABR(r13)
+ li r6,7
+ mtspr SPRN_DABR,r5
+ mtspr SPRN_DABRX,r6
+
/*
* Reload DEC. HDEC interrupts were disabled when
* we reloaded the host's LPCR value.
@@ -1363,7 +1403,12 @@ bounce_ext_interrupt:
_GLOBAL(kvmppc_h_set_dabr)
std r4,VCPU_DABR(r3)
- mtspr SPRN_DABR,r4
+ /* Work around P7 bug where DABR can get corrupted on mtspr */
+1: mtspr SPRN_DABR,r4
+ mfspr r5, SPRN_DABR
+ cmpd r4, r5
+ bne 1b
+ isync
li r3,0
blr
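
kvmppc_h_set_dabr now rewrites DABR until the value read back matches what was written, working around the POWER7 bug noted in the comment where the mtspr can be corrupted, and finishes with an isync. The same retry pattern in stand-alone C (illustration only; write_dabr()/read_dabr() are hypothetical stand-ins for mtspr/mfspr on SPRN_DABR):

/* Hypothetical SPR accessors for this sketch only. */
extern void write_dabr(unsigned long val);	/* mtspr SPRN_DABR */
extern unsigned long read_dabr(void);		/* mfspr SPRN_DABR */

static void set_dabr_with_workaround(unsigned long dabr)
{
	do {
		write_dabr(dabr);		/* 1: mtspr SPRN_DABR,r4 */
	} while (read_dabr() != dabr);		/*    mfspr/cmpd/bne 1b  */
	/* the asm ends with an isync once the write has stuck */
}
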
@@ -1445,8 +1490,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
* Take a nap until a decrementer or external interrupt occurs,
* with PECE1 (wake on decr) and PECE0 (wake on external) set in LPCR
*/
- li r0,0x80
- stb r0,PACAPROCSTART(r13)
+ li r0,1
+ stb r0,HSTATE_HWTHREAD_REQ(r13)
mfspr r5,SPRN_LPCR
ori r5,r5,LPCR_PECE0 | LPCR_PECE1
mtspr SPRN_LPCR,r5
@@ -1463,26 +1508,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
kvm_end_cede:
/* Woken by external or decrementer interrupt */
ld r1, HSTATE_HOST_R1(r13)
- ld r2, PACATOC(r13)
- /* If we're a secondary thread and we got here by an IPI, ack it */
- ld r4,HSTATE_KVM_VCPU(r13)
- lwz r3,VCPU_PTID(r4)
- cmpwi r3,0
- beq 27f
- mfspr r3,SPRN_SRR1
- rlwinm r3,r3,44-31,0x7 /* extract wake reason field */
- cmpwi r3,4 /* was it an external interrupt? */
- bne 27f
- ld r5, HSTATE_XICS_PHYS(r13)
- li r0,0xff
- li r6,XICS_QIRR
- li r7,XICS_XIRR
- lwzcix r8,r5,r7 /* ack the interrupt */
- sync
- stbcix r0,r5,r6 /* clear it */
- stwcix r8,r5,r7 /* EOI it */
-27:
/* load up FP state */
bl kvmppc_load_fp
@@ -1580,12 +1606,17 @@ secondary_nap:
stwcx. r3, 0, r4
bne 51b
+kvm_no_guest:
+ li r0, KVM_HWTHREAD_IN_NAP
+ stb r0, HSTATE_HWTHREAD_STATE(r13)
+ li r0, 0
+ std r0, HSTATE_KVM_VCPU(r13)
+
li r3, LPCR_PECE0
mfspr r4, SPRN_LPCR
rlwimi r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
mtspr SPRN_LPCR, r4
isync
- li r0, 0
std r0, HSTATE_SCRATCH0(r13)
ptesync
ld r0, HSTATE_SCRATCH0(r13)
@@ -1599,8 +1630,8 @@ secondary_nap:
* r3 = vcpu pointer
*/
_GLOBAL(kvmppc_save_fp)
- mfmsr r9
- ori r8,r9,MSR_FP
+ mfmsr r5
+ ori r8,r5,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
oris r8,r8,MSR_VEC@h
@@ -1649,7 +1680,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
mfspr r6,SPRN_VRSAVE
stw r6,VCPU_VRSAVE(r3)
- mtmsrd r9
+ mtmsrd r5
isync
blr