Diffstat (limited to 'arch/powerpc/kvm')
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c             |  96
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rmhandlers.S  | 229
-rw-r--r--  arch/powerpc/kvm/book3s_interrupts.S     |  80
-rw-r--r--  arch/powerpc/kvm/book3s_pr_papr.c        |   1
-rw-r--r--  arch/powerpc/kvm/book3s_rmhandlers.S     |   1
-rw-r--r--  arch/powerpc/kvm/book3s_segment.S        |   2
-rw-r--r--  arch/powerpc/kvm/booke_interrupts.S      | 277
-rw-r--r--  arch/powerpc/kvm/bookehv_interrupts.S    | 229
8 files changed, 468 insertions, 447 deletions
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index d084e412b3c5..83e929e66f9d 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -268,24 +268,45 @@ static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu,
return err;
}
-static void kvmppc_update_vpa(struct kvm *kvm, struct kvmppc_vpa *vpap)
+static void kvmppc_update_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *vpap)
{
+ struct kvm *kvm = vcpu->kvm;
void *va;
unsigned long nb;
+ unsigned long gpa;
- vpap->update_pending = 0;
- va = NULL;
- if (vpap->next_gpa) {
- va = kvmppc_pin_guest_page(kvm, vpap->next_gpa, &nb);
- if (nb < vpap->len) {
- /*
- * If it's now too short, it must be that userspace
- * has changed the mappings underlying guest memory,
- * so unregister the region.
- */
+ /*
+ * We need to pin the page pointed to by vpap->next_gpa,
+ * but we can't call kvmppc_pin_guest_page under the lock
+ * as it does get_user_pages() and down_read(). So we
+ * have to drop the lock, pin the page, then get the lock
+ * again and check that a new area didn't get registered
+ * in the meantime.
+ */
+ for (;;) {
+ gpa = vpap->next_gpa;
+ spin_unlock(&vcpu->arch.vpa_update_lock);
+ va = NULL;
+ nb = 0;
+ if (gpa)
+ va = kvmppc_pin_guest_page(kvm, vpap->next_gpa, &nb);
+ spin_lock(&vcpu->arch.vpa_update_lock);
+ if (gpa == vpap->next_gpa)
+ break;
+ /* sigh... unpin that one and try again */
+ if (va)
kvmppc_unpin_guest_page(kvm, va);
- va = NULL;
- }
+ }
+
+ vpap->update_pending = 0;
+ if (va && nb < vpap->len) {
+ /*
+ * If it's now too short, it must be that userspace
+ * has changed the mappings underlying guest memory,
+ * so unregister the region.
+ */
+ kvmppc_unpin_guest_page(kvm, va);
+ va = NULL;
}
if (vpap->pinned_addr)
kvmppc_unpin_guest_page(kvm, vpap->pinned_addr);
@@ -296,20 +317,18 @@ static void kvmppc_update_vpa(struct kvm *kvm, struct kvmppc_vpa *vpap)
static void kvmppc_update_vpas(struct kvm_vcpu *vcpu)
{
- struct kvm *kvm = vcpu->kvm;
-
spin_lock(&vcpu->arch.vpa_update_lock);
if (vcpu->arch.vpa.update_pending) {
- kvmppc_update_vpa(kvm, &vcpu->arch.vpa);
+ kvmppc_update_vpa(vcpu, &vcpu->arch.vpa);
init_vpa(vcpu, vcpu->arch.vpa.pinned_addr);
}
if (vcpu->arch.dtl.update_pending) {
- kvmppc_update_vpa(kvm, &vcpu->arch.dtl);
+ kvmppc_update_vpa(vcpu, &vcpu->arch.dtl);
vcpu->arch.dtl_ptr = vcpu->arch.dtl.pinned_addr;
vcpu->arch.dtl_index = 0;
}
if (vcpu->arch.slb_shadow.update_pending)
- kvmppc_update_vpa(kvm, &vcpu->arch.slb_shadow);
+ kvmppc_update_vpa(vcpu, &vcpu->arch.slb_shadow);
spin_unlock(&vcpu->arch.vpa_update_lock);
}
@@ -800,12 +819,39 @@ static int kvmppc_run_core(struct kvmppc_vcore *vc)
struct kvm_vcpu *vcpu, *vcpu0, *vnext;
long ret;
u64 now;
- int ptid, i;
+ int ptid, i, need_vpa_update;
/* don't start if any threads have a signal pending */
- list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
+ need_vpa_update = 0;
+ list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
if (signal_pending(vcpu->arch.run_task))
return 0;
+ need_vpa_update |= vcpu->arch.vpa.update_pending |
+ vcpu->arch.slb_shadow.update_pending |
+ vcpu->arch.dtl.update_pending;
+ }
+
+ /*
+ * Initialize *vc, in particular vc->vcore_state, so we can
+ * drop the vcore lock if necessary.
+ */
+ vc->n_woken = 0;
+ vc->nap_count = 0;
+ vc->entry_exit_count = 0;
+ vc->vcore_state = VCORE_RUNNING;
+ vc->in_guest = 0;
+ vc->napping_threads = 0;
+
+ /*
+ * Updating any of the vpas requires calling kvmppc_pin_guest_page,
+ * which can't be called with any spinlocks held.
+ */
+ if (need_vpa_update) {
+ spin_unlock(&vc->lock);
+ list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
+ kvmppc_update_vpas(vcpu);
+ spin_lock(&vc->lock);
+ }
/*
* Make sure we are running on thread 0, and that
@@ -838,20 +884,10 @@ static int kvmppc_run_core(struct kvmppc_vcore *vc)
if (vcpu->arch.ceded)
vcpu->arch.ptid = ptid++;
- vc->n_woken = 0;
- vc->nap_count = 0;
- vc->entry_exit_count = 0;
- vc->vcore_state = VCORE_RUNNING;
vc->stolen_tb += mftb() - vc->preempt_tb;
- vc->in_guest = 0;
vc->pcpu = smp_processor_id();
- vc->napping_threads = 0;
list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
kvmppc_start_thread(vcpu);
- if (vcpu->arch.vpa.update_pending ||
- vcpu->arch.slb_shadow.update_pending ||
- vcpu->arch.dtl.update_pending)
- kvmppc_update_vpas(vcpu);
kvmppc_create_dtl_entry(vcpu, vc);
}
/* Grab any remaining hw threads so they can't go into the kernel */
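[Editor's note: the comment added to kvmppc_update_vpa() above describes a classic
"drop the lock around a sleeping call, then recheck" idiom. The sketch below shows
that idiom in isolation; it is illustrative only, and my_lock, next_gpa, pin_page()
and unpin_page() are hypothetical stand-ins for vcpu->arch.vpa_update_lock,
vpap->next_gpa, kvmppc_pin_guest_page() and kvmppc_unpin_guest_page().]

	#include <linux/spinlock.h>

	static DEFINE_SPINLOCK(my_lock);
	static unsigned long next_gpa;		/* may be rewritten by a racing registration */

	void *pin_page(unsigned long gpa);	/* may sleep (get_user_pages(), down_read()) */
	void unpin_page(void *va);

	/* Called with my_lock held; returns with my_lock held. */
	static void *pin_next_gpa(void)
	{
		void *va;
		unsigned long gpa;

		for (;;) {
			gpa = next_gpa;			/* snapshot under the lock */
			spin_unlock(&my_lock);
			va = gpa ? pin_page(gpa) : NULL;	/* sleeping call, lock dropped */
			spin_lock(&my_lock);
			if (gpa == next_gpa)		/* nothing raced in: done */
				return va;
			if (va)				/* a new area was registered: undo and retry */
				unpin_page(va);
		}
	}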
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index a84aafce2a12..5a84c8d3d040 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -72,9 +72,6 @@ _GLOBAL(kvmppc_hv_entry_trampoline)
mtsrr1 r6
RFI
-#define ULONG_SIZE 8
-#define VCPU_GPR(n) (VCPU_GPRS + (n * ULONG_SIZE))
-
/******************************************************************************
* *
* Entry code *
@@ -206,24 +203,24 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
/* Load up FP, VMX and VSX registers */
bl kvmppc_load_fp
- ld r14, VCPU_GPR(r14)(r4)
- ld r15, VCPU_GPR(r15)(r4)
- ld r16, VCPU_GPR(r16)(r4)
- ld r17, VCPU_GPR(r17)(r4)
- ld r18, VCPU_GPR(r18)(r4)
- ld r19, VCPU_GPR(r19)(r4)
- ld r20, VCPU_GPR(r20)(r4)
- ld r21, VCPU_GPR(r21)(r4)
- ld r22, VCPU_GPR(r22)(r4)
- ld r23, VCPU_GPR(r23)(r4)
- ld r24, VCPU_GPR(r24)(r4)
- ld r25, VCPU_GPR(r25)(r4)
- ld r26, VCPU_GPR(r26)(r4)
- ld r27, VCPU_GPR(r27)(r4)
- ld r28, VCPU_GPR(r28)(r4)
- ld r29, VCPU_GPR(r29)(r4)
- ld r30, VCPU_GPR(r30)(r4)
- ld r31, VCPU_GPR(r31)(r4)
+ ld r14, VCPU_GPR(R14)(r4)
+ ld r15, VCPU_GPR(R15)(r4)
+ ld r16, VCPU_GPR(R16)(r4)
+ ld r17, VCPU_GPR(R17)(r4)
+ ld r18, VCPU_GPR(R18)(r4)
+ ld r19, VCPU_GPR(R19)(r4)
+ ld r20, VCPU_GPR(R20)(r4)
+ ld r21, VCPU_GPR(R21)(r4)
+ ld r22, VCPU_GPR(R22)(r4)
+ ld r23, VCPU_GPR(R23)(r4)
+ ld r24, VCPU_GPR(R24)(r4)
+ ld r25, VCPU_GPR(R25)(r4)
+ ld r26, VCPU_GPR(R26)(r4)
+ ld r27, VCPU_GPR(R27)(r4)
+ ld r28, VCPU_GPR(R28)(r4)
+ ld r29, VCPU_GPR(R29)(r4)
+ ld r30, VCPU_GPR(R30)(r4)
+ ld r31, VCPU_GPR(R31)(r4)
BEGIN_FTR_SECTION
/* Switch DSCR to guest value */
@@ -547,21 +544,21 @@ fast_guest_return:
mtlr r5
mtcr r6
- ld r0, VCPU_GPR(r0)(r4)
- ld r1, VCPU_GPR(r1)(r4)
- ld r2, VCPU_GPR(r2)(r4)
- ld r3, VCPU_GPR(r3)(r4)
- ld r5, VCPU_GPR(r5)(r4)
- ld r6, VCPU_GPR(r6)(r4)
- ld r7, VCPU_GPR(r7)(r4)
- ld r8, VCPU_GPR(r8)(r4)
- ld r9, VCPU_GPR(r9)(r4)
- ld r10, VCPU_GPR(r10)(r4)
- ld r11, VCPU_GPR(r11)(r4)
- ld r12, VCPU_GPR(r12)(r4)
- ld r13, VCPU_GPR(r13)(r4)
-
- ld r4, VCPU_GPR(r4)(r4)
+ ld r0, VCPU_GPR(R0)(r4)
+ ld r1, VCPU_GPR(R1)(r4)
+ ld r2, VCPU_GPR(R2)(r4)
+ ld r3, VCPU_GPR(R3)(r4)
+ ld r5, VCPU_GPR(R5)(r4)
+ ld r6, VCPU_GPR(R6)(r4)
+ ld r7, VCPU_GPR(R7)(r4)
+ ld r8, VCPU_GPR(R8)(r4)
+ ld r9, VCPU_GPR(R9)(r4)
+ ld r10, VCPU_GPR(R10)(r4)
+ ld r11, VCPU_GPR(R11)(r4)
+ ld r12, VCPU_GPR(R12)(r4)
+ ld r13, VCPU_GPR(R13)(r4)
+
+ ld r4, VCPU_GPR(R4)(r4)
hrfid
b .
@@ -590,22 +587,22 @@ kvmppc_interrupt:
/* Save registers */
- std r0, VCPU_GPR(r0)(r9)
- std r1, VCPU_GPR(r1)(r9)
- std r2, VCPU_GPR(r2)(r9)
- std r3, VCPU_GPR(r3)(r9)
- std r4, VCPU_GPR(r4)(r9)
- std r5, VCPU_GPR(r5)(r9)
- std r6, VCPU_GPR(r6)(r9)
- std r7, VCPU_GPR(r7)(r9)
- std r8, VCPU_GPR(r8)(r9)
+ std r0, VCPU_GPR(R0)(r9)
+ std r1, VCPU_GPR(R1)(r9)
+ std r2, VCPU_GPR(R2)(r9)
+ std r3, VCPU_GPR(R3)(r9)
+ std r4, VCPU_GPR(R4)(r9)
+ std r5, VCPU_GPR(R5)(r9)
+ std r6, VCPU_GPR(R6)(r9)
+ std r7, VCPU_GPR(R7)(r9)
+ std r8, VCPU_GPR(R8)(r9)
ld r0, HSTATE_HOST_R2(r13)
- std r0, VCPU_GPR(r9)(r9)
- std r10, VCPU_GPR(r10)(r9)
- std r11, VCPU_GPR(r11)(r9)
+ std r0, VCPU_GPR(R9)(r9)
+ std r10, VCPU_GPR(R10)(r9)
+ std r11, VCPU_GPR(R11)(r9)
ld r3, HSTATE_SCRATCH0(r13)
lwz r4, HSTATE_SCRATCH1(r13)
- std r3, VCPU_GPR(r12)(r9)
+ std r3, VCPU_GPR(R12)(r9)
stw r4, VCPU_CR(r9)
/* Restore R1/R2 so we can handle faults */
@@ -626,7 +623,7 @@ kvmppc_interrupt:
GET_SCRATCH0(r3)
mflr r4
- std r3, VCPU_GPR(r13)(r9)
+ std r3, VCPU_GPR(R13)(r9)
std r4, VCPU_LR(r9)
/* Unset guest mode */
@@ -810,7 +807,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
lwz r3,VCORE_NAPPING_THREADS(r5)
lwz r4,VCPU_PTID(r9)
li r0,1
- sldi r0,r0,r4
+ sld r0,r0,r4
andc. r3,r3,r0 /* no sense IPI'ing ourselves */
beq 43f
mulli r4,r4,PACA_SIZE /* get paca for thread 0 */
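[Editor's note: the sldi -> sld change above is a genuine bug fix rather than part of
the register renaming. sldi shifts by an immediate, so "sldi r0,r0,r4" shifted by the
constant 4 (the old lowercase r4 define), whereas sld shifts by the value held in a
register, which is what building the napping-threads mask "1 << ptid" requires:]

	li	r0, 1		/* r0 = 1 */
	sld	r0, r0, r4	/* r0 = 1 << ptid, with ptid held in r4 */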
@@ -968,24 +965,24 @@ BEGIN_FTR_SECTION
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
/* Save non-volatile GPRs */
- std r14, VCPU_GPR(r14)(r9)
- std r15, VCPU_GPR(r15)(r9)
- std r16, VCPU_GPR(r16)(r9)
- std r17, VCPU_GPR(r17)(r9)
- std r18, VCPU_GPR(r18)(r9)
- std r19, VCPU_GPR(r19)(r9)
- std r20, VCPU_GPR(r20)(r9)
- std r21, VCPU_GPR(r21)(r9)
- std r22, VCPU_GPR(r22)(r9)
- std r23, VCPU_GPR(r23)(r9)
- std r24, VCPU_GPR(r24)(r9)
- std r25, VCPU_GPR(r25)(r9)
- std r26, VCPU_GPR(r26)(r9)
- std r27, VCPU_GPR(r27)(r9)
- std r28, VCPU_GPR(r28)(r9)
- std r29, VCPU_GPR(r29)(r9)
- std r30, VCPU_GPR(r30)(r9)
- std r31, VCPU_GPR(r31)(r9)
+ std r14, VCPU_GPR(R14)(r9)
+ std r15, VCPU_GPR(R15)(r9)
+ std r16, VCPU_GPR(R16)(r9)
+ std r17, VCPU_GPR(R17)(r9)
+ std r18, VCPU_GPR(R18)(r9)
+ std r19, VCPU_GPR(R19)(r9)
+ std r20, VCPU_GPR(R20)(r9)
+ std r21, VCPU_GPR(R21)(r9)
+ std r22, VCPU_GPR(R22)(r9)
+ std r23, VCPU_GPR(R23)(r9)
+ std r24, VCPU_GPR(R24)(r9)
+ std r25, VCPU_GPR(R25)(r9)
+ std r26, VCPU_GPR(R26)(r9)
+ std r27, VCPU_GPR(R27)(r9)
+ std r28, VCPU_GPR(R28)(r9)
+ std r29, VCPU_GPR(R29)(r9)
+ std r30, VCPU_GPR(R30)(r9)
+ std r31, VCPU_GPR(R31)(r9)
/* Save SPRGs */
mfspr r3, SPRN_SPRG0
@@ -1067,6 +1064,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
mtspr SPRN_DABR,r5
mtspr SPRN_DABRX,r6
+ /* Restore SPRG3 */
+ ld r3,HSTATE_SPRG3(r13)
+ mtspr SPRN_SPRG3,r3
+
/*
* Reload DEC. HDEC interrupts were disabled when
* we reloaded the host's LPCR value.
@@ -1160,7 +1161,7 @@ kvmppc_hdsi:
andi. r0, r11, MSR_DR /* data relocation enabled? */
beq 3f
clrrdi r0, r4, 28
- PPC_SLBFEE_DOT(r5, r0) /* if so, look up SLB */
+ PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */
bne 1f /* if no SLB entry found */
4: std r4, VCPU_FAULT_DAR(r9)
stw r6, VCPU_FAULT_DSISR(r9)
@@ -1234,7 +1235,7 @@ kvmppc_hisi:
andi. r0, r11, MSR_IR /* instruction relocation enabled? */
beq 3f
clrrdi r0, r10, 28
- PPC_SLBFEE_DOT(r5, r0) /* if so, look up SLB */
+ PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */
bne 1f /* if no SLB entry found */
4:
/* Search the hash table. */
@@ -1278,7 +1279,7 @@ kvmppc_hisi:
*/
.globl hcall_try_real_mode
hcall_try_real_mode:
- ld r3,VCPU_GPR(r3)(r9)
+ ld r3,VCPU_GPR(R3)(r9)
andi. r0,r11,MSR_PR
bne hcall_real_cont
clrrdi r3,r3,2
@@ -1291,12 +1292,12 @@ hcall_try_real_mode:
add r3,r3,r4
mtctr r3
mr r3,r9 /* get vcpu pointer */
- ld r4,VCPU_GPR(r4)(r9)
+ ld r4,VCPU_GPR(R4)(r9)
bctrl
cmpdi r3,H_TOO_HARD
beq hcall_real_fallback
ld r4,HSTATE_KVM_VCPU(r13)
- std r3,VCPU_GPR(r3)(r4)
+ std r3,VCPU_GPR(R3)(r4)
ld r10,VCPU_PC(r4)
ld r11,VCPU_MSR(r4)
b fast_guest_return
@@ -1424,7 +1425,7 @@ _GLOBAL(kvmppc_h_cede)
li r0,0 /* set trap to 0 to say hcall is handled */
stw r0,VCPU_TRAP(r3)
li r0,H_SUCCESS
- std r0,VCPU_GPR(r3)(r3)
+ std r0,VCPU_GPR(R3)(r3)
BEGIN_FTR_SECTION
b 2f /* just send it up to host on 970 */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
@@ -1443,7 +1444,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
addi r6,r5,VCORE_NAPPING_THREADS
31: lwarx r4,0,r6
or r4,r4,r0
- PPC_POPCNTW(r7,r4)
+ PPC_POPCNTW(R7,R4)
cmpw r7,r8
bge 2f
stwcx. r4,0,r6
@@ -1464,24 +1465,24 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
* DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR.
*/
/* Save non-volatile GPRs */
- std r14, VCPU_GPR(r14)(r3)
- std r15, VCPU_GPR(r15)(r3)
- std r16, VCPU_GPR(r16)(r3)
- std r17, VCPU_GPR(r17)(r3)
- std r18, VCPU_GPR(r18)(r3)
- std r19, VCPU_GPR(r19)(r3)
- std r20, VCPU_GPR(r20)(r3)
- std r21, VCPU_GPR(r21)(r3)
- std r22, VCPU_GPR(r22)(r3)
- std r23, VCPU_GPR(r23)(r3)
- std r24, VCPU_GPR(r24)(r3)
- std r25, VCPU_GPR(r25)(r3)
- std r26, VCPU_GPR(r26)(r3)
- std r27, VCPU_GPR(r27)(r3)
- std r28, VCPU_GPR(r28)(r3)
- std r29, VCPU_GPR(r29)(r3)
- std r30, VCPU_GPR(r30)(r3)
- std r31, VCPU_GPR(r31)(r3)
+ std r14, VCPU_GPR(R14)(r3)
+ std r15, VCPU_GPR(R15)(r3)
+ std r16, VCPU_GPR(R16)(r3)
+ std r17, VCPU_GPR(R17)(r3)
+ std r18, VCPU_GPR(R18)(r3)
+ std r19, VCPU_GPR(R19)(r3)
+ std r20, VCPU_GPR(R20)(r3)
+ std r21, VCPU_GPR(R21)(r3)
+ std r22, VCPU_GPR(R22)(r3)
+ std r23, VCPU_GPR(R23)(r3)
+ std r24, VCPU_GPR(R24)(r3)
+ std r25, VCPU_GPR(R25)(r3)
+ std r26, VCPU_GPR(R26)(r3)
+ std r27, VCPU_GPR(R27)(r3)
+ std r28, VCPU_GPR(R28)(r3)
+ std r29, VCPU_GPR(R29)(r3)
+ std r30, VCPU_GPR(R30)(r3)
+ std r31, VCPU_GPR(R31)(r3)
/* save FP state */
bl .kvmppc_save_fp
@@ -1513,24 +1514,24 @@ kvm_end_cede:
bl kvmppc_load_fp
/* Load NV GPRS */
- ld r14, VCPU_GPR(r14)(r4)
- ld r15, VCPU_GPR(r15)(r4)
- ld r16, VCPU_GPR(r16)(r4)
- ld r17, VCPU_GPR(r17)(r4)
- ld r18, VCPU_GPR(r18)(r4)
- ld r19, VCPU_GPR(r19)(r4)
- ld r20, VCPU_GPR(r20)(r4)
- ld r21, VCPU_GPR(r21)(r4)
- ld r22, VCPU_GPR(r22)(r4)
- ld r23, VCPU_GPR(r23)(r4)
- ld r24, VCPU_GPR(r24)(r4)
- ld r25, VCPU_GPR(r25)(r4)
- ld r26, VCPU_GPR(r26)(r4)
- ld r27, VCPU_GPR(r27)(r4)
- ld r28, VCPU_GPR(r28)(r4)
- ld r29, VCPU_GPR(r29)(r4)
- ld r30, VCPU_GPR(r30)(r4)
- ld r31, VCPU_GPR(r31)(r4)
+ ld r14, VCPU_GPR(R14)(r4)
+ ld r15, VCPU_GPR(R15)(r4)
+ ld r16, VCPU_GPR(R16)(r4)
+ ld r17, VCPU_GPR(R17)(r4)
+ ld r18, VCPU_GPR(R18)(r4)
+ ld r19, VCPU_GPR(R19)(r4)
+ ld r20, VCPU_GPR(R20)(r4)
+ ld r21, VCPU_GPR(R21)(r4)
+ ld r22, VCPU_GPR(R22)(r4)
+ ld r23, VCPU_GPR(R23)(r4)
+ ld r24, VCPU_GPR(R24)(r4)
+ ld r25, VCPU_GPR(R25)(r4)
+ ld r26, VCPU_GPR(R26)(r4)
+ ld r27, VCPU_GPR(R27)(r4)
+ ld r28, VCPU_GPR(R28)(r4)
+ ld r29, VCPU_GPR(R29)(r4)
+ ld r30, VCPU_GPR(R30)(r4)
+ ld r31, VCPU_GPR(R31)(r4)
/* clear our bit in vcore->napping_threads */
33: ld r5,HSTATE_KVM_VCORE(r13)
@@ -1649,7 +1650,7 @@ BEGIN_FTR_SECTION
reg = 0
.rept 32
li r6,reg*16+VCPU_VSRS
- STXVD2X(reg,r6,r3)
+ STXVD2X(reg,R6,R3)
reg = reg + 1
.endr
FTR_SECTION_ELSE
@@ -1711,7 +1712,7 @@ BEGIN_FTR_SECTION
reg = 0
.rept 32
li r7,reg*16+VCPU_VSRS
- LXVD2X(reg,r7,r4)
+ LXVD2X(reg,R7,R4)
reg = reg + 1
.endr
FTR_SECTION_ELSE
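[Editor's note: throughout this patch the GPR save/restore macros switch from
lowercase rN arguments to symbolic R0..R31 names, and the per-file VCPU_GPR() /
ULONG_SIZE definitions removed above and below are replaced by a shared one. A
plausible shape for that shared definition, shown here only as an assumption about
the common header and not a quote from it, is a token-pasting wrapper that maps the
symbolic name back to the register number, much like the HOST_NV_GPR() change in
booke_interrupts.S later in this diff:]

	/* Assumed sketch, not the actual header contents. */
	#define __REG_R0	0
	#define __REG_R14	14
	#define __REG_R31	31		/* ...one such define per register */

	#define ULONG_SIZE	8		/* sizeof(unsigned long) on ppc64 */
	#define __VCPU_GPR(n)	(VCPU_GPRS + ((n) * ULONG_SIZE))
	#define VCPU_GPR(n)	__VCPU_GPR(__REG_##n)

	/* VCPU_GPR(R14) -> __VCPU_GPR(__REG_R14) -> (VCPU_GPRS + (14 * ULONG_SIZE)) */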
diff --git a/arch/powerpc/kvm/book3s_interrupts.S b/arch/powerpc/kvm/book3s_interrupts.S
index 3e35383bdb21..48cbbf862958 100644
--- a/arch/powerpc/kvm/book3s_interrupts.S
+++ b/arch/powerpc/kvm/book3s_interrupts.S
@@ -25,38 +25,30 @@
#include <asm/exception-64s.h>
#if defined(CONFIG_PPC_BOOK3S_64)
-
-#define ULONG_SIZE 8
#define FUNC(name) GLUE(.,name)
-
#elif defined(CONFIG_PPC_BOOK3S_32)
-
-#define ULONG_SIZE 4
#define FUNC(name) name
-
#endif /* CONFIG_PPC_BOOK3S_XX */
-
-#define VCPU_GPR(n) (VCPU_GPRS + (n * ULONG_SIZE))
#define VCPU_LOAD_NVGPRS(vcpu) \
- PPC_LL r14, VCPU_GPR(r14)(vcpu); \
- PPC_LL r15, VCPU_GPR(r15)(vcpu); \
- PPC_LL r16, VCPU_GPR(r16)(vcpu); \
- PPC_LL r17, VCPU_GPR(r17)(vcpu); \
- PPC_LL r18, VCPU_GPR(r18)(vcpu); \
- PPC_LL r19, VCPU_GPR(r19)(vcpu); \
- PPC_LL r20, VCPU_GPR(r20)(vcpu); \
- PPC_LL r21, VCPU_GPR(r21)(vcpu); \
- PPC_LL r22, VCPU_GPR(r22)(vcpu); \
- PPC_LL r23, VCPU_GPR(r23)(vcpu); \
- PPC_LL r24, VCPU_GPR(r24)(vcpu); \
- PPC_LL r25, VCPU_GPR(r25)(vcpu); \
- PPC_LL r26, VCPU_GPR(r26)(vcpu); \
- PPC_LL r27, VCPU_GPR(r27)(vcpu); \
- PPC_LL r28, VCPU_GPR(r28)(vcpu); \
- PPC_LL r29, VCPU_GPR(r29)(vcpu); \
- PPC_LL r30, VCPU_GPR(r30)(vcpu); \
- PPC_LL r31, VCPU_GPR(r31)(vcpu); \
+ PPC_LL r14, VCPU_GPR(R14)(vcpu); \
+ PPC_LL r15, VCPU_GPR(R15)(vcpu); \
+ PPC_LL r16, VCPU_GPR(R16)(vcpu); \
+ PPC_LL r17, VCPU_GPR(R17)(vcpu); \
+ PPC_LL r18, VCPU_GPR(R18)(vcpu); \
+ PPC_LL r19, VCPU_GPR(R19)(vcpu); \
+ PPC_LL r20, VCPU_GPR(R20)(vcpu); \
+ PPC_LL r21, VCPU_GPR(R21)(vcpu); \
+ PPC_LL r22, VCPU_GPR(R22)(vcpu); \
+ PPC_LL r23, VCPU_GPR(R23)(vcpu); \
+ PPC_LL r24, VCPU_GPR(R24)(vcpu); \
+ PPC_LL r25, VCPU_GPR(R25)(vcpu); \
+ PPC_LL r26, VCPU_GPR(R26)(vcpu); \
+ PPC_LL r27, VCPU_GPR(R27)(vcpu); \
+ PPC_LL r28, VCPU_GPR(R28)(vcpu); \
+ PPC_LL r29, VCPU_GPR(R29)(vcpu); \
+ PPC_LL r30, VCPU_GPR(R30)(vcpu); \
+ PPC_LL r31, VCPU_GPR(R31)(vcpu); \
/*****************************************************************************
* *
@@ -131,24 +123,24 @@ kvmppc_handler_highmem:
/* R7 = vcpu */
PPC_LL r7, GPR4(r1)
- PPC_STL r14, VCPU_GPR(r14)(r7)
- PPC_STL r15, VCPU_GPR(r15)(r7)
- PPC_STL r16, VCPU_GPR(r16)(r7)
- PPC_STL r17, VCPU_GPR(r17)(r7)
- PPC_STL r18, VCPU_GPR(r18)(r7)
- PPC_STL r19, VCPU_GPR(r19)(r7)
- PPC_STL r20, VCPU_GPR(r20)(r7)
- PPC_STL r21, VCPU_GPR(r21)(r7)
- PPC_STL r22, VCPU_GPR(r22)(r7)
- PPC_STL r23, VCPU_GPR(r23)(r7)
- PPC_STL r24, VCPU_GPR(r24)(r7)
- PPC_STL r25, VCPU_GPR(r25)(r7)
- PPC_STL r26, VCPU_GPR(r26)(r7)
- PPC_STL r27, VCPU_GPR(r27)(r7)
- PPC_STL r28, VCPU_GPR(r28)(r7)
- PPC_STL r29, VCPU_GPR(r29)(r7)
- PPC_STL r30, VCPU_GPR(r30)(r7)
- PPC_STL r31, VCPU_GPR(r31)(r7)
+ PPC_STL r14, VCPU_GPR(R14)(r7)
+ PPC_STL r15, VCPU_GPR(R15)(r7)
+ PPC_STL r16, VCPU_GPR(R16)(r7)
+ PPC_STL r17, VCPU_GPR(R17)(r7)
+ PPC_STL r18, VCPU_GPR(R18)(r7)
+ PPC_STL r19, VCPU_GPR(R19)(r7)
+ PPC_STL r20, VCPU_GPR(R20)(r7)
+ PPC_STL r21, VCPU_GPR(R21)(r7)
+ PPC_STL r22, VCPU_GPR(R22)(r7)
+ PPC_STL r23, VCPU_GPR(R23)(r7)
+ PPC_STL r24, VCPU_GPR(R24)(r7)
+ PPC_STL r25, VCPU_GPR(R25)(r7)
+ PPC_STL r26, VCPU_GPR(R26)(r7)
+ PPC_STL r27, VCPU_GPR(R27)(r7)
+ PPC_STL r28, VCPU_GPR(R28)(r7)
+ PPC_STL r29, VCPU_GPR(R29)(r7)
+ PPC_STL r30, VCPU_GPR(R30)(r7)
+ PPC_STL r31, VCPU_GPR(R31)(r7)
/* Pass the exit number as 3rd argument to kvmppc_handle_exit */
mr r5, r12
diff --git a/arch/powerpc/kvm/book3s_pr_papr.c b/arch/powerpc/kvm/book3s_pr_papr.c
index 3ff9013d6e79..ee02b30878ed 100644
--- a/arch/powerpc/kvm/book3s_pr_papr.c
+++ b/arch/powerpc/kvm/book3s_pr_papr.c
@@ -241,6 +241,7 @@ int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd)
case H_PUT_TCE:
return kvmppc_h_pr_put_tce(vcpu);
case H_CEDE:
+ vcpu->arch.shared->msr |= MSR_EE;
kvm_vcpu_block(vcpu);
clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
vcpu->stat.halt_wakeup++;
diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S
index 34187585c507..ab523f3c1731 100644
--- a/arch/powerpc/kvm/book3s_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_rmhandlers.S
@@ -37,7 +37,6 @@
#if defined(CONFIG_PPC_BOOK3S_64)
#define FUNC(name) GLUE(.,name)
-#define MTMSR_EERI(reg) mtmsrd (reg),1
.globl kvmppc_skip_interrupt
kvmppc_skip_interrupt:
diff --git a/arch/powerpc/kvm/book3s_segment.S b/arch/powerpc/kvm/book3s_segment.S
index 798491a268b3..1abe4788191a 100644
--- a/arch/powerpc/kvm/book3s_segment.S
+++ b/arch/powerpc/kvm/book3s_segment.S
@@ -23,7 +23,6 @@
#define GET_SHADOW_VCPU(reg) \
mr reg, r13
-#define MTMSR_EERI(reg) mtmsrd (reg),1
#elif defined(CONFIG_PPC_BOOK3S_32)
@@ -31,7 +30,6 @@
tophys(reg, r2); \
lwz reg, (THREAD + THREAD_KVM_SVCPU)(reg); \
tophys(reg, reg)
-#define MTMSR_EERI(reg) mtmsr (reg)
#endif
diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S
index 09456c4719f3..bb46b32f9813 100644
--- a/arch/powerpc/kvm/booke_interrupts.S
+++ b/arch/powerpc/kvm/booke_interrupts.S
@@ -25,8 +25,6 @@
#include <asm/page.h>
#include <asm/asm-offsets.h>
-#define VCPU_GPR(n) (VCPU_GPRS + (n * 4))
-
/* The host stack layout: */
#define HOST_R1 0 /* Implied by stwu. */
#define HOST_CALLEE_LR 4
@@ -36,8 +34,9 @@
#define HOST_R2 12
#define HOST_CR 16
#define HOST_NV_GPRS 20
-#define HOST_NV_GPR(n) (HOST_NV_GPRS + ((n - 14) * 4))
-#define HOST_MIN_STACK_SIZE (HOST_NV_GPR(31) + 4)
+#define __HOST_NV_GPR(n) (HOST_NV_GPRS + ((n - 14) * 4))
+#define HOST_NV_GPR(n) __HOST_NV_GPR(__REG_##n)
+#define HOST_MIN_STACK_SIZE (HOST_NV_GPR(R31) + 4)
#define HOST_STACK_SIZE (((HOST_MIN_STACK_SIZE + 15) / 16) * 16) /* Align. */
#define HOST_STACK_LR (HOST_STACK_SIZE + 4) /* In caller stack frame. */
@@ -58,12 +57,12 @@ _GLOBAL(kvmppc_handler_\ivor_nr)
/* Get pointer to vcpu and record exit number. */
mtspr \scratch , r4
mfspr r4, SPRN_SPRG_RVCPU
- stw r3, VCPU_GPR(r3)(r4)
- stw r5, VCPU_GPR(r5)(r4)
- stw r6, VCPU_GPR(r6)(r4)
+ stw r3, VCPU_GPR(R3)(r4)
+ stw r5, VCPU_GPR(R5)(r4)
+ stw r6, VCPU_GPR(R6)(r4)
mfspr r3, \scratch
mfctr r5
- stw r3, VCPU_GPR(r4)(r4)
+ stw r3, VCPU_GPR(R4)(r4)
stw r5, VCPU_CTR(r4)
mfspr r3, \srr0
lis r6, kvmppc_resume_host@h
@@ -106,9 +105,9 @@ _GLOBAL(kvmppc_handler_len)
_GLOBAL(kvmppc_resume_host)
mfcr r3
stw r3, VCPU_CR(r4)
- stw r7, VCPU_GPR(r7)(r4)
- stw r8, VCPU_GPR(r8)(r4)
- stw r9, VCPU_GPR(r9)(r4)
+ stw r7, VCPU_GPR(R7)(r4)
+ stw r8, VCPU_GPR(R8)(r4)
+ stw r9, VCPU_GPR(R9)(r4)
li r6, 1
slw r6, r6, r5
@@ -138,23 +137,23 @@ _GLOBAL(kvmppc_resume_host)
isync
stw r9, VCPU_LAST_INST(r4)
- stw r15, VCPU_GPR(r15)(r4)
- stw r16, VCPU_GPR(r16)(r4)
- stw r17, VCPU_GPR(r17)(r4)
- stw r18, VCPU_GPR(r18)(r4)
- stw r19, VCPU_GPR(r19)(r4)
- stw r20, VCPU_GPR(r20)(r4)
- stw r21, VCPU_GPR(r21)(r4)
- stw r22, VCPU_GPR(r22)(r4)
- stw r23, VCPU_GPR(r23)(r4)
- stw r24, VCPU_GPR(r24)(r4)
- stw r25, VCPU_GPR(r25)(r4)
- stw r26, VCPU_GPR(r26)(r4)
- stw r27, VCPU_GPR(r27)(r4)
- stw r28, VCPU_GPR(r28)(r4)
- stw r29, VCPU_GPR(r29)(r4)
- stw r30, VCPU_GPR(r30)(r4)
- stw r31, VCPU_GPR(r31)(r4)
+ stw r15, VCPU_GPR(R15)(r4)
+ stw r16, VCPU_GPR(R16)(r4)
+ stw r17, VCPU_GPR(R17)(r4)
+ stw r18, VCPU_GPR(R18)(r4)
+ stw r19, VCPU_GPR(R19)(r4)
+ stw r20, VCPU_GPR(R20)(r4)
+ stw r21, VCPU_GPR(R21)(r4)
+ stw r22, VCPU_GPR(R22)(r4)
+ stw r23, VCPU_GPR(R23)(r4)
+ stw r24, VCPU_GPR(R24)(r4)
+ stw r25, VCPU_GPR(R25)(r4)
+ stw r26, VCPU_GPR(R26)(r4)
+ stw r27, VCPU_GPR(R27)(r4)
+ stw r28, VCPU_GPR(R28)(r4)
+ stw r29, VCPU_GPR(R29)(r4)
+ stw r30, VCPU_GPR(R30)(r4)
+ stw r31, VCPU_GPR(R31)(r4)
..skip_inst_copy:
/* Also grab DEAR and ESR before the host can clobber them. */
@@ -172,14 +171,14 @@ _GLOBAL(kvmppc_resume_host)
..skip_esr:
/* Save remaining volatile guest register state to vcpu. */
- stw r0, VCPU_GPR(r0)(r4)
- stw r1, VCPU_GPR(r1)(r4)
- stw r2, VCPU_GPR(r2)(r4)
- stw r10, VCPU_GPR(r10)(r4)
- stw r11, VCPU_GPR(r11)(r4)
- stw r12, VCPU_GPR(r12)(r4)
- stw r13, VCPU_GPR(r13)(r4)
- stw r14, VCPU_GPR(r14)(r4) /* We need a NV GPR below. */
+ stw r0, VCPU_GPR(R0)(r4)
+ stw r1, VCPU_GPR(R1)(r4)
+ stw r2, VCPU_GPR(R2)(r4)
+ stw r10, VCPU_GPR(R10)(r4)
+ stw r11, VCPU_GPR(R11)(r4)
+ stw r12, VCPU_GPR(R12)(r4)
+ stw r13, VCPU_GPR(R13)(r4)
+ stw r14, VCPU_GPR(R14)(r4) /* We need a NV GPR below. */
mflr r3
stw r3, VCPU_LR(r4)
mfxer r3
@@ -213,28 +212,28 @@ _GLOBAL(kvmppc_resume_host)
/* Restore vcpu pointer and the nonvolatiles we used. */
mr r4, r14
- lwz r14, VCPU_GPR(r14)(r4)
+ lwz r14, VCPU_GPR(R14)(r4)
/* Sometimes instruction emulation must restore complete GPR state. */
andi. r5, r3, RESUME_FLAG_NV
beq ..skip_nv_load
- lwz r15, VCPU_GPR(r15)(r4)
- lwz r16, VCPU_GPR(r16)(r4)
- lwz r17, VCPU_GPR(r17)(r4)
- lwz r18, VCPU_GPR(r18)(r4)
- lwz r19, VCPU_GPR(r19)(r4)
- lwz r20, VCPU_GPR(r20)(r4)
- lwz r21, VCPU_GPR(r21)(r4)
- lwz r22, VCPU_GPR(r22)(r4)
- lwz r23, VCPU_GPR(r23)(r4)
- lwz r24, VCPU_GPR(r24)(r4)
- lwz r25, VCPU_GPR(r25)(r4)
- lwz r26, VCPU_GPR(r26)(r4)
- lwz r27, VCPU_GPR(r27)(r4)
- lwz r28, VCPU_GPR(r28)(r4)
- lwz r29, VCPU_GPR(r29)(r4)
- lwz r30, VCPU_GPR(r30)(r4)
- lwz r31, VCPU_GPR(r31)(r4)
+ lwz r15, VCPU_GPR(R15)(r4)
+ lwz r16, VCPU_GPR(R16)(r4)
+ lwz r17, VCPU_GPR(R17)(r4)
+ lwz r18, VCPU_GPR(R18)(r4)
+ lwz r19, VCPU_GPR(R19)(r4)
+ lwz r20, VCPU_GPR(R20)(r4)
+ lwz r21, VCPU_GPR(R21)(r4)
+ lwz r22, VCPU_GPR(R22)(r4)
+ lwz r23, VCPU_GPR(R23)(r4)
+ lwz r24, VCPU_GPR(R24)(r4)
+ lwz r25, VCPU_GPR(R25)(r4)
+ lwz r26, VCPU_GPR(R26)(r4)
+ lwz r27, VCPU_GPR(R27)(r4)
+ lwz r28, VCPU_GPR(R28)(r4)
+ lwz r29, VCPU_GPR(R29)(r4)
+ lwz r30, VCPU_GPR(R30)(r4)
+ lwz r31, VCPU_GPR(R31)(r4)
..skip_nv_load:
/* Should we return to the guest? */
@@ -256,43 +255,43 @@ heavyweight_exit:
/* We already saved guest volatile register state; now save the
* non-volatiles. */
- stw r15, VCPU_GPR(r15)(r4)
- stw r16, VCPU_GPR(r16)(r4)
- stw r17, VCPU_GPR(r17)(r4)
- stw r18, VCPU_GPR(r18)(r4)
- stw r19, VCPU_GPR(r19)(r4)
- stw r20, VCPU_GPR(r20)(r4)
- stw r21, VCPU_GPR(r21)(r4)
- stw r22, VCPU_GPR(r22)(r4)
- stw r23, VCPU_GPR(r23)(r4)
- stw r24, VCPU_GPR(r24)(r4)
- stw r25, VCPU_GPR(r25)(r4)
- stw r26, VCPU_GPR(r26)(r4)
- stw r27, VCPU_GPR(r27)(r4)
- stw r28, VCPU_GPR(r28)(r4)
- stw r29, VCPU_GPR(r29)(r4)
- stw r30, VCPU_GPR(r30)(r4)
- stw r31, VCPU_GPR(r31)(r4)
+ stw r15, VCPU_GPR(R15)(r4)
+ stw r16, VCPU_GPR(R16)(r4)
+ stw r17, VCPU_GPR(R17)(r4)
+ stw r18, VCPU_GPR(R18)(r4)
+ stw r19, VCPU_GPR(R19)(r4)
+ stw r20, VCPU_GPR(R20)(r4)
+ stw r21, VCPU_GPR(R21)(r4)
+ stw r22, VCPU_GPR(R22)(r4)
+ stw r23, VCPU_GPR(R23)(r4)
+ stw r24, VCPU_GPR(R24)(r4)
+ stw r25, VCPU_GPR(R25)(r4)
+ stw r26, VCPU_GPR(R26)(r4)
+ stw r27, VCPU_GPR(R27)(r4)
+ stw r28, VCPU_GPR(R28)(r4)
+ stw r29, VCPU_GPR(R29)(r4)
+ stw r30, VCPU_GPR(R30)(r4)
+ stw r31, VCPU_GPR(R31)(r4)
/* Load host non-volatile register state from host stack. */
- lwz r14, HOST_NV_GPR(r14)(r1)
- lwz r15, HOST_NV_GPR(r15)(r1)
- lwz r16, HOST_NV_GPR(r16)(r1)
- lwz r17, HOST_NV_GPR(r17)(r1)
- lwz r18, HOST_NV_GPR(r18)(r1)
- lwz r19, HOST_NV_GPR(r19)(r1)
- lwz r20, HOST_NV_GPR(r20)(r1)
- lwz r21, HOST_NV_GPR(r21)(r1)
- lwz r22, HOST_NV_GPR(r22)(r1)
- lwz r23, HOST_NV_GPR(r23)(r1)
- lwz r24, HOST_NV_GPR(r24)(r1)
- lwz r25, HOST_NV_GPR(r25)(r1)
- lwz r26, HOST_NV_GPR(r26)(r1)
- lwz r27, HOST_NV_GPR(r27)(r1)
- lwz r28, HOST_NV_GPR(r28)(r1)
- lwz r29, HOST_NV_GPR(r29)(r1)
- lwz r30, HOST_NV_GPR(r30)(r1)
- lwz r31, HOST_NV_GPR(r31)(r1)
+ lwz r14, HOST_NV_GPR(R14)(r1)
+ lwz r15, HOST_NV_GPR(R15)(r1)
+ lwz r16, HOST_NV_GPR(R16)(r1)
+ lwz r17, HOST_NV_GPR(R17)(r1)
+ lwz r18, HOST_NV_GPR(R18)(r1)
+ lwz r19, HOST_NV_GPR(R19)(r1)
+ lwz r20, HOST_NV_GPR(R20)(r1)
+ lwz r21, HOST_NV_GPR(R21)(r1)
+ lwz r22, HOST_NV_GPR(R22)(r1)
+ lwz r23, HOST_NV_GPR(R23)(r1)
+ lwz r24, HOST_NV_GPR(R24)(r1)
+ lwz r25, HOST_NV_GPR(R25)(r1)
+ lwz r26, HOST_NV_GPR(R26)(r1)
+ lwz r27, HOST_NV_GPR(R27)(r1)
+ lwz r28, HOST_NV_GPR(R28)(r1)
+ lwz r29, HOST_NV_GPR(R29)(r1)
+ lwz r30, HOST_NV_GPR(R30)(r1)
+ lwz r31, HOST_NV_GPR(R31)(r1)
/* Return to kvm_vcpu_run(). */
lwz r4, HOST_STACK_LR(r1)
@@ -320,44 +319,44 @@ _GLOBAL(__kvmppc_vcpu_run)
stw r5, HOST_CR(r1)
/* Save host non-volatile register state to stack. */
- stw r14, HOST_NV_GPR(r14)(r1)
- stw r15, HOST_NV_GPR(r15)(r1)
- stw r16, HOST_NV_GPR(r16)(r1)
- stw r17, HOST_NV_GPR(r17)(r1)
- stw r18, HOST_NV_GPR(r18)(r1)
- stw r19, HOST_NV_GPR(r19)(r1)
- stw r20, HOST_NV_GPR(r20)(r1)
- stw r21, HOST_NV_GPR(r21)(r1)
- stw r22, HOST_NV_GPR(r22)(r1)
- stw r23, HOST_NV_GPR(r23)(r1)
- stw r24, HOST_NV_GPR(r24)(r1)
- stw r25, HOST_NV_GPR(r25)(r1)
- stw r26, HOST_NV_GPR(r26)(r1)
- stw r27, HOST_NV_GPR(r27)(r1)
- stw r28, HOST_NV_GPR(r28)(r1)
- stw r29, HOST_NV_GPR(r29)(r1)
- stw r30, HOST_NV_GPR(r30)(r1)
- stw r31, HOST_NV_GPR(r31)(r1)
+ stw r14, HOST_NV_GPR(R14)(r1)
+ stw r15, HOST_NV_GPR(R15)(r1)
+ stw r16, HOST_NV_GPR(R16)(r1)
+ stw r17, HOST_NV_GPR(R17)(r1)
+ stw r18, HOST_NV_GPR(R18)(r1)
+ stw r19, HOST_NV_GPR(R19)(r1)
+ stw r20, HOST_NV_GPR(R20)(r1)
+ stw r21, HOST_NV_GPR(R21)(r1)
+ stw r22, HOST_NV_GPR(R22)(r1)
+ stw r23, HOST_NV_GPR(R23)(r1)
+ stw r24, HOST_NV_GPR(R24)(r1)
+ stw r25, HOST_NV_GPR(R25)(r1)
+ stw r26, HOST_NV_GPR(R26)(r1)
+ stw r27, HOST_NV_GPR(R27)(r1)
+ stw r28, HOST_NV_GPR(R28)(r1)
+ stw r29, HOST_NV_GPR(R29)(r1)
+ stw r30, HOST_NV_GPR(R30)(r1)
+ stw r31, HOST_NV_GPR(R31)(r1)
/* Load guest non-volatiles. */
- lwz r14, VCPU_GPR(r14)(r4)
- lwz r15, VCPU_GPR(r15)(r4)
- lwz r16, VCPU_GPR(r16)(r4)
- lwz r17, VCPU_GPR(r17)(r4)
- lwz r18, VCPU_GPR(r18)(r4)
- lwz r19, VCPU_GPR(r19)(r4)
- lwz r20, VCPU_GPR(r20)(r4)
- lwz r21, VCPU_GPR(r21)(r4)
- lwz r22, VCPU_GPR(r22)(r4)
- lwz r23, VCPU_GPR(r23)(r4)
- lwz r24, VCPU_GPR(r24)(r4)
- lwz r25, VCPU_GPR(r25)(r4)
- lwz r26, VCPU_GPR(r26)(r4)
- lwz r27, VCPU_GPR(r27)(r4)
- lwz r28, VCPU_GPR(r28)(r4)
- lwz r29, VCPU_GPR(r29)(r4)
- lwz r30, VCPU_GPR(r30)(r4)
- lwz r31, VCPU_GPR(r31)(r4)
+ lwz r14, VCPU_GPR(R14)(r4)
+ lwz r15, VCPU_GPR(R15)(r4)
+ lwz r16, VCPU_GPR(R16)(r4)
+ lwz r17, VCPU_GPR(R17)(r4)
+ lwz r18, VCPU_GPR(R18)(r4)
+ lwz r19, VCPU_GPR(R19)(r4)
+ lwz r20, VCPU_GPR(R20)(r4)
+ lwz r21, VCPU_GPR(R21)(r4)
+ lwz r22, VCPU_GPR(R22)(r4)
+ lwz r23, VCPU_GPR(R23)(r4)
+ lwz r24, VCPU_GPR(R24)(r4)
+ lwz r25, VCPU_GPR(R25)(r4)
+ lwz r26, VCPU_GPR(R26)(r4)
+ lwz r27, VCPU_GPR(R27)(r4)
+ lwz r28, VCPU_GPR(R28)(r4)
+ lwz r29, VCPU_GPR(R29)(r4)
+ lwz r30, VCPU_GPR(R30)(r4)
+ lwz r31, VCPU_GPR(R31)(r4)
#ifdef CONFIG_SPE
/* save host SPEFSCR and load guest SPEFSCR */
@@ -385,13 +384,13 @@ lightweight_exit:
#endif
/* Load some guest volatiles. */
- lwz r0, VCPU_GPR(r0)(r4)
- lwz r2, VCPU_GPR(r2)(r4)
- lwz r9, VCPU_GPR(r9)(r4)
- lwz r10, VCPU_GPR(r10)(r4)
- lwz r11, VCPU_GPR(r11)(r4)
- lwz r12, VCPU_GPR(r12)(r4)
- lwz r13, VCPU_GPR(r13)(r4)
+ lwz r0, VCPU_GPR(R0)(r4)
+ lwz r2, VCPU_GPR(R2)(r4)
+ lwz r9, VCPU_GPR(R9)(r4)
+ lwz r10, VCPU_GPR(R10)(r4)
+ lwz r11, VCPU_GPR(R11)(r4)
+ lwz r12, VCPU_GPR(R12)(r4)
+ lwz r13, VCPU_GPR(R13)(r4)
lwz r3, VCPU_LR(r4)
mtlr r3
lwz r3, VCPU_XER(r4)
@@ -410,7 +409,7 @@ lightweight_exit:
/* Can't switch the stack pointer until after IVPR is switched,
* because host interrupt handlers would get confused. */
- lwz r1, VCPU_GPR(r1)(r4)
+ lwz r1, VCPU_GPR(R1)(r4)
/*
* Host interrupt handlers may have clobbered these
@@ -448,10 +447,10 @@ lightweight_exit:
mtcr r5
mtsrr0 r6
mtsrr1 r7
- lwz r5, VCPU_GPR(r5)(r4)
- lwz r6, VCPU_GPR(r6)(r4)
- lwz r7, VCPU_GPR(r7)(r4)
- lwz r8, VCPU_GPR(r8)(r4)
+ lwz r5, VCPU_GPR(R5)(r4)
+ lwz r6, VCPU_GPR(R6)(r4)
+ lwz r7, VCPU_GPR(R7)(r4)
+ lwz r8, VCPU_GPR(R8)(r4)
/* Clear any debug events which occurred since we disabled MSR[DE].
* XXX This gives us a 3-instruction window in which a breakpoint
@@ -460,8 +459,8 @@ lightweight_exit:
ori r3, r3, 0xffff
mtspr SPRN_DBSR, r3
- lwz r3, VCPU_GPR(r3)(r4)
- lwz r4, VCPU_GPR(r4)(r4)
+ lwz r3, VCPU_GPR(R3)(r4)
+ lwz r4, VCPU_GPR(R4)(r4)
rfi
#ifdef CONFIG_SPE
diff --git a/arch/powerpc/kvm/bookehv_interrupts.S b/arch/powerpc/kvm/bookehv_interrupts.S
index 0fa2ef7df036..d28c2d43ac1b 100644
--- a/arch/powerpc/kvm/bookehv_interrupts.S
+++ b/arch/powerpc/kvm/bookehv_interrupts.S
@@ -37,7 +37,6 @@
#define LONGBYTES (BITS_PER_LONG / 8)
-#define VCPU_GPR(n) (VCPU_GPRS + (n * LONGBYTES))
#define VCPU_GUEST_SPRG(n) (VCPU_GUEST_SPRGS + (n * LONGBYTES))
/* The host stack layout: */
@@ -67,15 +66,15 @@
*/
.macro kvm_handler_common intno, srr0, flags
/* Restore host stack pointer */
- PPC_STL r1, VCPU_GPR(r1)(r4)
- PPC_STL r2, VCPU_GPR(r2)(r4)
+ PPC_STL r1, VCPU_GPR(R1)(r4)
+ PPC_STL r2, VCPU_GPR(R2)(r4)
PPC_LL r1, VCPU_HOST_STACK(r4)
PPC_LL r2, HOST_R2(r1)
mfspr r10, SPRN_PID
lwz r8, VCPU_HOST_PID(r4)
PPC_LL r11, VCPU_SHARED(r4)
- PPC_STL r14, VCPU_GPR(r14)(r4) /* We need a non-volatile GPR. */
+ PPC_STL r14, VCPU_GPR(R14)(r4) /* We need a non-volatile GPR. */
li r14, \intno
stw r10, VCPU_GUEST_PID(r4)
@@ -137,35 +136,31 @@
*/
mfspr r3, SPRN_EPLC /* will already have correct ELPID and EGS */
- PPC_STL r15, VCPU_GPR(r15)(r4)
- PPC_STL r16, VCPU_GPR(r16)(r4)
- PPC_STL r17, VCPU_GPR(r17)(r4)
- PPC_STL r18, VCPU_GPR(r18)(r4)
- PPC_STL r19, VCPU_GPR(r19)(r4)
+ PPC_STL r15, VCPU_GPR(R15)(r4)
+ PPC_STL r16, VCPU_GPR(R16)(r4)
+ PPC_STL r17, VCPU_GPR(R17)(r4)
+ PPC_STL r18, VCPU_GPR(R18)(r4)
+ PPC_STL r19, VCPU_GPR(R19)(r4)
mr r8, r3
- PPC_STL r20, VCPU_GPR(r20)(r4)
+ PPC_STL r20, VCPU_GPR(R20)(r4)
rlwimi r8, r6, EPC_EAS_SHIFT - MSR_IR_LG, EPC_EAS
- PPC_STL r21, VCPU_GPR(r21)(r4)
+ PPC_STL r21, VCPU_GPR(R21)(r4)
rlwimi r8, r6, EPC_EPR_SHIFT - MSR_PR_LG, EPC_EPR
- PPC_STL r22, VCPU_GPR(r22)(r4)
+ PPC_STL r22, VCPU_GPR(R22)(r4)
rlwimi r8, r10, EPC_EPID_SHIFT, EPC_EPID
- PPC_STL r23, VCPU_GPR(r23)(r4)
- PPC_STL r24, VCPU_GPR(r24)(r4)
- PPC_STL r25, VCPU_GPR(r25)(r4)
- PPC_STL r26, VCPU_GPR(r26)(r4)
- PPC_STL r27, VCPU_GPR(r27)(r4)
- PPC_STL r28, VCPU_GPR(r28)(r4)
- PPC_STL r29, VCPU_GPR(r29)(r4)
- PPC_STL r30, VCPU_GPR(r30)(r4)
- PPC_STL r31, VCPU_GPR(r31)(r4)
+ PPC_STL r23, VCPU_GPR(R23)(r4)
+ PPC_STL r24, VCPU_GPR(R24)(r4)
+ PPC_STL r25, VCPU_GPR(R25)(r4)
+ PPC_STL r26, VCPU_GPR(R26)(r4)
+ PPC_STL r27, VCPU_GPR(R27)(r4)
+ PPC_STL r28, VCPU_GPR(R28)(r4)
+ PPC_STL r29, VCPU_GPR(R29)(r4)
+ PPC_STL r30, VCPU_GPR(R30)(r4)
+ PPC_STL r31, VCPU_GPR(R31)(r4)
mtspr SPRN_EPLC, r8
/* disable preemption, so we are sure we hit the fixup handler */
-#ifdef CONFIG_PPC64
- clrrdi r8,r1,THREAD_SHIFT
-#else
- rlwinm r8,r1,0,0,31-THREAD_SHIFT /* current thread_info */
-#endif
+ CURRENT_THREAD_INFO(r8, r1)
li r7, 1
stw r7, TI_PREEMPT(r8)
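[Editor's note: the CURRENT_THREAD_INFO() helper that replaces the open-coded #ifdef
above presumably hides the same two instructions the patch removes: mask off the low
THREAD_SHIFT bits of the stack pointer to reach the thread_info at the base of the
kernel stack. A sketch, assuming it mirrors the removed code:]

	/* Assumed definition, mirroring the instructions removed above. */
	#ifdef CONFIG_PPC64
	#define CURRENT_THREAD_INFO(dest, sp)	clrrdi	dest, sp, THREAD_SHIFT
	#else
	#define CURRENT_THREAD_INFO(dest, sp)	rlwinm	dest, sp, 0, 0, 31-THREAD_SHIFT
	#endif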
@@ -211,24 +206,24 @@
.macro kvm_handler intno srr0, srr1, flags
_GLOBAL(kvmppc_handler_\intno\()_\srr1)
GET_VCPU(r11, r10)
- PPC_STL r3, VCPU_GPR(r3)(r11)
+ PPC_STL r3, VCPU_GPR(R3)(r11)
mfspr r3, SPRN_SPRG_RSCRATCH0
- PPC_STL r4, VCPU_GPR(r4)(r11)
+ PPC_STL r4, VCPU_GPR(R4)(r11)
PPC_LL r4, THREAD_NORMSAVE(0)(r10)
- PPC_STL r5, VCPU_GPR(r5)(r11)
+ PPC_STL r5, VCPU_GPR(R5)(r11)
stw r13, VCPU_CR(r11)
mfspr r5, \srr0
- PPC_STL r3, VCPU_GPR(r10)(r11)
+ PPC_STL r3, VCPU_GPR(R10)(r11)
PPC_LL r3, THREAD_NORMSAVE(2)(r10)
- PPC_STL r6, VCPU_GPR(r6)(r11)
- PPC_STL r4, VCPU_GPR(r11)(r11)
+ PPC_STL r6, VCPU_GPR(R6)(r11)
+ PPC_STL r4, VCPU_GPR(R11)(r11)
mfspr r6, \srr1
- PPC_STL r7, VCPU_GPR(r7)(r11)
- PPC_STL r8, VCPU_GPR(r8)(r11)
- PPC_STL r9, VCPU_GPR(r9)(r11)
- PPC_STL r3, VCPU_GPR(r13)(r11)
+ PPC_STL r7, VCPU_GPR(R7)(r11)
+ PPC_STL r8, VCPU_GPR(R8)(r11)
+ PPC_STL r9, VCPU_GPR(R9)(r11)
+ PPC_STL r3, VCPU_GPR(R13)(r11)
mfctr r7
- PPC_STL r12, VCPU_GPR(r12)(r11)
+ PPC_STL r12, VCPU_GPR(R12)(r11)
PPC_STL r7, VCPU_CTR(r11)
mr r4, r11
kvm_handler_common \intno, \srr0, \flags
@@ -238,25 +233,25 @@ _GLOBAL(kvmppc_handler_\intno\()_\srr1)
_GLOBAL(kvmppc_handler_\intno\()_\srr1)
mfspr r10, SPRN_SPRG_THREAD
GET_VCPU(r11, r10)
- PPC_STL r3, VCPU_GPR(r3)(r11)
+ PPC_STL r3, VCPU_GPR(R3)(r11)
mfspr r3, \scratch
- PPC_STL r4, VCPU_GPR(r4)(r11)
+ PPC_STL r4, VCPU_GPR(R4)(r11)
PPC_LL r4, GPR9(r8)
- PPC_STL r5, VCPU_GPR(r5)(r11)
+ PPC_STL r5, VCPU_GPR(R5)(r11)
stw r9, VCPU_CR(r11)
mfspr r5, \srr0
- PPC_STL r3, VCPU_GPR(r8)(r11)
+ PPC_STL r3, VCPU_GPR(R8)(r11)
PPC_LL r3, GPR10(r8)
- PPC_STL r6, VCPU_GPR(r6)(r11)
- PPC_STL r4, VCPU_GPR(r9)(r11)
+ PPC_STL r6, VCPU_GPR(R6)(r11)
+ PPC_STL r4, VCPU_GPR(R9)(r11)
mfspr r6, \srr1
PPC_LL r4, GPR11(r8)
- PPC_STL r7, VCPU_GPR(r7)(r11)
- PPC_STL r3, VCPU_GPR(r10)(r11)
+ PPC_STL r7, VCPU_GPR(R7)(r11)
+ PPC_STL r3, VCPU_GPR(R10)(r11)
mfctr r7
- PPC_STL r12, VCPU_GPR(r12)(r11)
- PPC_STL r13, VCPU_GPR(r13)(r11)
- PPC_STL r4, VCPU_GPR(r11)(r11)
+ PPC_STL r12, VCPU_GPR(R12)(r11)
+ PPC_STL r13, VCPU_GPR(R13)(r11)
+ PPC_STL r4, VCPU_GPR(R11)(r11)
PPC_STL r7, VCPU_CTR(r11)
mr r4, r11
kvm_handler_common \intno, \srr0, \flags
@@ -310,7 +305,7 @@ kvm_lvl_handler BOOKE_INTERRUPT_DEBUG, \
_GLOBAL(kvmppc_resume_host)
/* Save remaining volatile guest register state to vcpu. */
mfspr r3, SPRN_VRSAVE
- PPC_STL r0, VCPU_GPR(r0)(r4)
+ PPC_STL r0, VCPU_GPR(R0)(r4)
mflr r5
mfspr r6, SPRN_SPRG4
PPC_STL r5, VCPU_LR(r4)
@@ -358,27 +353,27 @@ _GLOBAL(kvmppc_resume_host)
/* Restore vcpu pointer and the nonvolatiles we used. */
mr r4, r14
- PPC_LL r14, VCPU_GPR(r14)(r4)
+ PPC_LL r14, VCPU_GPR(R14)(r4)
andi. r5, r3, RESUME_FLAG_NV
beq skip_nv_load
- PPC_LL r15, VCPU_GPR(r15)(r4)
- PPC_LL r16, VCPU_GPR(r16)(r4)
- PPC_LL r17, VCPU_GPR(r17)(r4)
- PPC_LL r18, VCPU_GPR(r18)(r4)
- PPC_LL r19, VCPU_GPR(r19)(r4)
- PPC_LL r20, VCPU_GPR(r20)(r4)
- PPC_LL r21, VCPU_GPR(r21)(r4)
- PPC_LL r22, VCPU_GPR(r22)(r4)
- PPC_LL r23, VCPU_GPR(r23)(r4)
- PPC_LL r24, VCPU_GPR(r24)(r4)
- PPC_LL r25, VCPU_GPR(r25)(r4)
- PPC_LL r26, VCPU_GPR(r26)(r4)
- PPC_LL r27, VCPU_GPR(r27)(r4)
- PPC_LL r28, VCPU_GPR(r28)(r4)
- PPC_LL r29, VCPU_GPR(r29)(r4)
- PPC_LL r30, VCPU_GPR(r30)(r4)
- PPC_LL r31, VCPU_GPR(r31)(r4)
+ PPC_LL r15, VCPU_GPR(R15)(r4)
+ PPC_LL r16, VCPU_GPR(R16)(r4)
+ PPC_LL r17, VCPU_GPR(R17)(r4)
+ PPC_LL r18, VCPU_GPR(R18)(r4)
+ PPC_LL r19, VCPU_GPR(R19)(r4)
+ PPC_LL r20, VCPU_GPR(R20)(r4)
+ PPC_LL r21, VCPU_GPR(R21)(r4)
+ PPC_LL r22, VCPU_GPR(R22)(r4)
+ PPC_LL r23, VCPU_GPR(R23)(r4)
+ PPC_LL r24, VCPU_GPR(R24)(r4)
+ PPC_LL r25, VCPU_GPR(R25)(r4)
+ PPC_LL r26, VCPU_GPR(R26)(r4)
+ PPC_LL r27, VCPU_GPR(R27)(r4)
+ PPC_LL r28, VCPU_GPR(R28)(r4)
+ PPC_LL r29, VCPU_GPR(R29)(r4)
+ PPC_LL r30, VCPU_GPR(R30)(r4)
+ PPC_LL r31, VCPU_GPR(R31)(r4)
skip_nv_load:
/* Should we return to the guest? */
andi. r5, r3, RESUME_FLAG_HOST
@@ -396,23 +391,23 @@ heavyweight_exit:
* non-volatiles.
*/
- PPC_STL r15, VCPU_GPR(r15)(r4)
- PPC_STL r16, VCPU_GPR(r16)(r4)
- PPC_STL r17, VCPU_GPR(r17)(r4)
- PPC_STL r18, VCPU_GPR(r18)(r4)
- PPC_STL r19, VCPU_GPR(r19)(r4)
- PPC_STL r20, VCPU_GPR(r20)(r4)
- PPC_STL r21, VCPU_GPR(r21)(r4)
- PPC_STL r22, VCPU_GPR(r22)(r4)
- PPC_STL r23, VCPU_GPR(r23)(r4)
- PPC_STL r24, VCPU_GPR(r24)(r4)
- PPC_STL r25, VCPU_GPR(r25)(r4)
- PPC_STL r26, VCPU_GPR(r26)(r4)
- PPC_STL r27, VCPU_GPR(r27)(r4)
- PPC_STL r28, VCPU_GPR(r28)(r4)
- PPC_STL r29, VCPU_GPR(r29)(r4)
- PPC_STL r30, VCPU_GPR(r30)(r4)
- PPC_STL r31, VCPU_GPR(r31)(r4)
+ PPC_STL r15, VCPU_GPR(R15)(r4)
+ PPC_STL r16, VCPU_GPR(R16)(r4)
+ PPC_STL r17, VCPU_GPR(R17)(r4)
+ PPC_STL r18, VCPU_GPR(R18)(r4)
+ PPC_STL r19, VCPU_GPR(R19)(r4)
+ PPC_STL r20, VCPU_GPR(R20)(r4)
+ PPC_STL r21, VCPU_GPR(R21)(r4)
+ PPC_STL r22, VCPU_GPR(R22)(r4)
+ PPC_STL r23, VCPU_GPR(R23)(r4)
+ PPC_STL r24, VCPU_GPR(R24)(r4)
+ PPC_STL r25, VCPU_GPR(R25)(r4)
+ PPC_STL r26, VCPU_GPR(R26)(r4)
+ PPC_STL r27, VCPU_GPR(R27)(r4)
+ PPC_STL r28, VCPU_GPR(R28)(r4)
+ PPC_STL r29, VCPU_GPR(R29)(r4)
+ PPC_STL r30, VCPU_GPR(R30)(r4)
+ PPC_STL r31, VCPU_GPR(R31)(r4)
/* Load host non-volatile register state from host stack. */
PPC_LL r14, HOST_NV_GPR(r14)(r1)
@@ -478,24 +473,24 @@ _GLOBAL(__kvmppc_vcpu_run)
PPC_STL r31, HOST_NV_GPR(r31)(r1)
/* Load guest non-volatiles. */
- PPC_LL r14, VCPU_GPR(r14)(r4)
- PPC_LL r15, VCPU_GPR(r15)(r4)
- PPC_LL r16, VCPU_GPR(r16)(r4)
- PPC_LL r17, VCPU_GPR(r17)(r4)
- PPC_LL r18, VCPU_GPR(r18)(r4)
- PPC_LL r19, VCPU_GPR(r19)(r4)
- PPC_LL r20, VCPU_GPR(r20)(r4)
- PPC_LL r21, VCPU_GPR(r21)(r4)
- PPC_LL r22, VCPU_GPR(r22)(r4)
- PPC_LL r23, VCPU_GPR(r23)(r4)
- PPC_LL r24, VCPU_GPR(r24)(r4)
- PPC_LL r25, VCPU_GPR(r25)(r4)
- PPC_LL r26, VCPU_GPR(r26)(r4)
- PPC_LL r27, VCPU_GPR(r27)(r4)
- PPC_LL r28, VCPU_GPR(r28)(r4)
- PPC_LL r29, VCPU_GPR(r29)(r4)
- PPC_LL r30, VCPU_GPR(r30)(r4)
- PPC_LL r31, VCPU_GPR(r31)(r4)
+ PPC_LL r14, VCPU_GPR(R14)(r4)
+ PPC_LL r15, VCPU_GPR(R15)(r4)
+ PPC_LL r16, VCPU_GPR(R16)(r4)
+ PPC_LL r17, VCPU_GPR(R17)(r4)
+ PPC_LL r18, VCPU_GPR(R18)(r4)
+ PPC_LL r19, VCPU_GPR(R19)(r4)
+ PPC_LL r20, VCPU_GPR(R20)(r4)
+ PPC_LL r21, VCPU_GPR(R21)(r4)
+ PPC_LL r22, VCPU_GPR(R22)(r4)
+ PPC_LL r23, VCPU_GPR(R23)(r4)
+ PPC_LL r24, VCPU_GPR(R24)(r4)
+ PPC_LL r25, VCPU_GPR(R25)(r4)
+ PPC_LL r26, VCPU_GPR(R26)(r4)
+ PPC_LL r27, VCPU_GPR(R27)(r4)
+ PPC_LL r28, VCPU_GPR(R28)(r4)
+ PPC_LL r29, VCPU_GPR(R29)(r4)
+ PPC_LL r30, VCPU_GPR(R30)(r4)
+ PPC_LL r31, VCPU_GPR(R31)(r4)
lightweight_exit:
@@ -554,13 +549,13 @@ lightweight_exit:
lwz r7, VCPU_CR(r4)
PPC_LL r8, VCPU_PC(r4)
PPC_LD(r9, VCPU_SHARED_MSR, r11)
- PPC_LL r0, VCPU_GPR(r0)(r4)
- PPC_LL r1, VCPU_GPR(r1)(r4)
- PPC_LL r2, VCPU_GPR(r2)(r4)
- PPC_LL r10, VCPU_GPR(r10)(r4)
- PPC_LL r11, VCPU_GPR(r11)(r4)
- PPC_LL r12, VCPU_GPR(r12)(r4)
- PPC_LL r13, VCPU_GPR(r13)(r4)
+ PPC_LL r0, VCPU_GPR(R0)(r4)
+ PPC_LL r1, VCPU_GPR(R1)(r4)
+ PPC_LL r2, VCPU_GPR(R2)(r4)
+ PPC_LL r10, VCPU_GPR(R10)(r4)
+ PPC_LL r11, VCPU_GPR(R11)(r4)
+ PPC_LL r12, VCPU_GPR(R12)(r4)
+ PPC_LL r13, VCPU_GPR(R13)(r4)
mtlr r3
mtxer r5
mtctr r6
@@ -586,12 +581,12 @@ lightweight_exit:
mtcr r7
/* Finish loading guest volatiles and jump to guest. */
- PPC_LL r5, VCPU_GPR(r5)(r4)
- PPC_LL r6, VCPU_GPR(r6)(r4)
- PPC_LL r7, VCPU_GPR(r7)(r4)
- PPC_LL r8, VCPU_GPR(r8)(r4)
- PPC_LL r9, VCPU_GPR(r9)(r4)
-
- PPC_LL r3, VCPU_GPR(r3)(r4)
- PPC_LL r4, VCPU_GPR(r4)(r4)
+ PPC_LL r5, VCPU_GPR(R5)(r4)
+ PPC_LL r6, VCPU_GPR(R6)(r4)
+ PPC_LL r7, VCPU_GPR(R7)(r4)
+ PPC_LL r8, VCPU_GPR(R8)(r4)
+ PPC_LL r9, VCPU_GPR(R9)(r4)
+
+ PPC_LL r3, VCPU_GPR(R3)(r4)
+ PPC_LL r4, VCPU_GPR(R4)(r4)
rfi