Diffstat (limited to 'arch/powerpc/kvm/emulate_loadstore.c')
-rw-r--r--	arch/powerpc/kvm/emulate_loadstore.c	37
1 file changed, 19 insertions, 18 deletions
diff --git a/arch/powerpc/kvm/emulate_loadstore.c b/arch/powerpc/kvm/emulate_loadstore.c
index cfc9114b87d0..ec60c7979718 100644
--- a/arch/powerpc/kvm/emulate_loadstore.c
+++ b/arch/powerpc/kvm/emulate_loadstore.c
@@ -28,7 +28,7 @@
static bool kvmppc_check_fp_disabled(struct kvm_vcpu *vcpu)
{
if (!(kvmppc_get_msr(vcpu) & MSR_FP)) {
- kvmppc_core_queue_fpunavail(vcpu);
+ kvmppc_core_queue_fpunavail(vcpu, kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
return true;
}
@@ -40,7 +40,7 @@ static bool kvmppc_check_fp_disabled(struct kvm_vcpu *vcpu)
static bool kvmppc_check_vsx_disabled(struct kvm_vcpu *vcpu)
{
if (!(kvmppc_get_msr(vcpu) & MSR_VSX)) {
- kvmppc_core_queue_vsx_unavail(vcpu);
+ kvmppc_core_queue_vsx_unavail(vcpu, kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
return true;
}
@@ -52,7 +52,7 @@ static bool kvmppc_check_vsx_disabled(struct kvm_vcpu *vcpu)
static bool kvmppc_check_altivec_disabled(struct kvm_vcpu *vcpu)
{
if (!(kvmppc_get_msr(vcpu) & MSR_VEC)) {
- kvmppc_core_queue_vec_unavail(vcpu);
+ kvmppc_core_queue_vec_unavail(vcpu, kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
return true;
}
@@ -71,7 +71,7 @@ static bool kvmppc_check_altivec_disabled(struct kvm_vcpu *vcpu)
*/
int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
{
- u32 inst;
+ ppc_inst_t inst;
enum emulation_result emulated = EMULATE_FAIL;
struct instruction_op op;
@@ -92,8 +92,8 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
vcpu->arch.mmio_host_swabbed = 0;
emulated = EMULATE_FAIL;
- vcpu->arch.regs.msr = vcpu->arch.shared->msr;
- if (analyse_instr(&op, &vcpu->arch.regs, ppc_inst(inst)) == 0) {
+ vcpu->arch.regs.msr = kvmppc_get_msr(vcpu);
+ if (analyse_instr(&op, &vcpu->arch.regs, inst) == 0) {
int type = op.type & INSTR_TYPE_MASK;
int size = GETSIZE(op.type);
@@ -111,7 +111,7 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
op.reg, size, !instr_byte_swap);
if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
- kvmppc_set_gpr(vcpu, op.update_reg, op.ea);
+ kvmppc_set_gpr(vcpu, op.update_reg, vcpu->arch.vaddr_accessed);
break;
}
@@ -131,7 +131,7 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
KVM_MMIO_REG_FPR|op.reg, size, 1);
if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
- kvmppc_set_gpr(vcpu, op.update_reg, op.ea);
+ kvmppc_set_gpr(vcpu, op.update_reg, vcpu->arch.vaddr_accessed);
break;
#endif
@@ -223,16 +223,17 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
break;
}
#endif
- case STORE:
- /* if need byte reverse, op.val has been reversed by
- * analyse_instr().
- */
- emulated = kvmppc_handle_store(vcpu, op.val, size, 1);
+ case STORE: {
+ int instr_byte_swap = op.type & BYTEREV;
+
+ emulated = kvmppc_handle_store(vcpu, kvmppc_get_gpr(vcpu, op.reg),
+ size, !instr_byte_swap);
if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
- kvmppc_set_gpr(vcpu, op.update_reg, op.ea);
+ kvmppc_set_gpr(vcpu, op.update_reg, vcpu->arch.vaddr_accessed);
break;
+ }
#ifdef CONFIG_PPC_FPU
case STORE_FP:
if (kvmppc_check_fp_disabled(vcpu))
@@ -250,10 +251,10 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
vcpu->arch.mmio_sp64_extend = 1;
emulated = kvmppc_handle_store(vcpu,
- VCPU_FPR(vcpu, op.reg), size, 1);
+ kvmppc_get_fpr(vcpu, op.reg), size, 1);
if ((op.type & UPDATE) && (emulated != EMULATE_FAIL))
- kvmppc_set_gpr(vcpu, op.update_reg, op.ea);
+ kvmppc_set_gpr(vcpu, op.update_reg, vcpu->arch.vaddr_accessed);
break;
#endif
@@ -356,11 +357,11 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
}
}
- trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);
+ trace_kvm_ppc_instr(ppc_inst_val(inst), kvmppc_get_pc(vcpu), emulated);
/* Advance past emulated instruction. */
if (emulated != EMULATE_FAIL)
- kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);
+ kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + ppc_inst_len(inst));
return emulated;
}