author     Simon Guo <wei.guo.simon@gmail.com>    2018-05-21 13:24:26 +0800
committer  Paul Mackerras <paulus@ozlabs.org>     2018-05-22 19:53:00 +1000
commit     acc9eb9305fecd958e2877c4e6cd3284d01c2e82 (patch)
tree       679cf4c57c1860592d92047eb38d83e2b458a95d /arch/powerpc/kvm/emulate_loadstore.c
parent     KVM: PPC: Expand mmio_vsx_copy_type to cover VMX load/store element types (diff)
KVM: PPC: Reimplement LOAD_VMX/STORE_VMX instruction mmio emulation with analyse_instr() input
This patch reimplements LOAD_VMX/STORE_VMX instruction MMIO emulation using the output of analyse_instr(). When emulating a store, the VMX register needs to be flushed first so that the correct register value is retrieved before it is written out to I/O memory.

This patch also adds MMIO emulation support for the element-wise instructions lvebx/lvehx/lvewx and stvebx/stvehx/stvewx. To handle the different element sizes, kvmppc_handle_load128_by2x64()/kvmppc_handle_store128_by2x64() are replaced with kvmppc_handle_vmx_load()/kvmppc_handle_vmx_store(). The framework mirrors the one used for VSX instruction MMIO emulation.

Suggested-by: Paul Mackerras <paulus@ozlabs.org>
Signed-off-by: Simon Guo <wei.guo.simon@gmail.com>
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
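To make the new dispatch concrete: the element size selects a copy type, the effective address is masked down to the element's natural alignment, and the low address bits give the element's index within the 16-byte VMX register. The standalone C sketch below models that logic; the enum, helper, and harness names are invented for illustration and are not the kernel's.

#include <stdio.h>

enum vmx_copy_type {
        VMX_COPY_DWORD,         /* lvx/stvx: two 8-byte MMIO ops */
        VMX_COPY_WORD,          /* lvewx/stvewx */
        VMX_COPY_HWORD,         /* lvehx/stvehx */
        VMX_COPY_BYTE,          /* lvebx/stvebx */
};

/* Classify one VMX access; returns -1 for an unsupported size. */
static int vmx_classify(unsigned long *ea, int size,
                        enum vmx_copy_type *type, int *offset, int *copy_nums)
{
        /* Hardware enforces natural alignment of VMX accesses. */
        *ea &= ~((unsigned long)size - 1);

        switch (size) {
        case 16: *type = VMX_COPY_DWORD; break;
        case 4:  *type = VMX_COPY_WORD;  break;
        case 2:  *type = VMX_COPY_HWORD; break;
        case 1:  *type = VMX_COPY_BYTE;  break;
        default: return -1;
        }

        /* Element index inside the 16-byte register image. */
        *offset = (int)(*ea & 0xf) / size;

        /* A full 16-byte access is performed as two 8-byte MMIO ops. */
        *copy_nums = (size == 16) ? 2 : 1;
        return 0;
}

int main(void)
{
        unsigned long ea = 0x10007;     /* guest EA of an lvehx (size 2) */
        enum vmx_copy_type type;
        int offset, copy_nums;

        if (vmx_classify(&ea, 2, &type, &offset, &copy_nums) == 0)
                printf("ea=0x%lx element=%d ops=%d\n", ea, offset, copy_nums);
        return 0;
}

For size == 16 the masked offset is always 0 and the access is carried out as two 8-byte MMIO operations, matching the mmio_vmx_copy_nums = 2 path in the patch below.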
Diffstat (limited to 'arch/powerpc/kvm/emulate_loadstore.c')
 arch/powerpc/kvm/emulate_loadstore.c | 124 ++++++++++++++++++++-----------
 1 file changed, 83 insertions(+), 41 deletions(-)
diff --git a/arch/powerpc/kvm/emulate_loadstore.c b/arch/powerpc/kvm/emulate_loadstore.c
index 324cfbfa6fa2..afde788be141 100644
--- a/arch/powerpc/kvm/emulate_loadstore.c
+++ b/arch/powerpc/kvm/emulate_loadstore.c
@@ -113,6 +113,7 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
         vcpu->arch.mmio_sp64_extend = 0;
         vcpu->arch.mmio_sign_extend = 0;
         vcpu->arch.mmio_vmx_copy_nums = 0;
+        vcpu->arch.mmio_vmx_offset = 0;
         vcpu->arch.mmio_host_swabbed = 0;

         emulated = EMULATE_FAIL;
@@ -158,6 +159,46 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
                         break;
 #endif
+#ifdef CONFIG_ALTIVEC
+                case LOAD_VMX:
+                        if (kvmppc_check_altivec_disabled(vcpu))
+                                return EMULATE_DONE;
+
+                        /* Hardware enforces alignment of VMX accesses */
+                        vcpu->arch.vaddr_accessed &= ~((unsigned long)size - 1);
+                        vcpu->arch.paddr_accessed &= ~((unsigned long)size - 1);
+
+                        if (size == 16) { /* lvx */
+                                vcpu->arch.mmio_copy_type =
+                                                KVMPPC_VMX_COPY_DWORD;
+                        } else if (size == 4) { /* lvewx */
+                                vcpu->arch.mmio_copy_type =
+                                                KVMPPC_VMX_COPY_WORD;
+                        } else if (size == 2) { /* lvehx */
+                                vcpu->arch.mmio_copy_type =
+                                                KVMPPC_VMX_COPY_HWORD;
+                        } else if (size == 1) { /* lvebx */
+                                vcpu->arch.mmio_copy_type =
+                                                KVMPPC_VMX_COPY_BYTE;
+                        } else
+                                break;
+
+                        vcpu->arch.mmio_vmx_offset =
+                                (vcpu->arch.vaddr_accessed & 0xf)/size;
+
+                        if (size == 16) {
+                                vcpu->arch.mmio_vmx_copy_nums = 2;
+                                emulated = kvmppc_handle_vmx_load(run,
+                                                vcpu, KVM_MMIO_REG_VMX|op.reg,
+                                                8, 1);
+                        } else {
+                                vcpu->arch.mmio_vmx_copy_nums = 1;
+                                emulated = kvmppc_handle_vmx_load(run, vcpu,
+                                                KVM_MMIO_REG_VMX|op.reg,
+                                                size, 1);
+                        }
+                        break;
+#endif
 #ifdef CONFIG_VSX
                 case LOAD_VSX: {
                         int io_size_each;
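In the lvx path above, mmio_vmx_copy_nums is set to 2 and kvmppc_handle_vmx_load() is called with an 8-byte size, so the 16-byte register fills via two consecutive 64-bit MMIO reads, while the element loads deposit a single element at mmio_vmx_offset. A minimal standalone model of that completion step follows; the register image, helper name, byte layout within an element, and test values are assumptions of this sketch, not the kernel's actual completion path.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for a 128-bit VMX register, viewed as 16 bytes. */
typedef uint8_t vmx_reg_t[16];

/*
 * Deposit one completed MMIO read into the register image. 'offset'
 * counts elements of 'size' bytes, like mmio_vmx_offset above. Bytes
 * are stored most-significant first (a layout choice of this sketch;
 * the kernel handles guest/host byte order itself).
 */
static void vmx_complete_load(vmx_reg_t reg, int offset, int size, uint64_t val)
{
        int i;

        for (i = 0; i < size; i++)
                reg[offset * size + i] = (uint8_t)(val >> (8 * (size - 1 - i)));
}

int main(void)
{
        vmx_reg_t reg;
        int i;

        memset(reg, 0, sizeof(reg));

        /* lvx: copy_nums == 2, 8-byte reads land at dword elements 0 and 1 */
        vmx_complete_load(reg, 0, 8, 0x1111111111111111ULL);
        vmx_complete_load(reg, 1, 8, 0x2222222222222222ULL);

        /* lvewx with EA & 0xf == 12: one 4-byte read into word element 3 */
        vmx_complete_load(reg, 3, 4, 0x33333333);

        for (i = 0; i < 16; i++)
                printf("%02x", reg[i]);
        printf("\n");
        return 0;
}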
@@ -241,6 +282,48 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
                         break;
 #endif
+#ifdef CONFIG_ALTIVEC
+                case STORE_VMX:
+                        if (kvmppc_check_altivec_disabled(vcpu))
+                                return EMULATE_DONE;
+
+                        /* Hardware enforces alignment of VMX accesses. */
+                        vcpu->arch.vaddr_accessed &= ~((unsigned long)size - 1);
+                        vcpu->arch.paddr_accessed &= ~((unsigned long)size - 1);
+
+                        if (vcpu->kvm->arch.kvm_ops->giveup_ext)
+                                vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu,
+                                                MSR_VEC);
+                        if (size == 16) { /* stvx */
+                                vcpu->arch.mmio_copy_type =
+                                                KVMPPC_VMX_COPY_DWORD;
+                        } else if (size == 4) { /* stvewx */
+                                vcpu->arch.mmio_copy_type =
+                                                KVMPPC_VMX_COPY_WORD;
+                        } else if (size == 2) { /* stvehx */
+                                vcpu->arch.mmio_copy_type =
+                                                KVMPPC_VMX_COPY_HWORD;
+                        } else if (size == 1) { /* stvebx */
+                                vcpu->arch.mmio_copy_type =
+                                                KVMPPC_VMX_COPY_BYTE;
+                        } else
+                                break;
+
+                        vcpu->arch.mmio_vmx_offset =
+                                (vcpu->arch.vaddr_accessed & 0xf)/size;
+
+                        if (size == 16) {
+                                vcpu->arch.mmio_vmx_copy_nums = 2;
+                                emulated = kvmppc_handle_vmx_store(run,
+                                                vcpu, op.reg, 8, 1);
+                        } else {
+                                vcpu->arch.mmio_vmx_copy_nums = 1;
+                                emulated = kvmppc_handle_vmx_store(run,
+                                                vcpu, op.reg, size, 1);
+                        }
+
+                        break;
+#endif
 #ifdef CONFIG_VSX
                 case STORE_VSX: {
                         int io_size_each;
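On the store side, the guest's live VMX state is first flushed back to the vcpu (the giveup_ext(vcpu, MSR_VEC) call above, as the commit message notes) so that the value read for the MMIO write is current. After that, the element selected by mmio_vmx_offset is pulled out of the register image. A standalone sketch of that extraction step, with the same invented register image and byte layout as the load sketch above:

#include <stdint.h>
#include <stdio.h>

typedef uint8_t vmx_reg_t[16];

/* Fetch the element that the MMIO write will carry out to the device
 * (most-significant byte first, matching the sketch's layout). */
static uint64_t vmx_extract_element(const vmx_reg_t reg, int offset, int size)
{
        uint64_t val = 0;
        int i;

        for (i = 0; i < size; i++)
                val = (val << 8) | reg[offset * size + i];
        return val;
}

int main(void)
{
        vmx_reg_t reg = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77,
                          0x88, 0x99, 0xaa, 0xbb, 0xcc, 0xdd, 0xee, 0xff };

        /* stvehx with EA & 0xf == 2: halfword element 1, i.e. bytes 2..3 */
        printf("0x%04llx\n",
               (unsigned long long)vmx_extract_element(reg, 1, 2));
        return 0;
}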
@@ -298,47 +381,6 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
                 }
         }
-
-        if ((emulated == EMULATE_DONE) || (emulated == EMULATE_DO_MMIO))
-                goto out;
-
-        switch (get_op(inst)) {
-        case 31:
-                switch (get_xop(inst)) {
-#ifdef CONFIG_ALTIVEC
-                case OP_31_XOP_LVX:
-                        if (kvmppc_check_altivec_disabled(vcpu))
-                                return EMULATE_DONE;
-                        vcpu->arch.vaddr_accessed &= ~0xFULL;
-                        vcpu->arch.paddr_accessed &= ~0xFULL;
-                        vcpu->arch.mmio_vmx_copy_nums = 2;
-                        emulated = kvmppc_handle_load128_by2x64(run, vcpu,
-                                        KVM_MMIO_REG_VMX|rt, 1);
-                        break;
-
-                case OP_31_XOP_STVX:
-                        if (kvmppc_check_altivec_disabled(vcpu))
-                                return EMULATE_DONE;
-                        vcpu->arch.vaddr_accessed &= ~0xFULL;
-                        vcpu->arch.paddr_accessed &= ~0xFULL;
-                        vcpu->arch.mmio_vmx_copy_nums = 2;
-                        emulated = kvmppc_handle_store128_by2x64(run, vcpu,
-                                        rs, 1);
-                        break;
-#endif /* CONFIG_ALTIVEC */
-
-                default:
-                        emulated = EMULATE_FAIL;
-                        break;
-                }
-                break;
-
-        default:
-                emulated = EMULATE_FAIL;
-                break;
-        }
-
-out:
         if (emulated == EMULATE_FAIL) {
                 advance = 0;
                 kvmppc_core_queue_program(vcpu, 0);