author	Sean Christopherson <sean.j.christopherson@intel.com>	2018-08-23 13:56:47 -0700
committer	Radim Krčmář <rkrcmar@redhat.com>	2018-08-30 16:20:43 +0200
commit	35be0aded76b54a24dc8aa678a71bca22273e8d8 (patch)
tree	15f666c5fa16471f9ee542aa97dde24b4925d294 /arch/x86/include/asm/kvm_host.h
parent	KVM: VMX: Do not allow reexecute_instruction() when skipping MMIO instr (diff)
KVM: x86: SVM: Set EMULTYPE_NO_REEXECUTE for RSM emulation
Re-execution after an emulation decode failure is only intended to handle a case where two or more vCPUs race to write a shadowed page, i.e. we should never re-execute an instruction as part of RSM emulation.

Add a new helper, kvm_emulate_instruction_from_buffer(), to support emulating from a pre-defined buffer. This eliminates the last direct call to x86_emulate_instruction() outside of kvm_mmu_page_fault(), which means x86_emulate_instruction() can be unexported in a future patch.

Fixes: 7607b7174405 ("KVM: SVM: install RSM intercept")
Cc: Brijesh Singh <brijesh.singh@amd.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Cc: stable@vger.kernel.org
Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
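For context, the intended consumer of the new helper is the SVM RSM intercept in arch/x86/kvm/svm.c, which is not covered by this diffstat and currently calls x86_emulate_instruction() directly with a pre-defined instruction buffer. A minimal sketch of that call site after the conversion, assuming rsm_ins_bytes is the existing two-byte RSM opcode buffer in the SVM code:

static int rsm_interception(struct vcpu_svm *svm)
{
	/*
	 * Emulate RSM from the pre-defined opcode buffer rather than
	 * fetching from guest memory; the helper passes
	 * EMULTYPE_NO_REEXECUTE, so a decode failure is never
	 * re-executed.
	 */
	return kvm_emulate_instruction_from_buffer(&svm->vcpu,
						   rsm_ins_bytes, 2) == EMULATE_DONE;
}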
Diffstat (limited to 'arch/x86/include/asm/kvm_host.h')
-rw-r--r--	arch/x86/include/asm/kvm_host.h	| 7 +++++++
1 file changed, 7 insertions(+), 0 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 00ddb0c9e612..3b220b86c8e4 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1251,6 +1251,13 @@ static inline int emulate_instruction(struct kvm_vcpu *vcpu,
 			emulation_type | EMULTYPE_NO_REEXECUTE, NULL, 0);
 }
 
+static inline int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu,
+						       void *insn, int insn_len)
+{
+	return x86_emulate_instruction(vcpu, 0, EMULTYPE_NO_REEXECUTE,
+				       insn, insn_len);
+}
+
 void kvm_enable_efer_bits(u64);
 bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer);
 int kvm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr);