author    Sean Christopherson <sean.j.christopherson@intel.com>  2020-03-26 09:07:12 -0700
committer Paolo Bonzini <pbonzini@redhat.com>  2020-03-31 10:48:11 -0400
commit    842f4be95899df22b5843ba1a7c8cf37e831a6e8
tree      8691ddc1edcba65b5cb7bbc7a7ad4daf81b4bea5  /arch/x86/kvm/vmx/vmenter.S
parent    KVM: SVM: Annotate svm_x86_ops as __initdata
KVM: VMX: Add a trampoline to fix VMREAD error handling
Add a hand-coded assembly trampoline to preserve volatile registers
across vmread_error(), and to handle the calling convention differences
between 64-bit and 32-bit due to asmlinkage on vmread_error().  Pass
@field and @fault on the stack when invoking the trampoline to avoid
clobbering volatile registers in the context of the inline assembly.

Calling vmread_error() directly from inline assembly is partially
broken on 64-bit, and completely broken on 32-bit.  On 64-bit, it will
clobber %rdi and %rsi (used to pass @field and @fault) and any volatile
regs written by vmread_error().  On 32-bit, asmlinkage means
vmread_error() expects the parameters to be passed on the stack, not
via regs.

Opportunistically zero out the result in the trampoline to save a few
bytes of code for every VMREAD.  A happy side effect of the trampoline
is that the inline code footprint is reduced by three bytes on 64-bit
due to PUSH/POP being more efficient (in terms of opcode bytes) than
MOV.

Fixes: 6e2020977e3e6 ("KVM: VMX: Add error handling to VMREAD helper")
Cc: stable@vger.kernel.org
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200326160712.28803-1-sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
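For context, the caller-side change lives in arch/x86/kvm/vmx/ops.h and
is not part of this diffstat.  Below is a minimal sketch, assuming the
VMREAD helper looks roughly like the upstream __vmcs_readl(), of how the
inline asm pushes @fault and @field and bounces through the trampoline;
details such as the branch-taken hint are omitted:

static __always_inline unsigned long __vmcs_readl(unsigned long field)
{
	unsigned long value;

	asm volatile("1: vmread %2, %1\n\t"
		     "ja 3f\n\t"

		     /* VMREAD failed: push '0' for @fault, then @field. */
		     "push $0\n\t"
		     "push %2\n\t"
		     "2: call vmread_error_trampoline\n\t"

		     /*
		      * Unwind the stack.  The trampoline zeroes the @fault
		      * slot, so the result popped into %1 is '0' on error.
		      */
		     "pop %2\n\t"
		     "pop %1\n\t"
		     "3:\n\t"

		     /* VMREAD faulted: as above, but push '1' for @fault. */
		     ".pushsection .fixup, \"ax\"\n\t"
		     "4: push $1\n\t"
		     "push %2\n\t"
		     "jmp 2b\n\t"
		     ".popsection\n\t"
		     _ASM_EXTABLE(1b, 4b)
		     : ASM_CALL_CONSTRAINT, "=r"(value) : "r"(field) : "cc");
	return value;
}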
Diffstat (limited to 'arch/x86/kvm/vmx/vmenter.S')
-rw-r--r--  arch/x86/kvm/vmx/vmenter.S  58
1 file changed, 58 insertions(+), 0 deletions(-)
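Reading the diff is easier with the trampoline's stack frame in mind.
The caller pushes @fault then @field, CALL pushes the return address,
and the trampoline saves %_ASM_BP, so the frame it addresses (derived
from the offsets used in the code below) is:

	3*WORD_SIZE(%_ASM_BP) : @fault, zeroed before return and popped
	                        by the caller as the VMREAD result
	2*WORD_SIZE(%_ASM_BP) : @field
	1*WORD_SIZE(%_ASM_BP) : return address (pushed by CALL)
	0*WORD_SIZE(%_ASM_BP) : saved %_ASM_BP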
diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S
index ca2065166d1d..9651ba388ba9 100644
--- a/arch/x86/kvm/vmx/vmenter.S
+++ b/arch/x86/kvm/vmx/vmenter.S
@@ -234,3 +234,61 @@ SYM_FUNC_START(__vmx_vcpu_run)
2: mov $1, %eax
jmp 1b
SYM_FUNC_END(__vmx_vcpu_run)
+
+/**
+ * vmread_error_trampoline - Trampoline from inline asm to vmread_error()
+ * @field: VMCS field encoding that failed
+ * @fault: %true if the VMREAD faulted, %false if it failed
+ *
+ * Save and restore volatile registers across a call to vmread_error(). Note,
+ * all parameters are passed on the stack.
+ */
+SYM_FUNC_START(vmread_error_trampoline)
+ push %_ASM_BP
+ mov %_ASM_SP, %_ASM_BP
+
+ push %_ASM_AX
+ push %_ASM_CX
+ push %_ASM_DX
+#ifdef CONFIG_X86_64
+ push %rdi
+ push %rsi
+ push %r8
+ push %r9
+ push %r10
+ push %r11
+#endif
+#ifdef CONFIG_X86_64
+ /* Load @field and @fault to arg1 and arg2 respectively. */
+ mov 3*WORD_SIZE(%rbp), %_ASM_ARG2
+ mov 2*WORD_SIZE(%rbp), %_ASM_ARG1
+#else
+ /* Parameters are passed on the stack for 32-bit (see asmlinkage). */
+ push 3*WORD_SIZE(%ebp)
+ push 2*WORD_SIZE(%ebp)
+#endif
+
+ call vmread_error
+
+#ifndef CONFIG_X86_64
+ add $8, %esp
+#endif
+
+ /* Zero out @fault, which will be popped into the result register. */
+ _ASM_MOV $0, 3*WORD_SIZE(%_ASM_BP)
+
+#ifdef CONFIG_X86_64
+ pop %r11
+ pop %r10
+ pop %r9
+ pop %r8
+ pop %rsi
+ pop %rdi
+#endif
+ pop %_ASM_DX
+ pop %_ASM_CX
+ pop %_ASM_AX
+ pop %_ASM_BP
+
+ ret
+SYM_FUNC_END(vmread_error_trampoline)
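For reference, a sketch of the C-side declaration the trampoline
targets (assumed from the commit message; the declaration itself is not
part of this diff):

/*
 * asmlinkage forces 32-bit builds to take all parameters from the
 * stack, which is exactly what the trampoline pushes before CALL;
 * 64-bit builds instead load @field and @fault into the first two
 * argument registers (_ASM_ARG1/_ASM_ARG2).
 */
asmlinkage void vmread_error(unsigned long field, bool fault);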