path: root/arch/x86/kvm/vmx/vmenter.S
author	Josh Poimboeuf <jpoimboe@kernel.org>	2022-06-14 23:16:12 +0200
committer	Borislav Petkov <bp@suse.de>	2022-06-27 10:34:00 +0200
commit	bb06650634d3552c0f8557e9d16aa1a408040e28 (patch)
tree	3840adee1a272f68fd511f13c29186642da388f7 /arch/x86/kvm/vmx/vmenter.S
parent	KVM: VMX: Flatten __vmx_vcpu_run() (diff)
KVM: VMX: Convert launched argument to flags
Convert __vmx_vcpu_run()'s 'launched' argument to 'flags', in preparation
for doing SPEC_CTRL handling immediately after vmexit, which will need
another flag.

This is much easier than adding a fourth argument, because this code
supports both 32-bit and 64-bit, and the fourth argument on 32-bit would
have to be pushed on the stack.

Note that __vmx_vcpu_run_flags() is called outside of the noinstr critical
section because it will soon start calling potentially traceable functions.

Signed-off-by: Josh Poimboeuf <jpoimboe@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Borislav Petkov <bp@suse.de>
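The @flags value is consumed from assembly, so the new "run_flags.h" header
included by this diff has to be safe to pull into both C and vmenter.S. The
header itself is outside the diffstat below (which is limited to vmenter.S);
a minimal sketch of what it plausibly defines for this patch, with the
VMX_RUN_VMRESUME name taken from the diff and everything else an assumption:

/* arch/x86/kvm/vmx/run_flags.h -- sketch; only the bit used by this patch */
#ifndef __KVM_X86_VMX_RUN_FLAGS_H
#define __KVM_X86_VMX_RUN_FLAGS_H

/*
 * Bit 0 of __vmx_vcpu_run()'s @flags: the VMCS has already been launched,
 * so resume it with VMRESUME instead of VMLAUNCH.  A plain (1 << 0) keeps
 * the constant usable from assembly as well as from C.
 */
#define VMX_RUN_VMRESUME	(1 << 0)

#endif /* __KVM_X86_VMX_RUN_FLAGS_H */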
Diffstat (limited to 'arch/x86/kvm/vmx/vmenter.S')
-rw-r--r--	arch/x86/kvm/vmx/vmenter.S | 9 +++++----
1 file changed, 5 insertions(+), 4 deletions(-)
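On the C side, the commit message notes that __vmx_vcpu_run_flags() builds
this value outside the noinstr critical section. That vmx.c change is also
outside this diffstat; a hedged sketch of what the helper and its call site
might look like, where the struct fields and the surrounding call are
assumptions based on the existing 'launched' bool:

unsigned int __vmx_vcpu_run_flags(struct vcpu_vmx *vmx)
{
	unsigned int flags = 0;

	/* The old 'launched' bool becomes a bit in @flags. */
	if (vmx->loaded_vmcs->launched)
		flags |= VMX_RUN_VMRESUME;

	return flags;
}

/*
 * Call-site sketch: the flags are computed before entering the noinstr
 * critical section and forwarded to __vmx_vcpu_run() in place of the old
 * 'launched' argument.
 */
	vmx->fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs,
				   flags);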
diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S
index c83163fb2e9c..ddc3bf85db33 100644
--- a/arch/x86/kvm/vmx/vmenter.S
+++ b/arch/x86/kvm/vmx/vmenter.S
@@ -5,6 +5,7 @@
#include <asm/kvm_vcpu_regs.h>
#include <asm/nospec-branch.h>
#include <asm/segment.h>
+#include "run_flags.h"
#define WORD_SIZE (BITS_PER_LONG / 8)
@@ -34,7 +35,7 @@
* __vmx_vcpu_run - Run a vCPU via a transition to VMX guest mode
* @vmx: struct vcpu_vmx * (forwarded to vmx_update_host_rsp)
* @regs: unsigned long * (to guest registers)
- * @launched: %true if the VMCS has been launched
+ * @flags: VMX_RUN_VMRESUME: use VMRESUME instead of VMLAUNCH
*
* Returns:
* 0 on VM-Exit, 1 on VM-Fail
@@ -59,7 +60,7 @@ SYM_FUNC_START(__vmx_vcpu_run)
*/
push %_ASM_ARG2
- /* Copy @launched to BL, _ASM_ARG3 is volatile. */
+ /* Copy @flags to BL, _ASM_ARG3 is volatile. */
mov %_ASM_ARG3B, %bl
lea (%_ASM_SP), %_ASM_ARG2
@@ -69,7 +70,7 @@ SYM_FUNC_START(__vmx_vcpu_run)
mov (%_ASM_SP), %_ASM_AX
/* Check if vmlaunch or vmresume is needed */
- testb %bl, %bl
+ testb $VMX_RUN_VMRESUME, %bl
/* Load guest registers. Don't clobber flags. */
mov VCPU_RCX(%_ASM_AX), %_ASM_CX
@@ -92,7 +93,7 @@ SYM_FUNC_START(__vmx_vcpu_run)
mov VCPU_RAX(%_ASM_AX), %_ASM_AX
/* Check EFLAGS.ZF from 'testb' above */
- je .Lvmlaunch
+ jz .Lvmlaunch
/*
* After a successful VMRESUME/VMLAUNCH, control flow "magically"