author		Binbin Wu <binbin.wu@linux.intel.com>	2025-02-22 09:42:18 +0800
committer	Paolo Bonzini <pbonzini@redhat.com>	2025-03-14 14:20:54 -0400
commit		44428e4936022a7a31743017849b167e64f33a32 (patch)
tree		ce208f8baea8e5ce8072f559f508e333dec5041d
parent		KVM: x86: Have ____kvm_emulate_hypercall() read the GPRs (diff)
KVM: x86: Move pv_unhalted check out of kvm_vcpu_has_events()
Move the pv_unhalted check out of kvm_vcpu_has_events(), check pv_unhalted explicitly when handling PV unhalt, and expose kvm_vcpu_has_events().

kvm_vcpu_has_events() returns true if pv_unhalted is set, and pv_unhalted is only cleared on transitions to KVM_MP_STATE_RUNNABLE. If the guest initiates a spurious wakeup, pv_unhalted could be left set in perpetuity. Currently, this is not problematic because kvm_vcpu_has_events() is only called when handling PV unhalt. However, if kvm_vcpu_has_events() is used for other purposes in the future, it could return unexpected results.

Export kvm_vcpu_has_events() for use in broader contexts.

Suggested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Binbin Wu <binbin.wu@linux.intel.com>
Message-ID: <20250222014225.897298-3-binbin.wu@linux.intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
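The intent can be summarized with a small standalone sketch (plain C, not the kernel sources; vcpu_model, model_vcpu_has_events and model_vcpu_runnable are simplified stand-ins for the real KVM types and helpers): after the patch, the event-check helper no longer reports pv_unhalted, and callers that care about PV unhalt test the flag explicitly, mirroring the updated kvm_arch_vcpu_runnable().

/*
 * Minimal sketch of the split introduced by this patch. Types and
 * helper names here are illustrative, not the kernel's.
 */
#include <stdbool.h>
#include <stdio.h>

struct vcpu_model {
	bool pv_unhalted;       /* set by PV kick, cleared only on RUNNABLE */
	bool pending_exception; /* stand-in for the other event sources */
};

/* After the patch: reports pending events only, never pv_unhalted. */
static bool model_vcpu_has_events(const struct vcpu_model *vcpu)
{
	return vcpu->pending_exception;
}

/* Mirrors the updated kvm_arch_vcpu_runnable(): pv_unhalted checked explicitly. */
static bool model_vcpu_runnable(const struct vcpu_model *vcpu)
{
	return vcpu->pv_unhalted || model_vcpu_has_events(vcpu);
}

int main(void)
{
	struct vcpu_model vcpu = { .pv_unhalted = true, .pending_exception = false };

	/* A lingering pv_unhalted no longer leaks through the event check... */
	printf("has_events: %d\n", model_vcpu_has_events(&vcpu)); /* prints 0 */
	/* ...but the vCPU is still reported runnable via the explicit check. */
	printf("runnable:   %d\n", model_vcpu_runnable(&vcpu));   /* prints 1 */
	return 0;
}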
-rw-r--r--	arch/x86/kvm/x86.c		| 11
-rw-r--r--	include/linux/kvm_host.h	|  1
2 files changed, 6 insertions, 6 deletions
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index a792207a0dd1..3cae210ffaa4 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -11143,7 +11143,7 @@ static bool kvm_vcpu_running(struct kvm_vcpu *vcpu)
!vcpu->arch.apf.halted);
}
-static bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
+bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
{
if (!list_empty_careful(&vcpu->async_pf.done))
return true;
@@ -11152,9 +11152,6 @@ static bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
kvm_apic_init_sipi_allowed(vcpu))
return true;
- if (vcpu->arch.pv.pv_unhalted)
- return true;
-
if (kvm_is_exception_pending(vcpu))
return true;
@@ -11192,10 +11189,12 @@ static bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu)
return false;
}
+EXPORT_SYMBOL_GPL(kvm_vcpu_has_events);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
- return kvm_vcpu_running(vcpu) || kvm_vcpu_has_events(vcpu);
+ return kvm_vcpu_running(vcpu) || vcpu->arch.pv.pv_unhalted ||
+ kvm_vcpu_has_events(vcpu);
}
/* Called within kvm->srcu read side. */
@@ -11331,7 +11330,7 @@ static int __kvm_emulate_halt(struct kvm_vcpu *vcpu, int state, int reason)
*/
++vcpu->stat.halt_exits;
if (lapic_in_kernel(vcpu)) {
- if (kvm_vcpu_has_events(vcpu))
+ if (kvm_vcpu_has_events(vcpu) || vcpu->arch.pv.pv_unhalted)
vcpu->arch.pv.pv_unhalted = false;
else
vcpu->arch.mp_state = state;
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 3bfe3140f444..ed1968f6f841 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1609,6 +1609,7 @@ void kvm_arch_disable_virtualization(void);
int kvm_arch_enable_virtualization_cpu(void);
void kvm_arch_disable_virtualization_cpu(void);
#endif
+bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);