author    Sean Christopherson <seanjc@google.com>    2021-11-11 02:07:31 +0000
committer Peter Zijlstra <peterz@infradead.org>      2021-11-17 14:49:09 +0100
commit 73cd107b9685c5308e864061772e4a78a629e4a0 (patch)
tree   2bbab62a6e54c2e06356ea98f1828d37eb914e59 /arch/x86/kvm
parent perf/core: Use static_call to optimize perf_guest_info_callbacks (diff)
KVM: x86: Drop current_vcpu for kvm_running_vcpu + kvm_arch_vcpu variable
Use the generic kvm_running_vcpu plus a new 'handling_intr_from_guest' variable in kvm_arch_vcpu instead of the semi-redundant current_vcpu. kvm_before/after_interrupt() must be called while the vCPU is loaded (which protects against preemption), thus kvm_running_vcpu is guaranteed to be non-NULL when handling_intr_from_guest is non-zero.

Switching to kvm_get_running_vcpu() will allow moving KVM's perf callbacks to generic code, and the new flag will be used in a future patch to more precisely identify the "NMI from guest" case.

Signed-off-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Link: https://lore.kernel.org/r/20211111020738.2512932-11-seanjc@google.com
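For illustration, a minimal standalone sketch (not kernel code) of the pattern the message describes: a per-thread pointer stands in for KVM's per-CPU kvm_running_vcpu, and all helper names here are hypothetical.

#include <stdbool.h>

struct vcpu {
	unsigned int handling_intr_from_guest;
};

/* Stand-in for the per-CPU pointer behind kvm_get_running_vcpu(). */
static _Thread_local struct vcpu *running_vcpu;

/* Bracket the window in which an interrupt taken from the guest is handled. */
static void before_interrupt(struct vcpu *v) { v->handling_intr_from_guest = 1; }
static void after_interrupt(struct vcpu *v)  { v->handling_intr_from_guest = 0; }

/*
 * A PMI callback can derive "did this come from the guest?" from the
 * running vCPU alone: the flag is only set while the vCPU is loaded,
 * so a non-zero flag implies running_vcpu is non-NULL.
 */
static bool pmi_in_guest(void)
{
	struct vcpu *v = running_vcpu;

	return v && v->handling_intr_from_guest;
}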
Diffstat (limited to 'arch/x86/kvm')
-rw-r--r--  arch/x86/kvm/pmu.c   2
-rw-r--r--  arch/x86/kvm/x86.c  21
-rw-r--r--  arch/x86/kvm/x86.h  10
3 files changed, 19 insertions, 14 deletions
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index b2520b3e9e89..0c2133eb4cf6 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -87,7 +87,7 @@ static void kvm_perf_overflow_intr(struct perf_event *perf_event,
* woken up. So we should wake it, but this is impossible from
* NMI context. Do it from irq work instead.
*/
- if (!kvm_guest_state())
+ if (!kvm_handling_nmi_from_guest(pmc->vcpu))
irq_work_queue(&pmc_to_pmu(pmc)->irq_work);
else
kvm_make_request(KVM_REQ_PMI, pmc->vcpu);
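As the comment in this hunk notes, a task cannot be woken from NMI context, so the wakeup is deferred via irq_work. A kernel-style sketch of that deferral pattern (irq_work_queue() and IRQ_WORK_INIT() are the real API; the surrounding names are illustrative):

#include <linux/irq_work.h>

/* Runs later in hard-IRQ context, where waking a task is legal. */
static void pmi_deferred_wakeup(struct irq_work *work)
{
	/* e.g. a wake_up_process() call would be safe here */
}

static struct irq_work pmi_work = IRQ_WORK_INIT(pmi_deferred_wakeup);

/* Called from the PMI (NMI) handler itself. */
static void on_pmi_from_nmi(void)
{
	/* irq_work_queue() is NMI-safe; the callback fires once the NMI returns. */
	irq_work_queue(&pmi_work);
}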
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 2011a1cfb42d..bb71e10fdb6a 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -8469,15 +8469,17 @@ static void kvm_timer_init(void)
kvmclock_cpu_online, kvmclock_cpu_down_prep);
}

-DEFINE_PER_CPU(struct kvm_vcpu *, current_vcpu);
-EXPORT_PER_CPU_SYMBOL_GPL(current_vcpu);
+static inline bool kvm_pmi_in_guest(struct kvm_vcpu *vcpu)
+{
+ return vcpu && vcpu->arch.handling_intr_from_guest;
+}

-unsigned int kvm_guest_state(void)
+static unsigned int kvm_guest_state(void)
{
- struct kvm_vcpu *vcpu = __this_cpu_read(current_vcpu);
+ struct kvm_vcpu *vcpu = kvm_get_running_vcpu();
unsigned int state;

- if (!vcpu)
+ if (!kvm_pmi_in_guest(vcpu))
return 0;

state = PERF_GUEST_ACTIVE;
@@ -8489,9 +8491,10 @@ unsigned int kvm_guest_state(void)
static unsigned long kvm_guest_get_ip(void)
{
- struct kvm_vcpu *vcpu = __this_cpu_read(current_vcpu);
+ struct kvm_vcpu *vcpu = kvm_get_running_vcpu();

- if (WARN_ON_ONCE(!vcpu))
+ /* Retrieving the IP must be guarded by a call to kvm_guest_state(). */
+ if (WARN_ON_ONCE(!kvm_pmi_in_guest(vcpu)))
return 0;

return kvm_rip_read(vcpu);
@@ -8499,10 +8502,10 @@ static unsigned long kvm_guest_get_ip(void)
static unsigned int kvm_handle_intel_pt_intr(void)
{
- struct kvm_vcpu *vcpu = __this_cpu_read(current_vcpu);
+ struct kvm_vcpu *vcpu = kvm_get_running_vcpu();

/* '0' on failure so that the !PT case can use a RET0 static call. */
- if (!vcpu)
+ if (!kvm_pmi_in_guest(vcpu))
return 0;

kvm_make_request(KVM_REQ_PMI, vcpu);
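For context, a hypothetical sketch of how a perf-side consumer might interpret the state bits that kvm_guest_state() above returns (the PERF_GUEST_* and PERF_RECORD_MISC_* constants are real; classify_sample() is illustrative only):

#include <linux/perf_event.h>

/* Map the guest-state bits to the misc field of a PMI sample. */
static u16 classify_sample(unsigned int state)
{
	if (!(state & PERF_GUEST_ACTIVE))
		return PERF_RECORD_MISC_KERNEL;     /* sample hit host code */
	if (state & PERF_GUEST_USER)
		return PERF_RECORD_MISC_GUEST_USER;
	return PERF_RECORD_MISC_GUEST_KERNEL;
}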
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index ea264c4502e4..d070043fd2e8 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -385,18 +385,20 @@ static inline bool kvm_cstate_in_guest(struct kvm *kvm)
return kvm->arch.cstate_in_guest;
}

-DECLARE_PER_CPU(struct kvm_vcpu *, current_vcpu);
-
static inline void kvm_before_interrupt(struct kvm_vcpu *vcpu)
{
- __this_cpu_write(current_vcpu, vcpu);
+ WRITE_ONCE(vcpu->arch.handling_intr_from_guest, 1);
}

static inline void kvm_after_interrupt(struct kvm_vcpu *vcpu)
{
- __this_cpu_write(current_vcpu, NULL);
+ WRITE_ONCE(vcpu->arch.handling_intr_from_guest, 0);
}

+static inline bool kvm_handling_nmi_from_guest(struct kvm_vcpu *vcpu)
+{
+ return !!vcpu->arch.handling_intr_from_guest;
+}
+
static inline bool kvm_pat_valid(u64 data)
{
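To see where these helpers sit, a hypothetical sketch of the vendor VM-exit path that brackets interrupt dispatch (handle_external_interrupt() is a stand-in; the real call sites live in vmx.c and svm.c):

#include <linux/kvm_host.h>

static void vcpu_dispatch_exit_irq(struct kvm_vcpu *vcpu)
{
	kvm_before_interrupt(vcpu);         /* handling_intr_from_guest = 1 */
	handle_external_interrupt(vcpu);    /* hypothetical IRQ/NMI dispatch */
	kvm_after_interrupt(vcpu);          /* flag cleared before host code resumes */
}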