author     Zhenzhong Duan <zhenzhong.duan@oracle.com>  2019-10-23 19:16:20 +0800
committer  Paolo Bonzini <pbonzini@redhat.com>         2020-07-08 16:21:56 -0400
commit     de585020daf4e28a308549ed1457a51b3cc5ac92 (patch)
tree       3de3e81639560e298f717473a27071f21a979628 /arch/x86/kernel/kvm.c
parent     KVM: x86/mmu: Rename page_header() to to_shadow_page() (diff)
download   linux-dev-de585020daf4e28a308549ed1457a51b3cc5ac92.tar.xz
           linux-dev-de585020daf4e28a308549ed1457a51b3cc5ac92.zip
Revert "KVM: X86: Fix setup the virt_spin_lock_key before static key get initialized"
This reverts commit 34226b6b70980a8f81fff3c09a2c889f77edeeff.

Commit 8990cac6e5ea ("x86/jump_label: Initialize static branching early")
added a jump_label_init() call in setup_arch() so that static keys are
initialized early, so we can use the original, simpler code again.

The similar change for Xen is commit 090d54bcbc54 ("Revert "x86/paravirt:
Set up the virt_spin_lock_key after static keys get initialized"").

Signed-off-by: Zhenzhong Duan <zhenzhong.duan@oracle.com>
Reviewed-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Radim Krcmar <rkrcmar@redhat.com>
Cc: Sean Christopherson <sean.j.christopherson@intel.com>
Cc: Vitaly Kuznetsov <vkuznets@redhat.com>
Cc: Wanpeng Li <wanpengli@tencent.com>
Cc: Jim Mattson <jmattson@google.com>
Cc: Joerg Roedel <joro@8bytes.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
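Restated, the ordering argument behind the revert (a rough sketch based only on the commit message; the boot path is simplified, not the exact call chain):

/*
 * setup_arch()
 *     jump_label_init()             <- added by 8990cac6e5ea, so static keys
 *                                      are usable from early boot onwards
 * ...
 * kvm_spinlock_init()
 *     static_branch_disable(&virt_spin_lock_key)
 *                                   <- now safe to call here, so the
 *                                      kvm_smp_prepare_cpus() wrapper is no
 *                                      longer needed
 */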
Diffstat (limited to 'arch/x86/kernel/kvm.c')
-rw-r--r--   arch/x86/kernel/kvm.c   12
1 file changed, 3 insertions(+), 9 deletions(-)
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 681bc4090e91..e4208ce11689 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -570,13 +570,6 @@ static void kvm_smp_send_call_func_ipi(const struct cpumask *mask)
 	}
 }
 
-static void __init kvm_smp_prepare_cpus(unsigned int max_cpus)
-{
-	native_smp_prepare_cpus(max_cpus);
-	if (kvm_para_has_hint(KVM_HINTS_REALTIME))
-		static_branch_disable(&virt_spin_lock_key);
-}
-
 static void __init kvm_smp_prepare_boot_cpu(void)
 {
 	/*
@@ -671,7 +664,6 @@ static void __init kvm_guest_init(void)
 	}
 
 #ifdef CONFIG_SMP
-	smp_ops.smp_prepare_cpus = kvm_smp_prepare_cpus;
 	smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
 	if (pv_sched_yield_supported()) {
 		smp_ops.send_call_func_ipi = kvm_smp_send_call_func_ipi;
@@ -879,8 +871,10 @@ void __init kvm_spinlock_init(void)
 	if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
 		return;
 
-	if (kvm_para_has_hint(KVM_HINTS_REALTIME))
+	if (kvm_para_has_hint(KVM_HINTS_REALTIME)) {
+		static_branch_disable(&virt_spin_lock_key);
 		return;
+	}
 
 	/* Don't use the pvqspinlock code if there is only 1 vCPU. */
 	if (num_possible_cpus() == 1)
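For reference, the start of kvm_spinlock_init() as it reads after this revert, pieced together from the last hunk (only the lines visible in the hunk are certain; the added comment and the elided tail are assumptions):

void __init kvm_spinlock_init(void)
{
	if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
		return;

	if (kvm_para_has_hint(KVM_HINTS_REALTIME)) {
		/* Host hints that vCPUs are never preempted, so the
		 * native qspinlock is preferable: drop the virt fallback. */
		static_branch_disable(&virt_spin_lock_key);
		return;
	}

	/* Don't use the pvqspinlock code if there is only 1 vCPU. */
	if (num_possible_cpus() == 1)
		return;

	/* ... pvqspinlock setup continues here (not part of the hunk) ... */
}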