author     Vitaly Kuznetsov <vkuznets@redhat.com>  2021-04-14 14:35:41 +0200
committer  Paolo Bonzini <pbonzini@redhat.com>     2021-05-07 06:05:36 -0400
commit     8b79feffeca28c5459458fe78676b081e87c93a4 (patch)
tree       3a57061527094f12520280e68a545ac85963272d /arch/x86/kernel/kvm.c
parent     x86/kvm: Fix pr_info() for async PF setup/teardown (diff)
x86/kvm: Teardown PV features on boot CPU as well
Various PV features (Async PF, PV EOI, steal time) work through memory shared with the hypervisor, and when we restore from hibernation we must properly tear down all these features to make sure the hypervisor doesn't write to stale locations after we jump to the previously hibernated kernel (which can try to place anything there). For secondary CPUs the job is already done by kvm_cpu_down_prepare(); register syscore ops to do the same for the boot CPU.

Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Message-Id: <20210414123544.1060604-3-vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
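The pattern the patch applies is small enough to sketch outside the diff context. Below is a condensed, self-contained approximation, not the upstream code: one shared offline helper is wired into both a CPU-hotplug teardown callback (secondary CPUs) and a syscore_ops suspend hook (boot CPU). The pv_feature_*() helpers, the guest_* names, the "x86/pv-sketch:online" state name, and the initcall are placeholders for illustration; the real teardown steps (kvm_disable_steal_time(), clearing MSR_KVM_PV_EOI_EN, kvm_pv_disable_apf()) appear in the diff below, and the real registration happens from kvm_guest_init().

/*
 * Sketch of the boot-CPU teardown pattern; placeholder names, not the
 * literal arch/x86/kernel/kvm.c code.
 */
#include <linux/cpuhotplug.h>
#include <linux/init.h>
#include <linux/irqflags.h>
#include <linux/smp.h>
#include <linux/syscore_ops.h>

/* Stand-ins for arming/disarming the guest/host shared memory regions. */
static void pv_feature_enable(void)  { /* e.g. wrmsrl(...) to arm shared pages */ }
static void pv_feature_disable(void) { /* e.g. wrmsrl(..., 0) so the host stops writing */ }

static void guest_cpu_offline(void)
{
	pv_feature_disable();
}

static int guest_cpu_online(unsigned int cpu)
{
	unsigned long flags;

	local_irq_save(flags);
	pv_feature_enable();
	local_irq_restore(flags);
	return 0;
}

static int guest_cpu_down_prepare(unsigned int cpu)
{
	unsigned long flags;

	/* Secondary CPUs: invoked by the hotplug state machine on CPU offline. */
	local_irq_save(flags);
	guest_cpu_offline();
	local_irq_restore(flags);
	return 0;
}

static int guest_suspend(void)
{
	/* Boot CPU: syscore callbacks run late, with interrupts disabled. */
	guest_cpu_offline();
	return 0;
}

static void guest_resume(void)
{
	guest_cpu_online(raw_smp_processor_id());
}

static struct syscore_ops guest_syscore_ops = {
	.suspend = guest_suspend,
	.resume  = guest_resume,
};

static int __init guest_pv_init(void)
{
	int ret;

	/* Secondary CPUs are covered by hotplug callbacks... */
	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/pv-sketch:online",
					guest_cpu_online, guest_cpu_down_prepare);
	if (ret < 0)
		return ret;

	/* ...and the boot CPU by syscore_ops, which is what this patch adds. */
	register_syscore_ops(&guest_syscore_ops);
	return 0;
}
core_initcall(guest_pv_init);

The key design point mirrored here is that the teardown helper itself is hotplug-agnostic; only the two entry points differ in when they run (CPU offline vs. late suspend on the last remaining CPU), so hibernation now disarms the shared-memory features on every CPU before the resumed kernel image takes over.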
Diffstat (limited to 'arch/x86/kernel/kvm.c')
-rw-r--r--  arch/x86/kernel/kvm.c | 56
1 file changed, 40 insertions(+), 16 deletions(-)
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index dc440bb69222..9d5f96321c7f 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -26,6 +26,7 @@
#include <linux/kprobes.h>
#include <linux/nmi.h>
#include <linux/swait.h>
+#include <linux/syscore_ops.h>
#include <asm/timer.h>
#include <asm/cpu.h>
#include <asm/traps.h>
@@ -451,6 +452,25 @@ static void __init sev_map_percpu_data(void)
}
}
+static void kvm_guest_cpu_offline(void)
+{
+ kvm_disable_steal_time();
+ if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
+ wrmsrl(MSR_KVM_PV_EOI_EN, 0);
+ kvm_pv_disable_apf();
+ apf_task_wake_all();
+}
+
+static int kvm_cpu_online(unsigned int cpu)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ kvm_guest_cpu_init();
+ local_irq_restore(flags);
+ return 0;
+}
+
#ifdef CONFIG_SMP
static DEFINE_PER_CPU(cpumask_var_t, __pv_cpu_mask);
@@ -635,32 +655,34 @@ static void __init kvm_smp_prepare_boot_cpu(void)
kvm_spinlock_init();
}
-static void kvm_guest_cpu_offline(void)
+static int kvm_cpu_down_prepare(unsigned int cpu)
{
- kvm_disable_steal_time();
- if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
- wrmsrl(MSR_KVM_PV_EOI_EN, 0);
- kvm_pv_disable_apf();
- apf_task_wake_all();
-}
+ unsigned long flags;
-static int kvm_cpu_online(unsigned int cpu)
-{
- local_irq_disable();
- kvm_guest_cpu_init();
- local_irq_enable();
+ local_irq_save(flags);
+ kvm_guest_cpu_offline();
+ local_irq_restore(flags);
return 0;
}
-static int kvm_cpu_down_prepare(unsigned int cpu)
+#endif
+
+static int kvm_suspend(void)
{
- local_irq_disable();
kvm_guest_cpu_offline();
- local_irq_enable();
+
return 0;
}
-#endif
+static void kvm_resume(void)
+{
+ kvm_cpu_online(raw_smp_processor_id());
+}
+
+static struct syscore_ops kvm_syscore_ops = {
+ .suspend = kvm_suspend,
+ .resume = kvm_resume,
+};
static void __init kvm_guest_init(void)
{
@@ -704,6 +726,8 @@ static void __init kvm_guest_init(void)
kvm_guest_cpu_init();
#endif
+ register_syscore_ops(&kvm_syscore_ops);
+
/*
* Hard lockup detection is enabled by default. Disable it, as guests
* can get false positives too easily, for example if the host is