Diffstat (limited to 'arch/arm64/include/asm/kvm_host.h')
-rw-r--r--  arch/arm64/include/asm/kvm_host.h  30
1 file changed, 21 insertions(+), 9 deletions(-)
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index c328191aa202..f656169db8c3 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -19,12 +19,12 @@
#include <asm/arch_gicv3.h>
#include <asm/barrier.h>
#include <asm/cpufeature.h>
+#include <asm/cputype.h>
#include <asm/daifflags.h>
#include <asm/fpsimd.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmio.h>
-#include <asm/smp_plat.h>
#include <asm/thread_info.h>
#define __KVM_HAVE_ARCH_INTC_INITIALIZED
@@ -484,11 +484,10 @@ struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
DECLARE_PER_CPU(kvm_host_data_t, kvm_host_data);
-static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt,
- int cpu)
+static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt)
{
/* The host's MPIDR is immutable, so let's set it up at boot time */
- cpu_ctxt->sys_regs[MPIDR_EL1] = cpu_logical_map(cpu);
+ cpu_ctxt->sys_regs[MPIDR_EL1] = read_cpuid_mpidr();
}
void __kvm_enable_ssbs(void);
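For context, here is a rough sketch of the asm/cputype.h helper that replaces the cpu_logical_map() lookup (approximate, not verbatim kernel code): read_cpuid_mpidr() reads MPIDR_EL1 on the CPU the code is running on, which is why the cpu parameter and the asm/smp_plat.h include are no longer needed.

/* Approximate sketch of the asm/cputype.h helper, not verbatim kernel code:
 * it reads the current CPU's MPIDR_EL1 directly, so no logical-CPU map
 * lookup is required. */
static inline u64 read_cpuid_mpidr(void)
{
	return read_cpuid(MPIDR_EL1);
}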
@@ -597,11 +596,12 @@ static inline void kvm_arm_vhe_guest_enter(void)
* will not signal the CPU of interrupts of lower priority, and the
* only way to get out will be via guest exceptions.
* Naturally, we want to avoid this.
+ *
+ * local_daif_mask() already sets GIC_PRIO_PSR_I_SET; we just need a
+ * dsb to ensure the redistributor forwards EL2 IRQs to the CPU.
*/
- if (system_uses_irq_prio_masking()) {
- gic_write_pmr(GIC_PRIO_IRQON);
+ if (system_uses_irq_prio_masking())
dsb(sy);
- }
}
static inline void kvm_arm_vhe_guest_exit(void)
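To illustrate why the explicit gic_write_pmr(GIC_PRIO_IRQON) becomes redundant, here is a rough sketch of the relevant part of local_daif_mask() from asm/daifflags.h (approximate, from memory, not the verbatim kernel code): when priority masking is in use it already leaves PMR with GIC_PRIO_PSR_I_SET, so only the dsb remains necessary here.

/* Approximate sketch of local_daif_mask(), not verbatim: after masking DAIF
 * it raises PMR with the PSR_I bit encoded, so callers such as
 * kvm_arm_vhe_guest_enter() need not touch PMR again. */
static inline void local_daif_mask(void)
{
	asm volatile("msr daifset, #0xf" ::: "memory");

	if (system_uses_irq_prio_masking())
		gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
}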
@@ -620,9 +620,21 @@ static inline void kvm_arm_vhe_guest_exit(void)
isb();
}
-static inline bool kvm_arm_harden_branch_predictor(void)
+#define KVM_BP_HARDEN_UNKNOWN -1
+#define KVM_BP_HARDEN_WA_NEEDED 0
+#define KVM_BP_HARDEN_NOT_REQUIRED 1
+
+static inline int kvm_arm_harden_branch_predictor(void)
{
- return cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR);
+ switch (get_spectre_v2_workaround_state()) {
+ case ARM64_BP_HARDEN_WA_NEEDED:
+ return KVM_BP_HARDEN_WA_NEEDED;
+ case ARM64_BP_HARDEN_NOT_REQUIRED:
+ return KVM_BP_HARDEN_NOT_REQUIRED;
+ case ARM64_BP_HARDEN_UNKNOWN:
+ default:
+ return KVM_BP_HARDEN_UNKNOWN;
+ }
}
#define KVM_SSBD_UNKNOWN -1
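A hedged sketch of how a caller might consume the new tri-state return value, for example in an SMCCC ARCH_WORKAROUND_1 feature-discovery path; the surrounding handler, the val variable, and the SMCCC_RET_* mapping below are assumptions for illustration and are not taken from this diff.

/* Hypothetical consumer of the tri-state result; handler context and
 * return-value mapping are illustrative assumptions only. */
switch (kvm_arm_harden_branch_predictor()) {
case KVM_BP_HARDEN_WA_NEEDED:
	val = SMCCC_RET_SUCCESS;	/* workaround implemented and needed */
	break;
case KVM_BP_HARDEN_NOT_REQUIRED:
	val = SMCCC_RET_NOT_REQUIRED;	/* CPU not affected */
	break;
case KVM_BP_HARDEN_UNKNOWN:
default:
	val = SMCCC_RET_NOT_SUPPORTED;	/* be conservative when unknown */
	break;
}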