Diffstat (limited to 'arch/arm64/kvm/va_layout.c'):
 arch/arm64/kvm/va_layout.c | 104
 1 file changed, 85 insertions(+), 19 deletions(-)
diff --git a/arch/arm64/kvm/va_layout.c b/arch/arm64/kvm/va_layout.c
index e0404bcab019..d8cc51bd60bf 100644
--- a/arch/arm64/kvm/va_layout.c
+++ b/arch/arm64/kvm/va_layout.c
@@ -11,6 +11,7 @@
#include <asm/debug-monitors.h>
#include <asm/insn.h>
#include <asm/kvm_mmu.h>
+#include <asm/memory.h>
/*
* The LSB of the HYP VA tag
@@ -23,6 +24,30 @@ static u64 tag_val;
static u64 va_mask;
/*
+ * Compute HYP VA by using the same computation as kern_hyp_va().
+ */
+static u64 __early_kern_hyp_va(u64 addr)
+{
+ addr &= va_mask;
+ addr |= tag_val << tag_lsb;
+ return addr;
+}
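
As a concrete illustration of the mask-and-tag step (values assumed for the example, not taken from the patch): with vabits_actual == 48 and tag_lsb == 47, kvm_compute_layout() sets va_mask = GENMASK_ULL(46, 0), so the helper keeps a kernel VA's low 47 bits and replaces everything above them with the tag. A minimal self-contained C model:

/* Model of __early_kern_hyp_va() under the assumed layout above. */
#include <stdint.h>

static uint64_t model_kern_hyp_va(uint64_t addr)
{
	const unsigned int tag_lsb = 47;	/* assumed */
	const uint64_t tag_val = 0x1;		/* assumed: only the VA MSB flipped */
	const uint64_t va_mask = (1ULL << tag_lsb) - 1;

	return (addr & va_mask) | (tag_val << tag_lsb);
}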
+
+/*
+ * Store a hyp VA <-> PA offset into a hyp-owned variable.
+ */
+static void init_hyp_physvirt_offset(void)
+{
+ extern s64 kvm_nvhe_sym(hyp_physvirt_offset);
+ u64 kern_va, hyp_va;
+
+ /* Compute the offset from the hyp VA and PA of a random symbol. */
+ kern_va = (u64)kvm_ksym_ref(__hyp_text_start);
+ hyp_va = __early_kern_hyp_va(kern_va);
+ CHOOSE_NVHE_SYM(hyp_physvirt_offset) = (s64)__pa(kern_va) - (s64)hyp_va;
+}
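
Because the stored value is the PA minus the hyp VA, hyp code can translate one of its own VAs to a PA with a single addition. A hypothetical hyp-side helper, sketched under that assumption (the name and its existence are illustrative, not part of this patch):

/* Sketch only: hyp_physvirt_offset == __pa(x) - kern_hyp_va(x). */
static inline phys_addr_t sketch_hyp_virt_to_phys(void *va)
{
	extern s64 hyp_physvirt_offset;	/* hyp-owned copy written above */

	return (phys_addr_t)((s64)(unsigned long)va + hyp_physvirt_offset);
}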
+
+/*
* We want to generate a hyp VA with the following format (with V ==
* vabits_actual):
*
@@ -53,6 +78,8 @@ __init void kvm_compute_layout(void)
tag_val |= get_random_long() & GENMASK_ULL(vabits_actual - 2, tag_lsb);
}
tag_val >>= tag_lsb;
+
+ init_hyp_physvirt_offset();
}
static u32 compute_instruction(int n, u32 rd, u32 rn)
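
Note that the call added above sits after tag_val >>= tag_lsb, so __early_kern_hyp_va() only runs once the mask and tag are final. A worked example of the resulting offset, with made-up addresses:

/* Illustration only; both addresses are assumed:
 *   __pa(__hyp_text_start)        = 0x0000000040a10000
 *   __early_kern_hyp_va(kern_va)  = 0x0000700011a10000
 * hyp_physvirt_offset = 0x40a10000 - 0x700011a10000 = -0x6fffd1000000,
 * so hyp_va + hyp_physvirt_offset recovers the PA.
 */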
@@ -131,28 +158,21 @@ void __init kvm_update_va_mask(struct alt_instr *alt,
}
}
-void *__kvm_bp_vect_base;
-int __kvm_harden_el2_vector_slot;
-
void kvm_patch_vector_branch(struct alt_instr *alt,
__le32 *origptr, __le32 *updptr, int nr_inst)
{
u64 addr;
u32 insn;
- BUG_ON(nr_inst != 5);
+ BUG_ON(nr_inst != 4);
- if (has_vhe() || !cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS)) {
- WARN_ON_ONCE(cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS));
+ if (!cpus_have_const_cap(ARM64_SPECTRE_V3A) || WARN_ON_ONCE(has_vhe()))
return;
- }
/*
* Compute HYP VA by using the same computation as kern_hyp_va()
*/
- addr = (uintptr_t)kvm_ksym_ref(__kvm_hyp_vector);
- addr &= va_mask;
- addr |= tag_val << tag_lsb;
+ addr = __early_kern_hyp_va((u64)kvm_ksym_ref(__kvm_hyp_vector));
/* Use PC[10:7] to branch to the same vector in KVM */
addr |= ((u64)origptr & GENMASK_ULL(10, 7));
@@ -163,15 +183,6 @@ void kvm_patch_vector_branch(struct alt_instr *alt,
*/
addr += KVM_VECTOR_PREAMBLE;
- /* stp x0, x1, [sp, #-16]! */
- insn = aarch64_insn_gen_load_store_pair(AARCH64_INSN_REG_0,
- AARCH64_INSN_REG_1,
- AARCH64_INSN_REG_SP,
- -16,
- AARCH64_INSN_VARIANT_64BIT,
- AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX);
- *updptr++ = cpu_to_le32(insn);
-
/* movz x0, #(addr & 0xffff) */
insn = aarch64_insn_gen_movewide(AARCH64_INSN_REG_0,
(u16)addr,
@@ -201,3 +212,58 @@ void kvm_patch_vector_branch(struct alt_instr *alt,
AARCH64_INSN_BRANCH_NOLINK);
*updptr++ = cpu_to_le32(insn);
}
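
With the stp spill removed, the alternative patches four instructions instead of five: a movz/movk/movk sequence that materialises the vector address in x0, followed by the br generated just above (the address is evidently assumed to fit in 48 bits, hence only three move-wide instructions). Roughly, the patched slot becomes:

/* Illustrative rendering of the patched instruction sequence:
 *	movz	x0, #(addr & 0xffff)
 *	movk	x0, #((addr >> 16) & 0xffff), lsl #16
 *	movk	x0, #((addr >> 32) & 0xffff), lsl #32
 *	br	x0
 */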
+
+static void generate_mov_q(u64 val, __le32 *origptr, __le32 *updptr, int nr_inst)
+{
+ u32 insn, oinsn, rd;
+
+ BUG_ON(nr_inst != 4);
+
+ /* Compute target register */
+ oinsn = le32_to_cpu(*origptr);
+ rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, oinsn);
+
+ /* movz rd, #(val & 0xffff) */
+ insn = aarch64_insn_gen_movewide(rd,
+ (u16)val,
+ 0,
+ AARCH64_INSN_VARIANT_64BIT,
+ AARCH64_INSN_MOVEWIDE_ZERO);
+ *updptr++ = cpu_to_le32(insn);
+
+ /* movk rd, #((val >> 16) & 0xffff), lsl #16 */
+ insn = aarch64_insn_gen_movewide(rd,
+ (u16)(val >> 16),
+ 16,
+ AARCH64_INSN_VARIANT_64BIT,
+ AARCH64_INSN_MOVEWIDE_KEEP);
+ *updptr++ = cpu_to_le32(insn);
+
+ /* movk rd, #((val >> 32) & 0xffff), lsl #32 */
+ insn = aarch64_insn_gen_movewide(rd,
+ (u16)(val >> 32),
+ 32,
+ AARCH64_INSN_VARIANT_64BIT,
+ AARCH64_INSN_MOVEWIDE_KEEP);
+ *updptr++ = cpu_to_le32(insn);
+
+ /* movk rd, #((val >> 48) & 0xffff), lsl #48 */
+ insn = aarch64_insn_gen_movewide(rd,
+ (u16)(val >> 48),
+ 48,
+ AARCH64_INSN_VARIANT_64BIT,
+ AARCH64_INSN_MOVEWIDE_KEEP);
+ *updptr++ = cpu_to_le32(insn);
+}
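
The movz/movk chain rebuilds an arbitrary 64-bit constant 16 bits at a time: movz zeroes the register and sets bits [15:0], and each movk keeps the rest while inserting the next chunk. A small runnable C model of what the patched register ends up holding:

/* C model of the sequence generate_mov_q() emits; pure illustration. */
#include <stdint.h>

static uint64_t mov_q_model(uint64_t val)
{
	uint64_t rd;

	rd  = (uint16_t)val;				/* movz: bits [15:0] */
	rd |= (uint64_t)(uint16_t)(val >> 16) << 16;	/* movk, lsl #16 */
	rd |= (uint64_t)(uint16_t)(val >> 32) << 32;	/* movk, lsl #32 */
	rd |= (uint64_t)(uint16_t)(val >> 48) << 48;	/* movk, lsl #48 */
	return rd;					/* == val for any input */
}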
+
+void kvm_update_kimg_phys_offset(struct alt_instr *alt,
+ __le32 *origptr, __le32 *updptr, int nr_inst)
+{
+ generate_mov_q(kimage_voffset + PHYS_OFFSET, origptr, updptr, nr_inst);
+}
+
+void kvm_get_kimage_voffset(struct alt_instr *alt,
+ __le32 *origptr, __le32 *updptr, int nr_inst)
+{
+ generate_mov_q(kimage_voffset, origptr, updptr, nr_inst);
+}
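
For reference: on arm64, kimage_voffset is the difference between the kernel image's virtual and physical addresses, and PHYS_OFFSET is the physical base of RAM, so the two patched constants let hyp code convert kernel-image VAs without access to kernel globals. A sketch of the intended conversions (assumed usage; the consuming code lives outside this file):

/* Assuming the standard arm64 address-space relationships:
 *   pa = kimg_va - kimage_voffset;              via kvm_get_kimage_voffset
 *   pa - PHYS_OFFSET
 *     = kimg_va - (kimage_voffset + PHYS_OFFSET);   via kvm_update_kimg_phys_offset,
 * i.e. the second constant turns a kernel-image VA into an offset into RAM.
 */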