Diffstat (limited to 'arch/arm64/kvm/hyp/nvhe')
-rw-r--r--   arch/arm64/kvm/hyp/nvhe/Makefile       |  29
-rw-r--r--   arch/arm64/kvm/hyp/nvhe/cache.S        |  11
-rw-r--r--   arch/arm64/kvm/hyp/nvhe/debug-sr.c     |  26
-rw-r--r--   arch/arm64/kvm/hyp/nvhe/ffa.c          | 780
-rw-r--r--   arch/arm64/kvm/hyp/nvhe/host.S         |  57
-rw-r--r--   arch/arm64/kvm/hyp/nvhe/hyp-init.S     |  40
-rw-r--r--   arch/arm64/kvm/hyp/nvhe/hyp-main.c     | 146
-rw-r--r--   arch/arm64/kvm/hyp/nvhe/hyp-smp.c      |   2
-rw-r--r--   arch/arm64/kvm/hyp/nvhe/list_debug.c   |   8
-rw-r--r--   arch/arm64/kvm/hyp/nvhe/mem_protect.c  | 614
-rw-r--r--   arch/arm64/kvm/hyp/nvhe/mm.c           | 250
-rw-r--r--   arch/arm64/kvm/hyp/nvhe/page_alloc.c   |  40
-rw-r--r--   arch/arm64/kvm/hyp/nvhe/pkvm.c         | 501
-rw-r--r--   arch/arm64/kvm/hyp/nvhe/psci-relay.c   |   5
-rw-r--r--   arch/arm64/kvm/hyp/nvhe/setup.c        | 136
-rw-r--r--   arch/arm64/kvm/hyp/nvhe/stacktrace.c   | 158
-rw-r--r--   arch/arm64/kvm/hyp/nvhe/switch.c       |  94
-rw-r--r--   arch/arm64/kvm/hyp/nvhe/sys_regs.c     |  18
-rw-r--r--   arch/arm64/kvm/hyp/nvhe/timer-sr.c     |  34
-rw-r--r--   arch/arm64/kvm/hyp/nvhe/tlb.c          | 123
20 files changed, 2728 insertions(+), 344 deletions(-)
diff --git a/arch/arm64/kvm/hyp/nvhe/Makefile b/arch/arm64/kvm/hyp/nvhe/Makefile
index f9fe4dc21b1f..2250253a6429 100644
--- a/arch/arm64/kvm/hyp/nvhe/Makefile
+++ b/arch/arm64/kvm/hyp/nvhe/Makefile
@@ -4,7 +4,15 @@
#
asflags-y := -D__KVM_NVHE_HYPERVISOR__ -D__DISABLE_EXPORTS
-ccflags-y := -D__KVM_NVHE_HYPERVISOR__ -D__DISABLE_EXPORTS
+
+# Tracepoint and MMIO logging symbols should not be visible at nVHE KVM as
+# there is no way to execute them and any such MMIO access from nVHE KVM
+# will explode instantly (Words of Marc Zyngier). So introduce a generic flag
+# __DISABLE_TRACE_MMIO__ to disable MMIO tracing for nVHE KVM.
+ccflags-y := -D__KVM_NVHE_HYPERVISOR__ -D__DISABLE_EXPORTS -D__DISABLE_TRACE_MMIO__
+ccflags-y += -fno-stack-protector \
+ -DDISABLE_BRANCH_PROFILING \
+ $(DISABLE_STACKLEAK_PLUGIN)
hostprogs := gen-hyprel
HOST_EXTRACFLAGS += -I$(objtree)/include
@@ -12,13 +20,13 @@ HOST_EXTRACFLAGS += -I$(objtree)/include
lib-objs := clear_page.o copy_page.o memcpy.o memset.o
lib-objs := $(addprefix ../../../lib/, $(lib-objs))
-obj-y := timer-sr.o sysreg-sr.o debug-sr.o switch.o tlb.o hyp-init.o host.o \
+hyp-obj-y := timer-sr.o sysreg-sr.o debug-sr.o switch.o tlb.o hyp-init.o host.o \
hyp-main.o hyp-smp.o psci-relay.o early_alloc.o page_alloc.o \
- cache.o setup.o mm.o mem_protect.o sys_regs.o pkvm.o
-obj-y += ../vgic-v3-sr.o ../aarch32.o ../vgic-v2-cpuif-proxy.o ../entry.o \
+ cache.o setup.o mm.o mem_protect.o sys_regs.o pkvm.o stacktrace.o ffa.o
+hyp-obj-y += ../vgic-v3-sr.o ../aarch32.o ../vgic-v2-cpuif-proxy.o ../entry.o \
../fpsimd.o ../hyp-entry.o ../exception.o ../pgtable.o
-obj-$(CONFIG_DEBUG_LIST) += list_debug.o
-obj-y += $(lib-objs)
+hyp-obj-$(CONFIG_LIST_HARDENED) += list_debug.o
+hyp-obj-y += $(lib-objs)
##
## Build rules for compiling nVHE hyp code
@@ -26,9 +34,9 @@ obj-y += $(lib-objs)
## file containing all nVHE hyp code and data.
##
-hyp-obj := $(patsubst %.o,%.nvhe.o,$(obj-y))
+hyp-obj := $(patsubst %.o,%.nvhe.o,$(hyp-obj-y))
obj-y := kvm_nvhe.o
-extra-y := $(hyp-obj) kvm_nvhe.tmp.o kvm_nvhe.rel.o hyp.lds hyp-reloc.S hyp-reloc.o
+targets += $(hyp-obj) kvm_nvhe.tmp.o kvm_nvhe.rel.o hyp.lds hyp-reloc.S hyp-reloc.o
# 1) Compile all source files to `.nvhe.o` object files. The file extension
# avoids file name clashes for files shared with VHE.
@@ -84,6 +92,11 @@ quiet_cmd_hypcopy = HYPCOPY $@
# Remove ftrace, Shadow Call Stack, and CFI CFLAGS.
# This is equivalent to the 'notrace', '__noscs', and '__nocfi' annotations.
KBUILD_CFLAGS := $(filter-out $(CC_FLAGS_FTRACE) $(CC_FLAGS_SCS) $(CC_FLAGS_CFI), $(KBUILD_CFLAGS))
+# Starting from 13.0.0 llvm emits SHT_REL section '.llvm.call-graph-profile'
+# when profile optimization is applied. gen-hyprel does not support SHT_REL and
+# causes a build failure. Remove profile optimization flags.
+KBUILD_CFLAGS := $(filter-out -fprofile-sample-use=% -fprofile-use=%, $(KBUILD_CFLAGS))
+KBUILD_CFLAGS += -fno-asynchronous-unwind-tables -fno-unwind-tables
# KVM nVHE code is run at a different exception code with a different map, so
# compiler instrumentation that inserts callbacks or checks into the code may
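
The new __DISABLE_TRACE_MMIO__ define only has an effect because the generic MMIO accessors compile their tracing hooks conditionally. Below is a minimal sketch of the guard pattern being relied on; the macro and helper names are illustrative, not the exact ones used by the kernel's I/O headers.

	/* Illustrative only: how a header can compile MMIO tracing away when
	 * __DISABLE_TRACE_MMIO__ is defined (names are hypothetical). */
	#if defined(CONFIG_TRACE_MMIO_ACCESS) && !defined(__DISABLE_TRACE_MMIO__)
	void trace_mmio_write(unsigned long val, volatile void *addr);
	#else
	static inline void trace_mmio_write(unsigned long val, volatile void *addr)
	{
		/* No tracepoint symbols referenced, so none land in the hyp object. */
	}
	#endif
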
diff --git a/arch/arm64/kvm/hyp/nvhe/cache.S b/arch/arm64/kvm/hyp/nvhe/cache.S
index 0c367eb5f4e2..85936c17ae40 100644
--- a/arch/arm64/kvm/hyp/nvhe/cache.S
+++ b/arch/arm64/kvm/hyp/nvhe/cache.S
@@ -12,3 +12,14 @@ SYM_FUNC_START(__pi_dcache_clean_inval_poc)
ret
SYM_FUNC_END(__pi_dcache_clean_inval_poc)
SYM_FUNC_ALIAS(dcache_clean_inval_poc, __pi_dcache_clean_inval_poc)
+
+SYM_FUNC_START(__pi_icache_inval_pou)
+alternative_if ARM64_HAS_CACHE_DIC
+ isb
+ ret
+alternative_else_nop_endif
+
+ invalidate_icache_by_line x0, x1, x2, x3
+ ret
+SYM_FUNC_END(__pi_icache_inval_pou)
+SYM_FUNC_ALIAS(icache_inval_pou, __pi_icache_inval_pou)
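
The new icache_inval_pou() only walks the I-cache by line when CTR_EL0.DIC is clear; with DIC set, instruction fetch is already coherent with data to the PoU and an ISB is all that is required. A small standalone sketch of the same decision, assuming the architectural CTR_EL0 layout (DIC at bit 29, IminLine in bits [3:0]):

	#include <stdbool.h>
	#include <stdint.h>

	#define CTR_EL0_DIC		(UINT64_C(1) << 29)
	#define CTR_EL0_IMINLINE_MASK	UINT64_C(0xf)

	static bool icache_inval_needed(uint64_t ctr_el0)
	{
		/*
		 * CTR_EL0.DIC == 1 means instruction fetch is coherent with
		 * data to the PoU, so only context synchronisation (ISB) is
		 * needed, which is the fast path taken by the alternative.
		 */
		return !(ctr_el0 & CTR_EL0_DIC);
	}

	static uint64_t icache_line_bytes(uint64_t ctr_el0)
	{
		/* IminLine is log2 of the line size in 4-byte words, giving
		 * the stride used by invalidate_icache_by_line. */
		return 4u << (ctr_el0 & CTR_EL0_IMINLINE_MASK);
	}
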
diff --git a/arch/arm64/kvm/hyp/nvhe/debug-sr.c b/arch/arm64/kvm/hyp/nvhe/debug-sr.c
index df361d839902..7746ea507b6f 100644
--- a/arch/arm64/kvm/hyp/nvhe/debug-sr.c
+++ b/arch/arm64/kvm/hyp/nvhe/debug-sr.c
@@ -27,17 +27,16 @@ static void __debug_save_spe(u64 *pmscr_el1)
* Check if the host is actually using it ?
*/
reg = read_sysreg_s(SYS_PMBLIMITR_EL1);
- if (!(reg & BIT(SYS_PMBLIMITR_EL1_E_SHIFT)))
+ if (!(reg & BIT(PMBLIMITR_EL1_E_SHIFT)))
return;
/* Yes; save the control register and disable data generation */
- *pmscr_el1 = read_sysreg_s(SYS_PMSCR_EL1);
- write_sysreg_s(0, SYS_PMSCR_EL1);
+ *pmscr_el1 = read_sysreg_el1(SYS_PMSCR);
+ write_sysreg_el1(0, SYS_PMSCR);
isb();
/* Now drain all buffered data to memory */
psb_csync();
- dsb(nsh);
}
static void __debug_restore_spe(u64 pmscr_el1)
@@ -49,7 +48,7 @@ static void __debug_restore_spe(u64 pmscr_el1)
isb();
/* Re-enable data generation */
- write_sysreg_s(pmscr_el1, SYS_PMSCR_EL1);
+ write_sysreg_el1(pmscr_el1, SYS_PMSCR);
}
static void __debug_save_trace(u64 *trfcr_el1)
@@ -57,19 +56,18 @@ static void __debug_save_trace(u64 *trfcr_el1)
*trfcr_el1 = 0;
/* Check if the TRBE is enabled */
- if (!(read_sysreg_s(SYS_TRBLIMITR_EL1) & TRBLIMITR_ENABLE))
+ if (!(read_sysreg_s(SYS_TRBLIMITR_EL1) & TRBLIMITR_EL1_E))
return;
/*
* Prohibit trace generation while we are in guest.
* Since access to TRFCR_EL1 is trapped, the guest can't
* modify the filtering set by the host.
*/
- *trfcr_el1 = read_sysreg_s(SYS_TRFCR_EL1);
- write_sysreg_s(0, SYS_TRFCR_EL1);
+ *trfcr_el1 = read_sysreg_el1(SYS_TRFCR);
+ write_sysreg_el1(0, SYS_TRFCR);
isb();
/* Drain the trace buffer to memory */
tsb_csync();
- dsb(nsh);
}
static void __debug_restore_trace(u64 trfcr_el1)
@@ -78,16 +76,16 @@ static void __debug_restore_trace(u64 trfcr_el1)
return;
/* Restore trace filter controls */
- write_sysreg_s(trfcr_el1, SYS_TRFCR_EL1);
+ write_sysreg_el1(trfcr_el1, SYS_TRFCR);
}
void __debug_save_host_buffers_nvhe(struct kvm_vcpu *vcpu)
{
/* Disable and flush SPE data generation */
- if (vcpu->arch.flags & KVM_ARM64_DEBUG_STATE_SAVE_SPE)
+ if (vcpu_get_flag(vcpu, DEBUG_STATE_SAVE_SPE))
__debug_save_spe(&vcpu->arch.host_debug_state.pmscr_el1);
/* Disable and flush Self-Hosted Trace generation */
- if (vcpu->arch.flags & KVM_ARM64_DEBUG_STATE_SAVE_TRBE)
+ if (vcpu_get_flag(vcpu, DEBUG_STATE_SAVE_TRBE))
__debug_save_trace(&vcpu->arch.host_debug_state.trfcr_el1);
}
@@ -98,9 +96,9 @@ void __debug_switch_to_guest(struct kvm_vcpu *vcpu)
void __debug_restore_host_buffers_nvhe(struct kvm_vcpu *vcpu)
{
- if (vcpu->arch.flags & KVM_ARM64_DEBUG_STATE_SAVE_SPE)
+ if (vcpu_get_flag(vcpu, DEBUG_STATE_SAVE_SPE))
__debug_restore_spe(vcpu->arch.host_debug_state.pmscr_el1);
- if (vcpu->arch.flags & KVM_ARM64_DEBUG_STATE_SAVE_TRBE)
+ if (vcpu_get_flag(vcpu, DEBUG_STATE_SAVE_TRBE))
__debug_restore_trace(vcpu->arch.host_debug_state.trfcr_el1);
}
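
The switch from read_sysreg_s(SYS_PMSCR_EL1)/SYS_TRFCR_EL1 to the read_sysreg_el1()/write_sysreg_el1() accessors matters once HCR_EL2.E2H can be set while running at EL2 (hVHE): the plain _EL1 encodings then target EL2 state, and the EL1 copies are only reachable through the _EL12 aliases. A conceptual sketch of that selection, with illustrative names rather than the kernel macros:

	#include <stdbool.h>

	/* Illustrative sketch, not the kernel's read_sysreg_el1() implementation. */
	enum pmscr_encoding { PMSCR_EL1_ENC, PMSCR_EL12_ENC };

	static enum pmscr_encoding pmscr_encoding_for(bool e2h)
	{
		/*
		 * With HCR_EL2.E2H == 1, accesses from EL2 using the _EL1
		 * encoding hit EL2 registers, so the real EL1 state must be
		 * reached via the _EL12 alias instead.
		 */
		return e2h ? PMSCR_EL12_ENC : PMSCR_EL1_ENC;
	}
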
diff --git a/arch/arm64/kvm/hyp/nvhe/ffa.c b/arch/arm64/kvm/hyp/nvhe/ffa.c
new file mode 100644
index 000000000000..320f2eaa14a9
--- /dev/null
+++ b/arch/arm64/kvm/hyp/nvhe/ffa.c
@@ -0,0 +1,780 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * FF-A v1.0 proxy to filter out invalid memory-sharing SMC calls issued by
+ * the host. FF-A is a slightly more palatable abbreviation of "Arm Firmware
+ * Framework for Arm A-profile", which is specified by Arm in document
+ * number DEN0077.
+ *
+ * Copyright (C) 2022 - Google LLC
+ * Author: Andrew Walbran <qwandor@google.com>
+ *
+ * This driver hooks into the SMC trapping logic for the host and intercepts
+ * all calls falling within the FF-A range. Each call is either:
+ *
+ * - Forwarded on unmodified to the SPMD at EL3
+ * - Rejected as "unsupported"
+ * - Accompanied by a host stage-2 page-table check/update and reissued
+ *
+ * Consequently, any attempts by the host to make guest memory pages
+ * accessible to the secure world using FF-A will be detected either here
+ * (in the case that the memory is already owned by the guest) or during
+ * donation to the guest (in the case that the memory was previously shared
+ * with the secure world).
+ *
+ * To allow the rolling-back of page-table updates and FF-A calls in the
+ * event of failure, operations involving the RXTX buffers are locked for
+ * the duration and are therefore serialised.
+ */
+
+#include <linux/arm-smccc.h>
+#include <linux/arm_ffa.h>
+#include <asm/kvm_pkvm.h>
+
+#include <nvhe/ffa.h>
+#include <nvhe/mem_protect.h>
+#include <nvhe/memory.h>
+#include <nvhe/trap_handler.h>
+#include <nvhe/spinlock.h>
+
+/*
+ * "ID value 0 must be returned at the Non-secure physical FF-A instance"
+ * We share this ID with the host.
+ */
+#define HOST_FFA_ID 0
+
+/*
+ * A buffer to hold the maximum descriptor size we can see from the host,
+ * which is required when the SPMD returns a fragmented FFA_MEM_RETRIEVE_RESP
+ * when resolving the handle on the reclaim path.
+ */
+struct kvm_ffa_descriptor_buffer {
+ void *buf;
+ size_t len;
+};
+
+static struct kvm_ffa_descriptor_buffer ffa_desc_buf;
+
+struct kvm_ffa_buffers {
+ hyp_spinlock_t lock;
+ void *tx;
+ void *rx;
+};
+
+/*
+ * Note that we don't currently lock these buffers explicitly, instead
+ * relying on the locking of the host FFA buffers as we only have one
+ * client.
+ */
+static struct kvm_ffa_buffers hyp_buffers;
+static struct kvm_ffa_buffers host_buffers;
+
+static void ffa_to_smccc_error(struct arm_smccc_res *res, u64 ffa_errno)
+{
+ *res = (struct arm_smccc_res) {
+ .a0 = FFA_ERROR,
+ .a2 = ffa_errno,
+ };
+}
+
+static void ffa_to_smccc_res_prop(struct arm_smccc_res *res, int ret, u64 prop)
+{
+ if (ret == FFA_RET_SUCCESS) {
+ *res = (struct arm_smccc_res) { .a0 = FFA_SUCCESS,
+ .a2 = prop };
+ } else {
+ ffa_to_smccc_error(res, ret);
+ }
+}
+
+static void ffa_to_smccc_res(struct arm_smccc_res *res, int ret)
+{
+ ffa_to_smccc_res_prop(res, ret, 0);
+}
+
+static void ffa_set_retval(struct kvm_cpu_context *ctxt,
+ struct arm_smccc_res *res)
+{
+ cpu_reg(ctxt, 0) = res->a0;
+ cpu_reg(ctxt, 1) = res->a1;
+ cpu_reg(ctxt, 2) = res->a2;
+ cpu_reg(ctxt, 3) = res->a3;
+}
+
+static bool is_ffa_call(u64 func_id)
+{
+ return ARM_SMCCC_IS_FAST_CALL(func_id) &&
+ ARM_SMCCC_OWNER_NUM(func_id) == ARM_SMCCC_OWNER_STANDARD &&
+ ARM_SMCCC_FUNC_NUM(func_id) >= FFA_MIN_FUNC_NUM &&
+ ARM_SMCCC_FUNC_NUM(func_id) <= FFA_MAX_FUNC_NUM;
+}
+
+static int ffa_map_hyp_buffers(u64 ffa_page_count)
+{
+ struct arm_smccc_res res;
+
+ arm_smccc_1_1_smc(FFA_FN64_RXTX_MAP,
+ hyp_virt_to_phys(hyp_buffers.tx),
+ hyp_virt_to_phys(hyp_buffers.rx),
+ ffa_page_count,
+ 0, 0, 0, 0,
+ &res);
+
+ return res.a0 == FFA_SUCCESS ? FFA_RET_SUCCESS : res.a2;
+}
+
+static int ffa_unmap_hyp_buffers(void)
+{
+ struct arm_smccc_res res;
+
+ arm_smccc_1_1_smc(FFA_RXTX_UNMAP,
+ HOST_FFA_ID,
+ 0, 0, 0, 0, 0, 0,
+ &res);
+
+ return res.a0 == FFA_SUCCESS ? FFA_RET_SUCCESS : res.a2;
+}
+
+static void ffa_mem_frag_tx(struct arm_smccc_res *res, u32 handle_lo,
+ u32 handle_hi, u32 fraglen, u32 endpoint_id)
+{
+ arm_smccc_1_1_smc(FFA_MEM_FRAG_TX,
+ handle_lo, handle_hi, fraglen, endpoint_id,
+ 0, 0, 0,
+ res);
+}
+
+static void ffa_mem_frag_rx(struct arm_smccc_res *res, u32 handle_lo,
+ u32 handle_hi, u32 fragoff)
+{
+ arm_smccc_1_1_smc(FFA_MEM_FRAG_RX,
+ handle_lo, handle_hi, fragoff, HOST_FFA_ID,
+ 0, 0, 0,
+ res);
+}
+
+static void ffa_mem_xfer(struct arm_smccc_res *res, u64 func_id, u32 len,
+ u32 fraglen)
+{
+ arm_smccc_1_1_smc(func_id, len, fraglen,
+ 0, 0, 0, 0, 0,
+ res);
+}
+
+static void ffa_mem_reclaim(struct arm_smccc_res *res, u32 handle_lo,
+ u32 handle_hi, u32 flags)
+{
+ arm_smccc_1_1_smc(FFA_MEM_RECLAIM,
+ handle_lo, handle_hi, flags,
+ 0, 0, 0, 0,
+ res);
+}
+
+static void ffa_retrieve_req(struct arm_smccc_res *res, u32 len)
+{
+ arm_smccc_1_1_smc(FFA_FN64_MEM_RETRIEVE_REQ,
+ len, len,
+ 0, 0, 0, 0, 0,
+ res);
+}
+
+static void do_ffa_rxtx_map(struct arm_smccc_res *res,
+ struct kvm_cpu_context *ctxt)
+{
+ DECLARE_REG(phys_addr_t, tx, ctxt, 1);
+ DECLARE_REG(phys_addr_t, rx, ctxt, 2);
+ DECLARE_REG(u32, npages, ctxt, 3);
+ int ret = 0;
+ void *rx_virt, *tx_virt;
+
+ if (npages != (KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE) / FFA_PAGE_SIZE) {
+ ret = FFA_RET_INVALID_PARAMETERS;
+ goto out;
+ }
+
+ if (!PAGE_ALIGNED(tx) || !PAGE_ALIGNED(rx)) {
+ ret = FFA_RET_INVALID_PARAMETERS;
+ goto out;
+ }
+
+ hyp_spin_lock(&host_buffers.lock);
+ if (host_buffers.tx) {
+ ret = FFA_RET_DENIED;
+ goto out_unlock;
+ }
+
+ /*
+ * Map our hypervisor buffers into the SPMD before mapping and
+ * pinning the host buffers in our own address space.
+ */
+ ret = ffa_map_hyp_buffers(npages);
+ if (ret)
+ goto out_unlock;
+
+ ret = __pkvm_host_share_hyp(hyp_phys_to_pfn(tx));
+ if (ret) {
+ ret = FFA_RET_INVALID_PARAMETERS;
+ goto err_unmap;
+ }
+
+ ret = __pkvm_host_share_hyp(hyp_phys_to_pfn(rx));
+ if (ret) {
+ ret = FFA_RET_INVALID_PARAMETERS;
+ goto err_unshare_tx;
+ }
+
+ tx_virt = hyp_phys_to_virt(tx);
+ ret = hyp_pin_shared_mem(tx_virt, tx_virt + 1);
+ if (ret) {
+ ret = FFA_RET_INVALID_PARAMETERS;
+ goto err_unshare_rx;
+ }
+
+ rx_virt = hyp_phys_to_virt(rx);
+ ret = hyp_pin_shared_mem(rx_virt, rx_virt + 1);
+ if (ret) {
+ ret = FFA_RET_INVALID_PARAMETERS;
+ goto err_unpin_tx;
+ }
+
+ host_buffers.tx = tx_virt;
+ host_buffers.rx = rx_virt;
+
+out_unlock:
+ hyp_spin_unlock(&host_buffers.lock);
+out:
+ ffa_to_smccc_res(res, ret);
+ return;
+
+err_unpin_tx:
+ hyp_unpin_shared_mem(tx_virt, tx_virt + 1);
+err_unshare_rx:
+ __pkvm_host_unshare_hyp(hyp_phys_to_pfn(rx));
+err_unshare_tx:
+ __pkvm_host_unshare_hyp(hyp_phys_to_pfn(tx));
+err_unmap:
+ ffa_unmap_hyp_buffers();
+ goto out_unlock;
+}
+
+static void do_ffa_rxtx_unmap(struct arm_smccc_res *res,
+ struct kvm_cpu_context *ctxt)
+{
+ DECLARE_REG(u32, id, ctxt, 1);
+ int ret = 0;
+
+ if (id != HOST_FFA_ID) {
+ ret = FFA_RET_INVALID_PARAMETERS;
+ goto out;
+ }
+
+ hyp_spin_lock(&host_buffers.lock);
+ if (!host_buffers.tx) {
+ ret = FFA_RET_INVALID_PARAMETERS;
+ goto out_unlock;
+ }
+
+ hyp_unpin_shared_mem(host_buffers.tx, host_buffers.tx + 1);
+ WARN_ON(__pkvm_host_unshare_hyp(hyp_virt_to_pfn(host_buffers.tx)));
+ host_buffers.tx = NULL;
+
+ hyp_unpin_shared_mem(host_buffers.rx, host_buffers.rx + 1);
+ WARN_ON(__pkvm_host_unshare_hyp(hyp_virt_to_pfn(host_buffers.rx)));
+ host_buffers.rx = NULL;
+
+ ffa_unmap_hyp_buffers();
+
+out_unlock:
+ hyp_spin_unlock(&host_buffers.lock);
+out:
+ ffa_to_smccc_res(res, ret);
+}
+
+static u32 __ffa_host_share_ranges(struct ffa_mem_region_addr_range *ranges,
+ u32 nranges)
+{
+ u32 i;
+
+ for (i = 0; i < nranges; ++i) {
+ struct ffa_mem_region_addr_range *range = &ranges[i];
+ u64 sz = (u64)range->pg_cnt * FFA_PAGE_SIZE;
+ u64 pfn = hyp_phys_to_pfn(range->address);
+
+ if (!PAGE_ALIGNED(sz))
+ break;
+
+ if (__pkvm_host_share_ffa(pfn, sz / PAGE_SIZE))
+ break;
+ }
+
+ return i;
+}
+
+static u32 __ffa_host_unshare_ranges(struct ffa_mem_region_addr_range *ranges,
+ u32 nranges)
+{
+ u32 i;
+
+ for (i = 0; i < nranges; ++i) {
+ struct ffa_mem_region_addr_range *range = &ranges[i];
+ u64 sz = (u64)range->pg_cnt * FFA_PAGE_SIZE;
+ u64 pfn = hyp_phys_to_pfn(range->address);
+
+ if (!PAGE_ALIGNED(sz))
+ break;
+
+ if (__pkvm_host_unshare_ffa(pfn, sz / PAGE_SIZE))
+ break;
+ }
+
+ return i;
+}
+
+static int ffa_host_share_ranges(struct ffa_mem_region_addr_range *ranges,
+ u32 nranges)
+{
+ u32 nshared = __ffa_host_share_ranges(ranges, nranges);
+ int ret = 0;
+
+ if (nshared != nranges) {
+ WARN_ON(__ffa_host_unshare_ranges(ranges, nshared) != nshared);
+ ret = FFA_RET_DENIED;
+ }
+
+ return ret;
+}
+
+static int ffa_host_unshare_ranges(struct ffa_mem_region_addr_range *ranges,
+ u32 nranges)
+{
+ u32 nunshared = __ffa_host_unshare_ranges(ranges, nranges);
+ int ret = 0;
+
+ if (nunshared != nranges) {
+ WARN_ON(__ffa_host_share_ranges(ranges, nunshared) != nunshared);
+ ret = FFA_RET_DENIED;
+ }
+
+ return ret;
+}
+
+static void do_ffa_mem_frag_tx(struct arm_smccc_res *res,
+ struct kvm_cpu_context *ctxt)
+{
+ DECLARE_REG(u32, handle_lo, ctxt, 1);
+ DECLARE_REG(u32, handle_hi, ctxt, 2);
+ DECLARE_REG(u32, fraglen, ctxt, 3);
+ DECLARE_REG(u32, endpoint_id, ctxt, 4);
+ struct ffa_mem_region_addr_range *buf;
+ int ret = FFA_RET_INVALID_PARAMETERS;
+ u32 nr_ranges;
+
+ if (fraglen > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE)
+ goto out;
+
+ if (fraglen % sizeof(*buf))
+ goto out;
+
+ hyp_spin_lock(&host_buffers.lock);
+ if (!host_buffers.tx)
+ goto out_unlock;
+
+ buf = hyp_buffers.tx;
+ memcpy(buf, host_buffers.tx, fraglen);
+ nr_ranges = fraglen / sizeof(*buf);
+
+ ret = ffa_host_share_ranges(buf, nr_ranges);
+ if (ret) {
+ /*
+ * We're effectively aborting the transaction, so we need
+ * to restore the global state back to what it was prior to
+ * transmission of the first fragment.
+ */
+ ffa_mem_reclaim(res, handle_lo, handle_hi, 0);
+ WARN_ON(res->a0 != FFA_SUCCESS);
+ goto out_unlock;
+ }
+
+ ffa_mem_frag_tx(res, handle_lo, handle_hi, fraglen, endpoint_id);
+ if (res->a0 != FFA_SUCCESS && res->a0 != FFA_MEM_FRAG_RX)
+ WARN_ON(ffa_host_unshare_ranges(buf, nr_ranges));
+
+out_unlock:
+ hyp_spin_unlock(&host_buffers.lock);
+out:
+ if (ret)
+ ffa_to_smccc_res(res, ret);
+
+ /*
+ * If for any reason this did not succeed, we're in trouble as we have
+ * now lost the content of the previous fragments and we can't rollback
+ * the host stage-2 changes. The pages previously marked as shared will
+ * remain stuck in that state forever, hence preventing the host from
+ * sharing/donating them again and may possibly lead to subsequent
+ * failures, but this will not compromise confidentiality.
+ */
+ return;
+}
+
+static __always_inline void do_ffa_mem_xfer(const u64 func_id,
+ struct arm_smccc_res *res,
+ struct kvm_cpu_context *ctxt)
+{
+ DECLARE_REG(u32, len, ctxt, 1);
+ DECLARE_REG(u32, fraglen, ctxt, 2);
+ DECLARE_REG(u64, addr_mbz, ctxt, 3);
+ DECLARE_REG(u32, npages_mbz, ctxt, 4);
+ struct ffa_mem_region_attributes *ep_mem_access;
+ struct ffa_composite_mem_region *reg;
+ struct ffa_mem_region *buf;
+ u32 offset, nr_ranges;
+ int ret = 0;
+
+ BUILD_BUG_ON(func_id != FFA_FN64_MEM_SHARE &&
+ func_id != FFA_FN64_MEM_LEND);
+
+ if (addr_mbz || npages_mbz || fraglen > len ||
+ fraglen > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE) {
+ ret = FFA_RET_INVALID_PARAMETERS;
+ goto out;
+ }
+
+ if (fraglen < sizeof(struct ffa_mem_region) +
+ sizeof(struct ffa_mem_region_attributes)) {
+ ret = FFA_RET_INVALID_PARAMETERS;
+ goto out;
+ }
+
+ hyp_spin_lock(&host_buffers.lock);
+ if (!host_buffers.tx) {
+ ret = FFA_RET_INVALID_PARAMETERS;
+ goto out_unlock;
+ }
+
+ buf = hyp_buffers.tx;
+ memcpy(buf, host_buffers.tx, fraglen);
+
+ ep_mem_access = (void *)buf +
+ ffa_mem_desc_offset(buf, 0, FFA_VERSION_1_0);
+ offset = ep_mem_access->composite_off;
+ if (!offset || buf->ep_count != 1 || buf->sender_id != HOST_FFA_ID) {
+ ret = FFA_RET_INVALID_PARAMETERS;
+ goto out_unlock;
+ }
+
+ if (fraglen < offset + sizeof(struct ffa_composite_mem_region)) {
+ ret = FFA_RET_INVALID_PARAMETERS;
+ goto out_unlock;
+ }
+
+ reg = (void *)buf + offset;
+ nr_ranges = ((void *)buf + fraglen) - (void *)reg->constituents;
+ if (nr_ranges % sizeof(reg->constituents[0])) {
+ ret = FFA_RET_INVALID_PARAMETERS;
+ goto out_unlock;
+ }
+
+ nr_ranges /= sizeof(reg->constituents[0]);
+ ret = ffa_host_share_ranges(reg->constituents, nr_ranges);
+ if (ret)
+ goto out_unlock;
+
+ ffa_mem_xfer(res, func_id, len, fraglen);
+ if (fraglen != len) {
+ if (res->a0 != FFA_MEM_FRAG_RX)
+ goto err_unshare;
+
+ if (res->a3 != fraglen)
+ goto err_unshare;
+ } else if (res->a0 != FFA_SUCCESS) {
+ goto err_unshare;
+ }
+
+out_unlock:
+ hyp_spin_unlock(&host_buffers.lock);
+out:
+ if (ret)
+ ffa_to_smccc_res(res, ret);
+ return;
+
+err_unshare:
+ WARN_ON(ffa_host_unshare_ranges(reg->constituents, nr_ranges));
+ goto out_unlock;
+}
+
+static void do_ffa_mem_reclaim(struct arm_smccc_res *res,
+ struct kvm_cpu_context *ctxt)
+{
+ DECLARE_REG(u32, handle_lo, ctxt, 1);
+ DECLARE_REG(u32, handle_hi, ctxt, 2);
+ DECLARE_REG(u32, flags, ctxt, 3);
+ struct ffa_mem_region_attributes *ep_mem_access;
+ struct ffa_composite_mem_region *reg;
+ u32 offset, len, fraglen, fragoff;
+ struct ffa_mem_region *buf;
+ int ret = 0;
+ u64 handle;
+
+ handle = PACK_HANDLE(handle_lo, handle_hi);
+
+ hyp_spin_lock(&host_buffers.lock);
+
+ buf = hyp_buffers.tx;
+ *buf = (struct ffa_mem_region) {
+ .sender_id = HOST_FFA_ID,
+ .handle = handle,
+ };
+
+ ffa_retrieve_req(res, sizeof(*buf));
+ buf = hyp_buffers.rx;
+ if (res->a0 != FFA_MEM_RETRIEVE_RESP)
+ goto out_unlock;
+
+ len = res->a1;
+ fraglen = res->a2;
+
+ ep_mem_access = (void *)buf +
+ ffa_mem_desc_offset(buf, 0, FFA_VERSION_1_0);
+ offset = ep_mem_access->composite_off;
+ /*
+ * We can trust the SPMD to get this right, but let's at least
+ * check that we end up with something that doesn't look _completely_
+ * bogus.
+ */
+ if (WARN_ON(offset > len ||
+ fraglen > KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE)) {
+ ret = FFA_RET_ABORTED;
+ goto out_unlock;
+ }
+
+ if (len > ffa_desc_buf.len) {
+ ret = FFA_RET_NO_MEMORY;
+ goto out_unlock;
+ }
+
+ buf = ffa_desc_buf.buf;
+ memcpy(buf, hyp_buffers.rx, fraglen);
+
+ for (fragoff = fraglen; fragoff < len; fragoff += fraglen) {
+ ffa_mem_frag_rx(res, handle_lo, handle_hi, fragoff);
+ if (res->a0 != FFA_MEM_FRAG_TX) {
+ ret = FFA_RET_INVALID_PARAMETERS;
+ goto out_unlock;
+ }
+
+ fraglen = res->a3;
+ memcpy((void *)buf + fragoff, hyp_buffers.rx, fraglen);
+ }
+
+ ffa_mem_reclaim(res, handle_lo, handle_hi, flags);
+ if (res->a0 != FFA_SUCCESS)
+ goto out_unlock;
+
+ reg = (void *)buf + offset;
+ /* If the SPMD was happy, then we should be too. */
+ WARN_ON(ffa_host_unshare_ranges(reg->constituents,
+ reg->addr_range_cnt));
+out_unlock:
+ hyp_spin_unlock(&host_buffers.lock);
+
+ if (ret)
+ ffa_to_smccc_res(res, ret);
+}
+
+/*
+ * Is a given FFA function supported, either by forwarding on directly
+ * or by handling at EL2?
+ */
+static bool ffa_call_supported(u64 func_id)
+{
+ switch (func_id) {
+ /* Unsupported memory management calls */
+ case FFA_FN64_MEM_RETRIEVE_REQ:
+ case FFA_MEM_RETRIEVE_RESP:
+ case FFA_MEM_RELINQUISH:
+ case FFA_MEM_OP_PAUSE:
+ case FFA_MEM_OP_RESUME:
+ case FFA_MEM_FRAG_RX:
+ case FFA_FN64_MEM_DONATE:
+ /* Indirect message passing via RX/TX buffers */
+ case FFA_MSG_SEND:
+ case FFA_MSG_POLL:
+ case FFA_MSG_WAIT:
+ /* 32-bit variants of 64-bit calls */
+ case FFA_MSG_SEND_DIRECT_REQ:
+ case FFA_MSG_SEND_DIRECT_RESP:
+ case FFA_RXTX_MAP:
+ case FFA_MEM_DONATE:
+ case FFA_MEM_RETRIEVE_REQ:
+ return false;
+ }
+
+ return true;
+}
+
+static bool do_ffa_features(struct arm_smccc_res *res,
+ struct kvm_cpu_context *ctxt)
+{
+ DECLARE_REG(u32, id, ctxt, 1);
+ u64 prop = 0;
+ int ret = 0;
+
+ if (!ffa_call_supported(id)) {
+ ret = FFA_RET_NOT_SUPPORTED;
+ goto out_handled;
+ }
+
+ switch (id) {
+ case FFA_MEM_SHARE:
+ case FFA_FN64_MEM_SHARE:
+ case FFA_MEM_LEND:
+ case FFA_FN64_MEM_LEND:
+ ret = FFA_RET_SUCCESS;
+ prop = 0; /* No support for dynamic buffers */
+ goto out_handled;
+ default:
+ return false;
+ }
+
+out_handled:
+ ffa_to_smccc_res_prop(res, ret, prop);
+ return true;
+}
+
+bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt, u32 func_id)
+{
+ struct arm_smccc_res res;
+
+ /*
+ * There's no way we can tell what a non-standard SMC call might
+ * be up to. Ideally, we would terminate these here and return
+ * an error to the host, but sadly devices make use of custom
+ * firmware calls for things like power management, debugging,
+ * RNG access and crash reporting.
+ *
+ * Given that the architecture requires us to trust EL3 anyway,
+ * we forward unrecognised calls on under the assumption that
+ * the firmware doesn't expose a mechanism to access arbitrary
+ * non-secure memory. Short of a per-device table of SMCs, this
+ * is the best we can do.
+ */
+ if (!is_ffa_call(func_id))
+ return false;
+
+ switch (func_id) {
+ case FFA_FEATURES:
+ if (!do_ffa_features(&res, host_ctxt))
+ return false;
+ goto out_handled;
+ /* Memory management */
+ case FFA_FN64_RXTX_MAP:
+ do_ffa_rxtx_map(&res, host_ctxt);
+ goto out_handled;
+ case FFA_RXTX_UNMAP:
+ do_ffa_rxtx_unmap(&res, host_ctxt);
+ goto out_handled;
+ case FFA_MEM_SHARE:
+ case FFA_FN64_MEM_SHARE:
+ do_ffa_mem_xfer(FFA_FN64_MEM_SHARE, &res, host_ctxt);
+ goto out_handled;
+ case FFA_MEM_RECLAIM:
+ do_ffa_mem_reclaim(&res, host_ctxt);
+ goto out_handled;
+ case FFA_MEM_LEND:
+ case FFA_FN64_MEM_LEND:
+ do_ffa_mem_xfer(FFA_FN64_MEM_LEND, &res, host_ctxt);
+ goto out_handled;
+ case FFA_MEM_FRAG_TX:
+ do_ffa_mem_frag_tx(&res, host_ctxt);
+ goto out_handled;
+ }
+
+ if (ffa_call_supported(func_id))
+ return false; /* Pass through */
+
+ ffa_to_smccc_error(&res, FFA_RET_NOT_SUPPORTED);
+out_handled:
+ ffa_set_retval(host_ctxt, &res);
+ return true;
+}
+
+int hyp_ffa_init(void *pages)
+{
+ struct arm_smccc_res res;
+ size_t min_rxtx_sz;
+ void *tx, *rx;
+
+ if (kvm_host_psci_config.smccc_version < ARM_SMCCC_VERSION_1_2)
+ return 0;
+
+ arm_smccc_1_1_smc(FFA_VERSION, FFA_VERSION_1_0, 0, 0, 0, 0, 0, 0, &res);
+ if (res.a0 == FFA_RET_NOT_SUPPORTED)
+ return 0;
+
+ /*
+ * Firmware returns the maximum supported version of the FF-A
+ * implementation. Check that the returned version is
+ * backwards-compatible with the hyp according to the rules in DEN0077A
+ * v1.1 REL0 13.2.1.
+ *
+ * Of course, things are never simple when dealing with firmware. v1.1
+ * broke ABI with v1.0 on several structures, which is itself
+ * incompatible with the aforementioned versioning scheme. The
+ * expectation is that v1.x implementations that do not support the v1.0
+ * ABI return NOT_SUPPORTED rather than a version number, according to
+ * DEN0077A v1.1 REL0 18.6.4.
+ */
+ if (FFA_MAJOR_VERSION(res.a0) != 1)
+ return -EOPNOTSUPP;
+
+ arm_smccc_1_1_smc(FFA_ID_GET, 0, 0, 0, 0, 0, 0, 0, &res);
+ if (res.a0 != FFA_SUCCESS)
+ return -EOPNOTSUPP;
+
+ if (res.a2 != HOST_FFA_ID)
+ return -EINVAL;
+
+ arm_smccc_1_1_smc(FFA_FEATURES, FFA_FN64_RXTX_MAP,
+ 0, 0, 0, 0, 0, 0, &res);
+ if (res.a0 != FFA_SUCCESS)
+ return -EOPNOTSUPP;
+
+ switch (res.a2) {
+ case FFA_FEAT_RXTX_MIN_SZ_4K:
+ min_rxtx_sz = SZ_4K;
+ break;
+ case FFA_FEAT_RXTX_MIN_SZ_16K:
+ min_rxtx_sz = SZ_16K;
+ break;
+ case FFA_FEAT_RXTX_MIN_SZ_64K:
+ min_rxtx_sz = SZ_64K;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (min_rxtx_sz > PAGE_SIZE)
+ return -EOPNOTSUPP;
+
+ tx = pages;
+ pages += KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE;
+ rx = pages;
+ pages += KVM_FFA_MBOX_NR_PAGES * PAGE_SIZE;
+
+ ffa_desc_buf = (struct kvm_ffa_descriptor_buffer) {
+ .buf = pages,
+ .len = PAGE_SIZE *
+ (hyp_ffa_proxy_pages() - (2 * KVM_FFA_MBOX_NR_PAGES)),
+ };
+
+ hyp_buffers = (struct kvm_ffa_buffers) {
+ .lock = __HYP_SPIN_LOCK_UNLOCKED,
+ .tx = tx,
+ .rx = rx,
+ };
+
+ host_buffers = (struct kvm_ffa_buffers) {
+ .lock = __HYP_SPIN_LOCK_UNLOCKED,
+ };
+
+ return 0;
+}
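
For reference, the filtering in is_ffa_call() boils down to a few bitfield checks on the SMCCC function ID. A standalone sketch of that decomposition is below; the 0x60-0x7f range mirrors the FF-A function numbering (FFA_ERROR is 0x84000060), and the bounds should be read as the proxy's assumption rather than something this snippet derives.

	#include <stdbool.h>
	#include <stdint.h>

	#define SMCCC_FAST_CALL		(UINT32_C(1) << 31)
	#define SMCCC_OWNER_SHIFT	24
	#define SMCCC_OWNER_MASK	UINT32_C(0x3f)
	#define SMCCC_OWNER_STANDARD	4
	#define SMCCC_FUNC_MASK		UINT32_C(0xffff)

	#define FFA_MIN_FUNC_NUM	0x60
	#define FFA_MAX_FUNC_NUM	0x7f

	static bool looks_like_ffa_call(uint32_t func_id)
	{
		uint32_t owner = (func_id >> SMCCC_OWNER_SHIFT) & SMCCC_OWNER_MASK;
		uint32_t func  = func_id & SMCCC_FUNC_MASK;

		/* e.g. FFA_ERROR (0x84000060) satisfies all three checks. */
		return (func_id & SMCCC_FAST_CALL) &&
		       owner == SMCCC_OWNER_STANDARD &&
		       func >= FFA_MIN_FUNC_NUM && func <= FFA_MAX_FUNC_NUM;
	}
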
diff --git a/arch/arm64/kvm/hyp/nvhe/host.S b/arch/arm64/kvm/hyp/nvhe/host.S
index ea6a397b64a6..135cfb294ee5 100644
--- a/arch/arm64/kvm/hyp/nvhe/host.S
+++ b/arch/arm64/kvm/hyp/nvhe/host.S
@@ -10,6 +10,7 @@
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
+#include <asm/kvm_ptrauth.h>
.text
@@ -37,10 +38,43 @@ SYM_FUNC_START(__host_exit)
/* Save the host context pointer in x29 across the function call */
mov x29, x0
+
+#ifdef CONFIG_ARM64_PTR_AUTH_KERNEL
+alternative_if_not ARM64_HAS_ADDRESS_AUTH
+b __skip_pauth_save
+alternative_else_nop_endif
+
+alternative_if ARM64_KVM_PROTECTED_MODE
+ /* Save kernel ptrauth keys. */
+ add x18, x29, #CPU_APIAKEYLO_EL1
+ ptrauth_save_state x18, x19, x20
+
+ /* Use hyp keys. */
+ adr_this_cpu x18, kvm_hyp_ctxt, x19
+ add x18, x18, #CPU_APIAKEYLO_EL1
+ ptrauth_restore_state x18, x19, x20
+ isb
+alternative_else_nop_endif
+__skip_pauth_save:
+#endif /* CONFIG_ARM64_PTR_AUTH_KERNEL */
+
bl handle_trap
- /* Restore host regs x0-x17 */
__host_enter_restore_full:
+ /* Restore kernel keys. */
+#ifdef CONFIG_ARM64_PTR_AUTH_KERNEL
+alternative_if_not ARM64_HAS_ADDRESS_AUTH
+b __skip_pauth_restore
+alternative_else_nop_endif
+
+alternative_if ARM64_KVM_PROTECTED_MODE
+ add x18, x29, #CPU_APIAKEYLO_EL1
+ ptrauth_restore_state x18, x19, x20
+alternative_else_nop_endif
+__skip_pauth_restore:
+#endif /* CONFIG_ARM64_PTR_AUTH_KERNEL */
+
+ /* Restore host regs x0-x17 */
ldp x0, x1, [x29, #CPU_XREG_OFFSET(0)]
ldp x2, x3, [x29, #CPU_XREG_OFFSET(2)]
ldp x4, x5, [x29, #CPU_XREG_OFFSET(4)]
@@ -76,7 +110,7 @@ SYM_FUNC_END(__host_enter)
* u64 elr, u64 par);
*/
SYM_FUNC_START(__hyp_do_panic)
- /* Prepare and exit to the host's panic funciton. */
+ /* Prepare and exit to the host's panic function. */
mov lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
PSR_MODE_EL1h)
msr spsr_el2, lr
@@ -177,13 +211,8 @@ SYM_FUNC_END(__host_hvc)
b hyp_panic
.L__hyp_sp_overflow\@:
- /*
- * Reset SP to the top of the stack, to allow handling the hyp_panic.
- * This corrupts the stack but is ok, since we won't be attempting
- * any unwinding here.
- */
- ldr_this_cpu x0, kvm_init_params + NVHE_INIT_STACK_HYP_VA, x1
- mov sp, x0
+ /* Switch to the overflow stack */
+ adr_this_cpu sp, overflow_stack + OVERFLOW_STACK_SIZE, x0
b hyp_panic_bad_stack
ASM_BUG()
@@ -268,3 +297,13 @@ SYM_CODE_START(__kvm_hyp_host_forward_smc)
ret
SYM_CODE_END(__kvm_hyp_host_forward_smc)
+
+/*
+ * kvm_host_psci_cpu_entry is called through a br instruction, which requires
+ * a bti j landing pad, as compilers (gcc and llvm) don't insert bti j for
+ * external functions, only bti c.
+ */
+SYM_CODE_START(kvm_host_psci_cpu_entry)
+ bti j
+ b __kvm_host_psci_cpu_entry
+SYM_CODE_END(kvm_host_psci_cpu_entry)
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-init.S b/arch/arm64/kvm/hyp/nvhe/hyp-init.S
index c953fb4b9a13..2994878d68ea 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-init.S
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-init.S
@@ -57,6 +57,7 @@ __do_hyp_init:
cmp x0, #HVC_STUB_HCALL_NR
b.lo __kvm_handle_stub_hvc
+ bic x0, x0, #ARM_SMCCC_CALL_HINTS
mov x3, #KVM_HOST_SMCCC_FUNC(__kvm_hyp_init)
cmp x0, x3
b.eq 1f
@@ -83,9 +84,6 @@ SYM_CODE_END(__kvm_hyp_init)
* x0: struct kvm_nvhe_init_params PA
*/
SYM_CODE_START_LOCAL(___kvm_hyp_init)
- ldr x1, [x0, #NVHE_INIT_TPIDR_EL2]
- msr tpidr_el2, x1
-
ldr x1, [x0, #NVHE_INIT_STACK_HYP_VA]
mov sp, x1
@@ -95,6 +93,22 @@ SYM_CODE_START_LOCAL(___kvm_hyp_init)
ldr x1, [x0, #NVHE_INIT_HCR_EL2]
msr hcr_el2, x1
+ mov x2, #HCR_E2H
+ and x2, x1, x2
+ cbz x2, 1f
+
+ // hVHE: Replay the EL2 setup to account for the E2H bit
+ // TPIDR_EL2 is used to preserve x0 across the macro maze...
+ isb
+ msr tpidr_el2, x0
+ init_el2_state
+ finalise_el2_state
+ mrs x0, tpidr_el2
+
+1:
+ ldr x1, [x0, #NVHE_INIT_TPIDR_EL2]
+ msr tpidr_el2, x1
+
ldr x1, [x0, #NVHE_INIT_VTTBR]
msr vttbr_el2, x1
@@ -108,11 +122,7 @@ alternative_if ARM64_HAS_CNP
alternative_else_nop_endif
msr ttbr0_el2, x2
- /*
- * Set the PS bits in TCR_EL2.
- */
ldr x0, [x0, #NVHE_INIT_TCR_EL2]
- tcr_compute_pa_size x0, #TCR_EL2_PS_SHIFT, x1, x2
msr tcr_el2, x0
isb
@@ -128,6 +138,13 @@ alternative_if ARM64_HAS_ADDRESS_AUTH
SCTLR_ELx_ENDA | SCTLR_ELx_ENDB)
orr x0, x0, x1
alternative_else_nop_endif
+
+#ifdef CONFIG_ARM64_BTI_KERNEL
+alternative_if ARM64_BTI
+ orr x0, x0, #SCTLR_EL2_BT
+alternative_else_nop_endif
+#endif /* CONFIG_ARM64_BTI_KERNEL */
+
msr sctlr_el2, x0
isb
@@ -183,6 +200,8 @@ SYM_CODE_START_LOCAL(__kvm_hyp_init_cpu)
/* Initialize EL2 CPU state to sane values. */
init_el2_state // Clobbers x0..x2
+ finalise_el2_state
+ __init_el2_nvhe_prepare_eret
/* Enable MMU, set vectors and stack. */
mov x0, x28
@@ -195,6 +214,11 @@ SYM_CODE_START_LOCAL(__kvm_hyp_init_cpu)
SYM_CODE_END(__kvm_hyp_init_cpu)
SYM_CODE_START(__kvm_handle_stub_hvc)
+ /*
+ * __kvm_handle_stub_hvc is called from __host_hvc through a branch
+ * instruction (br), so we need a bti j at the beginning.
+ */
+ bti j
cmp x0, #HVC_SOFT_RESTART
b.ne 1f
@@ -264,6 +288,8 @@ alternative_else_nop_endif
mov sp, x0
/* And turn the MMU back on! */
+ dsb nsh
+ isb
set_sctlr_el2 x2
ret x1
SYM_FUNC_END(__pkvm_init_switch_pgd)
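
The new code in ___kvm_hyp_init keys the hVHE replay off HCR_EL2.E2H, which is bit 34 of HCR_EL2. A trivial standalone sketch of the same test:

	#include <stdbool.h>
	#include <stdint.h>

	#define HCR_E2H		(UINT64_C(1) << 34)	/* architectural bit position */

	static bool needs_hvhe_replay(uint64_t hcr_el2)
	{
		/* E2H set at this point means the host asked for hVHE, so the
		 * EL2 setup must be replayed with the E2H register layout. */
		return hcr_el2 & HCR_E2H;
	}
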
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
index 3cea4b6ac23e..2385fd03ed87 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
@@ -13,19 +13,96 @@
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
+#include <nvhe/ffa.h>
#include <nvhe/mem_protect.h>
#include <nvhe/mm.h>
+#include <nvhe/pkvm.h>
#include <nvhe/trap_handler.h>
DEFINE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);
void __kvm_hyp_host_forward_smc(struct kvm_cpu_context *host_ctxt);
+static void flush_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
+{
+ struct kvm_vcpu *host_vcpu = hyp_vcpu->host_vcpu;
+
+ hyp_vcpu->vcpu.arch.ctxt = host_vcpu->arch.ctxt;
+
+ hyp_vcpu->vcpu.arch.sve_state = kern_hyp_va(host_vcpu->arch.sve_state);
+ hyp_vcpu->vcpu.arch.sve_max_vl = host_vcpu->arch.sve_max_vl;
+
+ hyp_vcpu->vcpu.arch.hw_mmu = host_vcpu->arch.hw_mmu;
+
+ hyp_vcpu->vcpu.arch.hcr_el2 = host_vcpu->arch.hcr_el2;
+ hyp_vcpu->vcpu.arch.mdcr_el2 = host_vcpu->arch.mdcr_el2;
+ hyp_vcpu->vcpu.arch.cptr_el2 = host_vcpu->arch.cptr_el2;
+
+ hyp_vcpu->vcpu.arch.iflags = host_vcpu->arch.iflags;
+ hyp_vcpu->vcpu.arch.fp_state = host_vcpu->arch.fp_state;
+
+ hyp_vcpu->vcpu.arch.debug_ptr = kern_hyp_va(host_vcpu->arch.debug_ptr);
+ hyp_vcpu->vcpu.arch.host_fpsimd_state = host_vcpu->arch.host_fpsimd_state;
+
+ hyp_vcpu->vcpu.arch.vsesr_el2 = host_vcpu->arch.vsesr_el2;
+
+ hyp_vcpu->vcpu.arch.vgic_cpu.vgic_v3 = host_vcpu->arch.vgic_cpu.vgic_v3;
+}
+
+static void sync_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
+{
+ struct kvm_vcpu *host_vcpu = hyp_vcpu->host_vcpu;
+ struct vgic_v3_cpu_if *hyp_cpu_if = &hyp_vcpu->vcpu.arch.vgic_cpu.vgic_v3;
+ struct vgic_v3_cpu_if *host_cpu_if = &host_vcpu->arch.vgic_cpu.vgic_v3;
+ unsigned int i;
+
+ host_vcpu->arch.ctxt = hyp_vcpu->vcpu.arch.ctxt;
+
+ host_vcpu->arch.hcr_el2 = hyp_vcpu->vcpu.arch.hcr_el2;
+ host_vcpu->arch.cptr_el2 = hyp_vcpu->vcpu.arch.cptr_el2;
+
+ host_vcpu->arch.fault = hyp_vcpu->vcpu.arch.fault;
+
+ host_vcpu->arch.iflags = hyp_vcpu->vcpu.arch.iflags;
+ host_vcpu->arch.fp_state = hyp_vcpu->vcpu.arch.fp_state;
+
+ host_cpu_if->vgic_hcr = hyp_cpu_if->vgic_hcr;
+ for (i = 0; i < hyp_cpu_if->used_lrs; ++i)
+ host_cpu_if->vgic_lr[i] = hyp_cpu_if->vgic_lr[i];
+}
+
static void handle___kvm_vcpu_run(struct kvm_cpu_context *host_ctxt)
{
- DECLARE_REG(struct kvm_vcpu *, vcpu, host_ctxt, 1);
+ DECLARE_REG(struct kvm_vcpu *, host_vcpu, host_ctxt, 1);
+ int ret;
+
+ host_vcpu = kern_hyp_va(host_vcpu);
+
+ if (unlikely(is_protected_kvm_enabled())) {
+ struct pkvm_hyp_vcpu *hyp_vcpu;
+ struct kvm *host_kvm;
+
+ host_kvm = kern_hyp_va(host_vcpu->kvm);
+ hyp_vcpu = pkvm_load_hyp_vcpu(host_kvm->arch.pkvm.handle,
+ host_vcpu->vcpu_idx);
+ if (!hyp_vcpu) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ flush_hyp_vcpu(hyp_vcpu);
+
+ ret = __kvm_vcpu_run(&hyp_vcpu->vcpu);
+
+ sync_hyp_vcpu(hyp_vcpu);
+ pkvm_put_hyp_vcpu(hyp_vcpu);
+ } else {
+ /* The host is fully trusted, run its vCPU directly. */
+ ret = __kvm_vcpu_run(host_vcpu);
+ }
- cpu_reg(host_ctxt, 1) = __kvm_vcpu_run(kern_hyp_va(vcpu));
+out:
+ cpu_reg(host_ctxt, 1) = ret;
}
static void handle___kvm_adjust_pc(struct kvm_cpu_context *host_ctxt)
@@ -49,6 +126,25 @@ static void handle___kvm_tlb_flush_vmid_ipa(struct kvm_cpu_context *host_ctxt)
__kvm_tlb_flush_vmid_ipa(kern_hyp_va(mmu), ipa, level);
}
+static void handle___kvm_tlb_flush_vmid_ipa_nsh(struct kvm_cpu_context *host_ctxt)
+{
+ DECLARE_REG(struct kvm_s2_mmu *, mmu, host_ctxt, 1);
+ DECLARE_REG(phys_addr_t, ipa, host_ctxt, 2);
+ DECLARE_REG(int, level, host_ctxt, 3);
+
+ __kvm_tlb_flush_vmid_ipa_nsh(kern_hyp_va(mmu), ipa, level);
+}
+
+static void
+handle___kvm_tlb_flush_vmid_range(struct kvm_cpu_context *host_ctxt)
+{
+ DECLARE_REG(struct kvm_s2_mmu *, mmu, host_ctxt, 1);
+ DECLARE_REG(phys_addr_t, start, host_ctxt, 2);
+ DECLARE_REG(unsigned long, pages, host_ctxt, 3);
+
+ __kvm_tlb_flush_vmid_range(kern_hyp_va(mmu), start, pages);
+}
+
static void handle___kvm_tlb_flush_vmid(struct kvm_cpu_context *host_ctxt)
{
DECLARE_REG(struct kvm_s2_mmu *, mmu, host_ctxt, 1);
@@ -191,6 +287,33 @@ static void handle___pkvm_vcpu_init_traps(struct kvm_cpu_context *host_ctxt)
__pkvm_vcpu_init_traps(kern_hyp_va(vcpu));
}
+static void handle___pkvm_init_vm(struct kvm_cpu_context *host_ctxt)
+{
+ DECLARE_REG(struct kvm *, host_kvm, host_ctxt, 1);
+ DECLARE_REG(unsigned long, vm_hva, host_ctxt, 2);
+ DECLARE_REG(unsigned long, pgd_hva, host_ctxt, 3);
+
+ host_kvm = kern_hyp_va(host_kvm);
+ cpu_reg(host_ctxt, 1) = __pkvm_init_vm(host_kvm, vm_hva, pgd_hva);
+}
+
+static void handle___pkvm_init_vcpu(struct kvm_cpu_context *host_ctxt)
+{
+ DECLARE_REG(pkvm_handle_t, handle, host_ctxt, 1);
+ DECLARE_REG(struct kvm_vcpu *, host_vcpu, host_ctxt, 2);
+ DECLARE_REG(unsigned long, vcpu_hva, host_ctxt, 3);
+
+ host_vcpu = kern_hyp_va(host_vcpu);
+ cpu_reg(host_ctxt, 1) = __pkvm_init_vcpu(handle, host_vcpu, vcpu_hva);
+}
+
+static void handle___pkvm_teardown_vm(struct kvm_cpu_context *host_ctxt)
+{
+ DECLARE_REG(pkvm_handle_t, handle, host_ctxt, 1);
+
+ cpu_reg(host_ctxt, 1) = __pkvm_teardown_vm(handle);
+}
+
typedef void (*hcall_t)(struct kvm_cpu_context *);
#define HANDLE_FUNC(x) [__KVM_HOST_SMCCC_FUNC_##x] = (hcall_t)handle_##x
@@ -212,7 +335,9 @@ static const hcall_t host_hcall[] = {
HANDLE_FUNC(__kvm_vcpu_run),
HANDLE_FUNC(__kvm_flush_vm_context),
HANDLE_FUNC(__kvm_tlb_flush_vmid_ipa),
+ HANDLE_FUNC(__kvm_tlb_flush_vmid_ipa_nsh),
HANDLE_FUNC(__kvm_tlb_flush_vmid),
+ HANDLE_FUNC(__kvm_tlb_flush_vmid_range),
HANDLE_FUNC(__kvm_flush_cpu_context),
HANDLE_FUNC(__kvm_timer_set_cntvoff),
HANDLE_FUNC(__vgic_v3_read_vmcr),
@@ -220,6 +345,9 @@ static const hcall_t host_hcall[] = {
HANDLE_FUNC(__vgic_v3_save_aprs),
HANDLE_FUNC(__vgic_v3_restore_aprs),
HANDLE_FUNC(__pkvm_vcpu_init_traps),
+ HANDLE_FUNC(__pkvm_init_vm),
+ HANDLE_FUNC(__pkvm_init_vcpu),
+ HANDLE_FUNC(__pkvm_teardown_vm),
};
static void handle_host_hcall(struct kvm_cpu_context *host_ctxt)
@@ -240,6 +368,7 @@ static void handle_host_hcall(struct kvm_cpu_context *host_ctxt)
if (static_branch_unlikely(&kvm_protected_mode_initialized))
hcall_min = __KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize;
+ id &= ~ARM_SMCCC_CALL_HINTS;
id -= KVM_HOST_SMCCC_ID(0);
if (unlikely(id < hcall_min || id >= ARRAY_SIZE(host_hcall)))
@@ -264,9 +393,14 @@ static void default_host_smc_handler(struct kvm_cpu_context *host_ctxt)
static void handle_host_smc(struct kvm_cpu_context *host_ctxt)
{
+ DECLARE_REG(u64, func_id, host_ctxt, 0);
bool handled;
- handled = kvm_host_psci_handler(host_ctxt);
+ func_id &= ~ARM_SMCCC_CALL_HINTS;
+
+ handled = kvm_host_psci_handler(host_ctxt, func_id);
+ if (!handled)
+ handled = kvm_host_ffa_handler(host_ctxt, func_id);
if (!handled)
default_host_smc_handler(host_ctxt);
@@ -286,7 +420,11 @@ void handle_trap(struct kvm_cpu_context *host_ctxt)
handle_host_smc(host_ctxt);
break;
case ESR_ELx_EC_SVE:
- sysreg_clear_set(cptr_el2, CPTR_EL2_TZ, 0);
+ if (has_hvhe())
+ sysreg_clear_set(cpacr_el1, 0, (CPACR_EL1_ZEN_EL1EN |
+ CPACR_EL1_ZEN_EL0EN));
+ else
+ sysreg_clear_set(cptr_el2, CPTR_EL2_TZ, 0);
isb();
sve_cond_update_zcr_vq(ZCR_ELx_LEN_MASK, SYS_ZCR_EL2);
break;
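
handle_host_hcall() turns the SMCCC function ID into an index into host_hcall[] after stripping the SMCCC hint bits, and refuses anything below hcall_min once protected mode has been finalized. A standalone sketch of that dispatch shape, with the hint mask and base ID as placeholder values rather than the real SMCCC/KVM_HOST_SMCCC_ID() constants:

	#include <stddef.h>
	#include <stdint.h>

	#define HINT_BITS_MASK	(UINT64_C(1) << 16)	/* placeholder */
	#define HCALL_ID_BASE	UINT64_C(0xc6000000)	/* placeholder */

	typedef void (*hcall_fn_t)(void *host_ctxt);

	static int dispatch_hcall(uint64_t id, const hcall_fn_t *table, size_t nr,
				  size_t hcall_min, void *host_ctxt)
	{
		id &= ~HINT_BITS_MASK;		/* hint bits don't change the call */
		id -= HCALL_ID_BASE;		/* table is indexed from function 0 */

		if (id < hcall_min || id >= nr || !table[id])
			return -1;		/* unknown or forbidden hcall */

		table[id](host_ctxt);
		return 0;
	}
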
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-smp.c b/arch/arm64/kvm/hyp/nvhe/hyp-smp.c
index 9f54833af400..04d194583f1e 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-smp.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-smp.c
@@ -23,6 +23,8 @@ u64 cpu_logical_map(unsigned int cpu)
return hyp_cpu_logical_map[cpu];
}
+unsigned long __ro_after_init kvm_arm_hyp_percpu_base[NR_CPUS];
+
unsigned long __hyp_per_cpu_offset(unsigned int cpu)
{
unsigned long *cpu_base_array;
diff --git a/arch/arm64/kvm/hyp/nvhe/list_debug.c b/arch/arm64/kvm/hyp/nvhe/list_debug.c
index d68abd7ea124..46a2d4f2b3c6 100644
--- a/arch/arm64/kvm/hyp/nvhe/list_debug.c
+++ b/arch/arm64/kvm/hyp/nvhe/list_debug.c
@@ -26,8 +26,9 @@ static inline __must_check bool nvhe_check_data_corruption(bool v)
/* The predicates checked here are taken from lib/list_debug.c. */
-bool __list_add_valid(struct list_head *new, struct list_head *prev,
- struct list_head *next)
+__list_valid_slowpath
+bool __list_add_valid_or_report(struct list_head *new, struct list_head *prev,
+ struct list_head *next)
{
if (NVHE_CHECK_DATA_CORRUPTION(next->prev != prev) ||
NVHE_CHECK_DATA_CORRUPTION(prev->next != next) ||
@@ -37,7 +38,8 @@ bool __list_add_valid(struct list_head *new, struct list_head *prev,
return true;
}
-bool __list_del_entry_valid(struct list_head *entry)
+__list_valid_slowpath
+bool __list_del_entry_valid_or_report(struct list_head *entry)
{
struct list_head *prev, *next;
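
As the comment above notes, the predicates are taken from lib/list_debug.c. A minimal userspace sketch of the structural invariants that __list_add_valid_or_report() enforces:

	#include <stdbool.h>
	#include <stdio.h>

	struct list_head { struct list_head *next, *prev; };

	static bool list_add_valid(struct list_head *new_entry,
				   struct list_head *prev, struct list_head *next)
	{
		/* A corrupted neighbour link or a re-added entry is refused. */
		if (next->prev != prev || prev->next != next ||
		    new_entry == prev || new_entry == next) {
			fprintf(stderr, "list_add corruption detected\n");
			return false;
		}
		return true;
	}
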
diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
index 1e78acf9662e..861c76021a25 100644
--- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
+++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
@@ -21,21 +21,33 @@
#define KVM_HOST_S2_FLAGS (KVM_PGTABLE_S2_NOFWB | KVM_PGTABLE_S2_IDMAP)
-extern unsigned long hyp_nr_cpus;
-struct host_kvm host_kvm;
+struct host_mmu host_mmu;
static struct hyp_pool host_s2_pool;
-const u8 pkvm_hyp_id = 1;
+static DEFINE_PER_CPU(struct pkvm_hyp_vm *, __current_vm);
+#define current_vm (*this_cpu_ptr(&__current_vm))
+
+static void guest_lock_component(struct pkvm_hyp_vm *vm)
+{
+ hyp_spin_lock(&vm->lock);
+ current_vm = vm;
+}
+
+static void guest_unlock_component(struct pkvm_hyp_vm *vm)
+{
+ current_vm = NULL;
+ hyp_spin_unlock(&vm->lock);
+}
static void host_lock_component(void)
{
- hyp_spin_lock(&host_kvm.lock);
+ hyp_spin_lock(&host_mmu.lock);
}
static void host_unlock_component(void)
{
- hyp_spin_unlock(&host_kvm.lock);
+ hyp_spin_unlock(&host_mmu.lock);
}
static void hyp_lock_component(void)
@@ -79,6 +91,11 @@ static void host_s2_put_page(void *addr)
hyp_put_page(&host_s2_pool, addr);
}
+static void host_s2_free_unlinked_table(void *addr, s8 level)
+{
+ kvm_pgtable_stage2_free_unlinked(&host_mmu.mm_ops, addr, level);
+}
+
static int prepare_s2_pool(void *pgt_pool_base)
{
unsigned long nr_pages, pfn;
@@ -90,9 +107,10 @@ static int prepare_s2_pool(void *pgt_pool_base)
if (ret)
return ret;
- host_kvm.mm_ops = (struct kvm_pgtable_mm_ops) {
+ host_mmu.mm_ops = (struct kvm_pgtable_mm_ops) {
.zalloc_pages_exact = host_s2_zalloc_pages_exact,
.zalloc_page = host_s2_zalloc_page,
+ .free_unlinked_table = host_s2_free_unlinked_table,
.phys_to_virt = hyp_phys_to_virt,
.virt_to_phys = hyp_virt_to_phys,
.page_count = hyp_page_count,
@@ -111,53 +129,185 @@ static void prepare_host_vtcr(void)
parange = kvm_get_parange(id_aa64mmfr0_el1_sys_val);
phys_shift = id_aa64mmfr0_parange_to_phys_shift(parange);
- host_kvm.arch.vtcr = kvm_get_vtcr(id_aa64mmfr0_el1_sys_val,
- id_aa64mmfr1_el1_sys_val, phys_shift);
+ host_mmu.arch.mmu.vtcr = kvm_get_vtcr(id_aa64mmfr0_el1_sys_val,
+ id_aa64mmfr1_el1_sys_val, phys_shift);
}
static bool host_stage2_force_pte_cb(u64 addr, u64 end, enum kvm_pgtable_prot prot);
int kvm_host_prepare_stage2(void *pgt_pool_base)
{
- struct kvm_s2_mmu *mmu = &host_kvm.arch.mmu;
+ struct kvm_s2_mmu *mmu = &host_mmu.arch.mmu;
int ret;
prepare_host_vtcr();
- hyp_spin_lock_init(&host_kvm.lock);
- mmu->arch = &host_kvm.arch;
+ hyp_spin_lock_init(&host_mmu.lock);
+ mmu->arch = &host_mmu.arch;
ret = prepare_s2_pool(pgt_pool_base);
if (ret)
return ret;
- ret = __kvm_pgtable_stage2_init(&host_kvm.pgt, mmu,
- &host_kvm.mm_ops, KVM_HOST_S2_FLAGS,
+ ret = __kvm_pgtable_stage2_init(&host_mmu.pgt, mmu,
+ &host_mmu.mm_ops, KVM_HOST_S2_FLAGS,
host_stage2_force_pte_cb);
if (ret)
return ret;
- mmu->pgd_phys = __hyp_pa(host_kvm.pgt.pgd);
- mmu->pgt = &host_kvm.pgt;
+ mmu->pgd_phys = __hyp_pa(host_mmu.pgt.pgd);
+ mmu->pgt = &host_mmu.pgt;
atomic64_set(&mmu->vmid.id, 0);
return 0;
}
+static bool guest_stage2_force_pte_cb(u64 addr, u64 end,
+ enum kvm_pgtable_prot prot)
+{
+ return true;
+}
+
+static void *guest_s2_zalloc_pages_exact(size_t size)
+{
+ void *addr = hyp_alloc_pages(&current_vm->pool, get_order(size));
+
+ WARN_ON(size != (PAGE_SIZE << get_order(size)));
+ hyp_split_page(hyp_virt_to_page(addr));
+
+ return addr;
+}
+
+static void guest_s2_free_pages_exact(void *addr, unsigned long size)
+{
+ u8 order = get_order(size);
+ unsigned int i;
+
+ for (i = 0; i < (1 << order); i++)
+ hyp_put_page(&current_vm->pool, addr + (i * PAGE_SIZE));
+}
+
+static void *guest_s2_zalloc_page(void *mc)
+{
+ struct hyp_page *p;
+ void *addr;
+
+ addr = hyp_alloc_pages(&current_vm->pool, 0);
+ if (addr)
+ return addr;
+
+ addr = pop_hyp_memcache(mc, hyp_phys_to_virt);
+ if (!addr)
+ return addr;
+
+ memset(addr, 0, PAGE_SIZE);
+ p = hyp_virt_to_page(addr);
+ memset(p, 0, sizeof(*p));
+ p->refcount = 1;
+
+ return addr;
+}
+
+static void guest_s2_get_page(void *addr)
+{
+ hyp_get_page(&current_vm->pool, addr);
+}
+
+static void guest_s2_put_page(void *addr)
+{
+ hyp_put_page(&current_vm->pool, addr);
+}
+
+static void clean_dcache_guest_page(void *va, size_t size)
+{
+ __clean_dcache_guest_page(hyp_fixmap_map(__hyp_pa(va)), size);
+ hyp_fixmap_unmap();
+}
+
+static void invalidate_icache_guest_page(void *va, size_t size)
+{
+ __invalidate_icache_guest_page(hyp_fixmap_map(__hyp_pa(va)), size);
+ hyp_fixmap_unmap();
+}
+
+int kvm_guest_prepare_stage2(struct pkvm_hyp_vm *vm, void *pgd)
+{
+ struct kvm_s2_mmu *mmu = &vm->kvm.arch.mmu;
+ unsigned long nr_pages;
+ int ret;
+
+ nr_pages = kvm_pgtable_stage2_pgd_size(mmu->vtcr) >> PAGE_SHIFT;
+ ret = hyp_pool_init(&vm->pool, hyp_virt_to_pfn(pgd), nr_pages, 0);
+ if (ret)
+ return ret;
+
+ hyp_spin_lock_init(&vm->lock);
+ vm->mm_ops = (struct kvm_pgtable_mm_ops) {
+ .zalloc_pages_exact = guest_s2_zalloc_pages_exact,
+ .free_pages_exact = guest_s2_free_pages_exact,
+ .zalloc_page = guest_s2_zalloc_page,
+ .phys_to_virt = hyp_phys_to_virt,
+ .virt_to_phys = hyp_virt_to_phys,
+ .page_count = hyp_page_count,
+ .get_page = guest_s2_get_page,
+ .put_page = guest_s2_put_page,
+ .dcache_clean_inval_poc = clean_dcache_guest_page,
+ .icache_inval_pou = invalidate_icache_guest_page,
+ };
+
+ guest_lock_component(vm);
+ ret = __kvm_pgtable_stage2_init(mmu->pgt, mmu, &vm->mm_ops, 0,
+ guest_stage2_force_pte_cb);
+ guest_unlock_component(vm);
+ if (ret)
+ return ret;
+
+ vm->kvm.arch.mmu.pgd_phys = __hyp_pa(vm->pgt.pgd);
+
+ return 0;
+}
+
+void reclaim_guest_pages(struct pkvm_hyp_vm *vm, struct kvm_hyp_memcache *mc)
+{
+ void *addr;
+
+ /* Dump all pgtable pages in the hyp_pool */
+ guest_lock_component(vm);
+ kvm_pgtable_stage2_destroy(&vm->pgt);
+ vm->kvm.arch.mmu.pgd_phys = 0ULL;
+ guest_unlock_component(vm);
+
+ /* Drain the hyp_pool into the memcache */
+ addr = hyp_alloc_pages(&vm->pool, 0);
+ while (addr) {
+ memset(hyp_virt_to_page(addr), 0, sizeof(struct hyp_page));
+ push_hyp_memcache(mc, addr, hyp_virt_to_phys);
+ WARN_ON(__pkvm_hyp_donate_host(hyp_virt_to_pfn(addr), 1));
+ addr = hyp_alloc_pages(&vm->pool, 0);
+ }
+}
+
int __pkvm_prot_finalize(void)
{
- struct kvm_s2_mmu *mmu = &host_kvm.arch.mmu;
+ struct kvm_s2_mmu *mmu = &host_mmu.arch.mmu;
struct kvm_nvhe_init_params *params = this_cpu_ptr(&kvm_init_params);
if (params->hcr_el2 & HCR_VM)
return -EPERM;
params->vttbr = kvm_get_vttbr(mmu);
- params->vtcr = host_kvm.arch.vtcr;
+ params->vtcr = mmu->vtcr;
params->hcr_el2 |= HCR_VM;
+
+ /*
+ * The CMO below not only cleans the updated params to the
+ * PoC, but also provides the DSB that ensures ongoing
+ * page-table walks that have started before we trapped to EL2
+ * have completed.
+ */
kvm_flush_dcache_to_poc(params, sizeof(*params));
write_sysreg(params->hcr_el2, hcr_el2);
- __load_stage2(&host_kvm.arch.mmu, &host_kvm.arch);
+ __load_stage2(&host_mmu.arch.mmu, &host_mmu.arch);
/*
* Make sure to have an ISB before the TLB maintenance below but only
@@ -175,7 +325,7 @@ int __pkvm_prot_finalize(void)
static int host_stage2_unmap_dev_all(void)
{
- struct kvm_pgtable *pgt = &host_kvm.pgt;
+ struct kvm_pgtable *pgt = &host_mmu.pgt;
struct memblock_region *reg;
u64 addr = 0;
int i, ret;
@@ -195,7 +345,7 @@ struct kvm_mem_range {
u64 end;
};
-static bool find_mem_range(phys_addr_t addr, struct kvm_mem_range *range)
+static struct memblock_region *find_mem_range(phys_addr_t addr, struct kvm_mem_range *range)
{
int cur, left = 0, right = hyp_memblock_nr;
struct memblock_region *reg;
@@ -218,18 +368,28 @@ static bool find_mem_range(phys_addr_t addr, struct kvm_mem_range *range)
} else {
range->start = reg->base;
range->end = end;
- return true;
+ return reg;
}
}
- return false;
+ return NULL;
}
bool addr_is_memory(phys_addr_t phys)
{
struct kvm_mem_range range;
- return find_mem_range(phys, &range);
+ return !!find_mem_range(phys, &range);
+}
+
+static bool addr_is_allowed_memory(phys_addr_t phys)
+{
+ struct memblock_region *reg;
+ struct kvm_mem_range range;
+
+ reg = find_mem_range(phys, &range);
+
+ return reg && !(reg->flags & MEMBLOCK_NOMAP);
}
static bool is_in_mem_range(u64 addr, struct kvm_mem_range *range)
@@ -250,8 +410,8 @@ static bool range_is_memory(u64 start, u64 end)
static inline int __host_stage2_idmap(u64 start, u64 end,
enum kvm_pgtable_prot prot)
{
- return kvm_pgtable_stage2_map(&host_kvm.pgt, start, end - start, start,
- prot, &host_s2_pool);
+ return kvm_pgtable_stage2_map(&host_mmu.pgt, start, end - start, start,
+ prot, &host_s2_pool, 0);
}
/*
@@ -263,7 +423,7 @@ static inline int __host_stage2_idmap(u64 start, u64 end,
#define host_stage2_try(fn, ...) \
({ \
int __ret; \
- hyp_assert_lock_held(&host_kvm.lock); \
+ hyp_assert_lock_held(&host_mmu.lock); \
__ret = fn(__VA_ARGS__); \
if (__ret == -ENOMEM) { \
__ret = host_stage2_unmap_dev_all(); \
@@ -283,11 +443,11 @@ static int host_stage2_adjust_range(u64 addr, struct kvm_mem_range *range)
{
struct kvm_mem_range cur;
kvm_pte_t pte;
- u32 level;
+ s8 level;
int ret;
- hyp_assert_lock_held(&host_kvm.lock);
- ret = kvm_pgtable_get_leaf(&host_kvm.pgt, addr, &pte, &level);
+ hyp_assert_lock_held(&host_mmu.lock);
+ ret = kvm_pgtable_get_leaf(&host_mmu.pgt, addr, &pte, &level);
if (ret)
return ret;
@@ -302,7 +462,7 @@ static int host_stage2_adjust_range(u64 addr, struct kvm_mem_range *range)
cur.start = ALIGN_DOWN(addr, granule);
cur.end = cur.start + granule;
level++;
- } while ((level < KVM_PGTABLE_MAX_LEVELS) &&
+ } while ((level <= KVM_PGTABLE_LAST_LEVEL) &&
!(kvm_level_supports_block_mapping(level) &&
range_included(&cur, range)));
@@ -319,7 +479,7 @@ int host_stage2_idmap_locked(phys_addr_t addr, u64 size,
int host_stage2_set_owner_locked(phys_addr_t addr, u64 size, u8 owner_id)
{
- return host_stage2_try(kvm_pgtable_stage2_set_owner, &host_kvm.pgt,
+ return host_stage2_try(kvm_pgtable_stage2_set_owner, &host_mmu.pgt,
addr, size, &host_s2_pool, owner_id);
}
@@ -348,7 +508,7 @@ static bool host_stage2_force_pte_cb(u64 addr, u64 end, enum kvm_pgtable_prot pr
static int host_stage2_idmap(u64 addr)
{
struct kvm_mem_range range;
- bool is_memory = find_mem_range(addr, &range);
+ bool is_memory = !!find_mem_range(addr, &range);
enum kvm_pgtable_prot prot;
int ret;
@@ -380,12 +540,6 @@ void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt)
BUG_ON(ret && ret != -EAGAIN);
}
-/* This corresponds to locking order */
-enum pkvm_component_id {
- PKVM_ID_HOST,
- PKVM_ID_HYP,
-};
-
struct pkvm_mem_transition {
u64 nr_pages;
@@ -399,6 +553,9 @@ struct pkvm_mem_transition {
/* Address in the completer's address space */
u64 completer_addr;
} host;
+ struct {
+ u64 completer_addr;
+ } hyp;
};
} initiator;
@@ -412,23 +569,21 @@ struct pkvm_mem_share {
const enum kvm_pgtable_prot completer_prot;
};
+struct pkvm_mem_donation {
+ const struct pkvm_mem_transition tx;
+};
+
struct check_walk_data {
enum pkvm_page_state desired;
- enum pkvm_page_state (*get_page_state)(kvm_pte_t pte);
+ enum pkvm_page_state (*get_page_state)(kvm_pte_t pte, u64 addr);
};
-static int __check_page_state_visitor(u64 addr, u64 end, u32 level,
- kvm_pte_t *ptep,
- enum kvm_pgtable_walk_flags flag,
- void * const arg)
+static int __check_page_state_visitor(const struct kvm_pgtable_visit_ctx *ctx,
+ enum kvm_pgtable_walk_flags visit)
{
- struct check_walk_data *d = arg;
- kvm_pte_t pte = *ptep;
-
- if (kvm_pte_valid(pte) && !addr_is_memory(kvm_pte_to_phys(pte)))
- return -EINVAL;
+ struct check_walk_data *d = ctx->arg;
- return d->get_page_state(pte) == d->desired ? 0 : -EPERM;
+ return d->get_page_state(ctx->old, ctx->addr) == d->desired ? 0 : -EPERM;
}
static int check_page_state_range(struct kvm_pgtable *pgt, u64 addr, u64 size,
@@ -443,8 +598,11 @@ static int check_page_state_range(struct kvm_pgtable *pgt, u64 addr, u64 size,
return kvm_pgtable_walk(pgt, addr, size, &walker);
}
-static enum pkvm_page_state host_get_page_state(kvm_pte_t pte)
+static enum pkvm_page_state host_get_page_state(kvm_pte_t pte, u64 addr)
{
+ if (!addr_is_allowed_memory(addr))
+ return PKVM_NOPAGE;
+
if (!kvm_pte_valid(pte) && pte)
return PKVM_NOPAGE;
@@ -459,8 +617,8 @@ static int __host_check_page_state_range(u64 addr, u64 size,
.get_page_state = host_get_page_state,
};
- hyp_assert_lock_held(&host_kvm.lock);
- return check_page_state_range(&host_kvm.pgt, addr, size, &d);
+ hyp_assert_lock_held(&host_mmu.lock);
+ return check_page_state_range(&host_mmu.pgt, addr, size, &d);
}
static int __host_set_page_state_range(u64 addr, u64 size,
@@ -511,12 +669,52 @@ static int host_initiate_unshare(u64 *completer_addr,
return __host_set_page_state_range(addr, size, PKVM_PAGE_OWNED);
}
-static enum pkvm_page_state hyp_get_page_state(kvm_pte_t pte)
+static int host_initiate_donation(u64 *completer_addr,
+ const struct pkvm_mem_transition *tx)
+{
+ u8 owner_id = tx->completer.id;
+ u64 size = tx->nr_pages * PAGE_SIZE;
+
+ *completer_addr = tx->initiator.host.completer_addr;
+ return host_stage2_set_owner_locked(tx->initiator.addr, size, owner_id);
+}
+
+static bool __host_ack_skip_pgtable_check(const struct pkvm_mem_transition *tx)
+{
+ return !(IS_ENABLED(CONFIG_NVHE_EL2_DEBUG) ||
+ tx->initiator.id != PKVM_ID_HYP);
+}
+
+static int __host_ack_transition(u64 addr, const struct pkvm_mem_transition *tx,
+ enum pkvm_page_state state)
+{
+ u64 size = tx->nr_pages * PAGE_SIZE;
+
+ if (__host_ack_skip_pgtable_check(tx))
+ return 0;
+
+ return __host_check_page_state_range(addr, size, state);
+}
+
+static int host_ack_donation(u64 addr, const struct pkvm_mem_transition *tx)
+{
+ return __host_ack_transition(addr, tx, PKVM_NOPAGE);
+}
+
+static int host_complete_donation(u64 addr, const struct pkvm_mem_transition *tx)
+{
+ u64 size = tx->nr_pages * PAGE_SIZE;
+ u8 host_id = tx->completer.id;
+
+ return host_stage2_set_owner_locked(addr, size, host_id);
+}
+
+static enum pkvm_page_state hyp_get_page_state(kvm_pte_t pte, u64 addr)
{
if (!kvm_pte_valid(pte))
return PKVM_NOPAGE;
- return pkvm_getstate(kvm_pgtable_stage2_pte_prot(pte));
+ return pkvm_getstate(kvm_pgtable_hyp_pte_prot(pte));
}
static int __hyp_check_page_state_range(u64 addr, u64 size,
@@ -531,6 +729,27 @@ static int __hyp_check_page_state_range(u64 addr, u64 size,
return check_page_state_range(&pkvm_pgtable, addr, size, &d);
}
+static int hyp_request_donation(u64 *completer_addr,
+ const struct pkvm_mem_transition *tx)
+{
+ u64 size = tx->nr_pages * PAGE_SIZE;
+ u64 addr = tx->initiator.addr;
+
+ *completer_addr = tx->initiator.hyp.completer_addr;
+ return __hyp_check_page_state_range(addr, size, PKVM_PAGE_OWNED);
+}
+
+static int hyp_initiate_donation(u64 *completer_addr,
+ const struct pkvm_mem_transition *tx)
+{
+ u64 size = tx->nr_pages * PAGE_SIZE;
+ int ret;
+
+ *completer_addr = tx->initiator.hyp.completer_addr;
+ ret = kvm_pgtable_hyp_unmap(&pkvm_pgtable, tx->initiator.addr, size);
+ return (ret != size) ? -EFAULT : 0;
+}
+
static bool __hyp_ack_skip_pgtable_check(const struct pkvm_mem_transition *tx)
{
return !(IS_ENABLED(CONFIG_NVHE_EL2_DEBUG) ||
@@ -555,6 +774,9 @@ static int hyp_ack_unshare(u64 addr, const struct pkvm_mem_transition *tx)
{
u64 size = tx->nr_pages * PAGE_SIZE;
+ if (tx->initiator.id == PKVM_ID_HOST && hyp_page_count((void *)addr))
+ return -EBUSY;
+
if (__hyp_ack_skip_pgtable_check(tx))
return 0;
@@ -562,6 +784,16 @@ static int hyp_ack_unshare(u64 addr, const struct pkvm_mem_transition *tx)
PKVM_PAGE_SHARED_BORROWED);
}
+static int hyp_ack_donation(u64 addr, const struct pkvm_mem_transition *tx)
+{
+ u64 size = tx->nr_pages * PAGE_SIZE;
+
+ if (__hyp_ack_skip_pgtable_check(tx))
+ return 0;
+
+ return __hyp_check_page_state_range(addr, size, PKVM_NOPAGE);
+}
+
static int hyp_complete_share(u64 addr, const struct pkvm_mem_transition *tx,
enum kvm_pgtable_prot perms)
{
@@ -580,6 +812,15 @@ static int hyp_complete_unshare(u64 addr, const struct pkvm_mem_transition *tx)
return (ret != size) ? -EFAULT : 0;
}
+static int hyp_complete_donation(u64 addr,
+ const struct pkvm_mem_transition *tx)
+{
+ void *start = (void *)addr, *end = start + (tx->nr_pages * PAGE_SIZE);
+ enum kvm_pgtable_prot prot = pkvm_mkstate(PAGE_HYP, PKVM_PAGE_OWNED);
+
+ return pkvm_create_mappings_locked(start, end, prot);
+}
+
static int check_share(struct pkvm_mem_share *share)
{
const struct pkvm_mem_transition *tx = &share->tx;
@@ -601,6 +842,13 @@ static int check_share(struct pkvm_mem_share *share)
case PKVM_ID_HYP:
ret = hyp_ack_share(completer_addr, tx, share->completer_prot);
break;
+ case PKVM_ID_FFA:
+ /*
+ * We only check the host; the secure side will check the other
+ * end when we forward the FFA call.
+ */
+ ret = 0;
+ break;
default:
ret = -EINVAL;
}
@@ -629,6 +877,13 @@ static int __do_share(struct pkvm_mem_share *share)
case PKVM_ID_HYP:
ret = hyp_complete_share(completer_addr, tx, share->completer_prot);
break;
+ case PKVM_ID_FFA:
+ /*
+ * We're not responsible for any secure page-tables, so there's
+ * nothing to do here.
+ */
+ ret = 0;
+ break;
default:
ret = -EINVAL;
}
@@ -677,6 +932,10 @@ static int check_unshare(struct pkvm_mem_share *share)
case PKVM_ID_HYP:
ret = hyp_ack_unshare(completer_addr, tx);
break;
+ case PKVM_ID_FFA:
+ /* See check_share() */
+ ret = 0;
+ break;
default:
ret = -EINVAL;
}
@@ -705,6 +964,10 @@ static int __do_unshare(struct pkvm_mem_share *share)
case PKVM_ID_HYP:
ret = hyp_complete_unshare(completer_addr, tx);
break;
+ case PKVM_ID_FFA:
+ /* See __do_share() */
+ ret = 0;
+ break;
default:
ret = -EINVAL;
}
@@ -732,6 +995,94 @@ static int do_unshare(struct pkvm_mem_share *share)
return WARN_ON(__do_unshare(share));
}
+static int check_donation(struct pkvm_mem_donation *donation)
+{
+ const struct pkvm_mem_transition *tx = &donation->tx;
+ u64 completer_addr;
+ int ret;
+
+ switch (tx->initiator.id) {
+ case PKVM_ID_HOST:
+ ret = host_request_owned_transition(&completer_addr, tx);
+ break;
+ case PKVM_ID_HYP:
+ ret = hyp_request_donation(&completer_addr, tx);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
+ switch (tx->completer.id) {
+ case PKVM_ID_HOST:
+ ret = host_ack_donation(completer_addr, tx);
+ break;
+ case PKVM_ID_HYP:
+ ret = hyp_ack_donation(completer_addr, tx);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int __do_donate(struct pkvm_mem_donation *donation)
+{
+ const struct pkvm_mem_transition *tx = &donation->tx;
+ u64 completer_addr;
+ int ret;
+
+ switch (tx->initiator.id) {
+ case PKVM_ID_HOST:
+ ret = host_initiate_donation(&completer_addr, tx);
+ break;
+ case PKVM_ID_HYP:
+ ret = hyp_initiate_donation(&completer_addr, tx);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
+ switch (tx->completer.id) {
+ case PKVM_ID_HOST:
+ ret = host_complete_donation(completer_addr, tx);
+ break;
+ case PKVM_ID_HYP:
+ ret = hyp_complete_donation(completer_addr, tx);
+ break;
+ default:
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+/*
+ * do_donate():
+ *
+ * The page owner transfers ownership to another component, losing access
+ * as a consequence.
+ *
+ * Initiator: OWNED => NOPAGE
+ * Completer: NOPAGE => OWNED
+ */
+static int do_donate(struct pkvm_mem_donation *donation)
+{
+ int ret;
+
+ ret = check_donation(donation);
+ if (ret)
+ return ret;
+
+ return WARN_ON(__do_donate(donation));
+}
+
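As a rough illustration of the two-phase protocol do_donate() follows, here is a minimal userspace C sketch (hypothetical types and names, not the hypervisor API): every party first validates the requested transition, and only a fully successful check is followed by the commit, so neither side can end up half-updated.

#include <errno.h>
#include <stdio.h>

/* Hypothetical per-page states mirroring the pKVM ownership model. */
enum page_state { NOPAGE, OWNED };

struct component { const char *name; enum page_state state; };

/* Phase 1: both ends agree the transition is legal. */
static int check_donation(const struct component *from, const struct component *to)
{
	if (from->state != OWNED || to->state != NOPAGE)
		return -EPERM;
	return 0;
}

/* Phase 2: commit on both ends; only reached if the check succeeded. */
static void do_donate(struct component *from, struct component *to)
{
	from->state = NOPAGE;	/* Initiator: OWNED => NOPAGE */
	to->state = OWNED;	/* Completer: NOPAGE => OWNED */
}

int main(void)
{
	struct component host = { "host", OWNED };
	struct component hyp  = { "hyp",  NOPAGE };

	if (!check_donation(&host, &hyp))
		do_donate(&host, &hyp);

	printf("host=%d hyp=%d\n", host.state, hyp.state);	/* host=0 hyp=1 */
	return 0;
}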
int __pkvm_host_share_hyp(u64 pfn)
{
int ret;
@@ -797,3 +1148,158 @@ int __pkvm_host_unshare_hyp(u64 pfn)
return ret;
}
+
+int __pkvm_host_donate_hyp(u64 pfn, u64 nr_pages)
+{
+ int ret;
+ u64 host_addr = hyp_pfn_to_phys(pfn);
+ u64 hyp_addr = (u64)__hyp_va(host_addr);
+ struct pkvm_mem_donation donation = {
+ .tx = {
+ .nr_pages = nr_pages,
+ .initiator = {
+ .id = PKVM_ID_HOST,
+ .addr = host_addr,
+ .host = {
+ .completer_addr = hyp_addr,
+ },
+ },
+ .completer = {
+ .id = PKVM_ID_HYP,
+ },
+ },
+ };
+
+ host_lock_component();
+ hyp_lock_component();
+
+ ret = do_donate(&donation);
+
+ hyp_unlock_component();
+ host_unlock_component();
+
+ return ret;
+}
+
+int __pkvm_hyp_donate_host(u64 pfn, u64 nr_pages)
+{
+ int ret;
+ u64 host_addr = hyp_pfn_to_phys(pfn);
+ u64 hyp_addr = (u64)__hyp_va(host_addr);
+ struct pkvm_mem_donation donation = {
+ .tx = {
+ .nr_pages = nr_pages,
+ .initiator = {
+ .id = PKVM_ID_HYP,
+ .addr = hyp_addr,
+ .hyp = {
+ .completer_addr = host_addr,
+ },
+ },
+ .completer = {
+ .id = PKVM_ID_HOST,
+ },
+ },
+ };
+
+ host_lock_component();
+ hyp_lock_component();
+
+ ret = do_donate(&donation);
+
+ hyp_unlock_component();
+ host_unlock_component();
+
+ return ret;
+}
+
+int hyp_pin_shared_mem(void *from, void *to)
+{
+ u64 cur, start = ALIGN_DOWN((u64)from, PAGE_SIZE);
+ u64 end = PAGE_ALIGN((u64)to);
+ u64 size = end - start;
+ int ret;
+
+ host_lock_component();
+ hyp_lock_component();
+
+ ret = __host_check_page_state_range(__hyp_pa(start), size,
+ PKVM_PAGE_SHARED_OWNED);
+ if (ret)
+ goto unlock;
+
+ ret = __hyp_check_page_state_range(start, size,
+ PKVM_PAGE_SHARED_BORROWED);
+ if (ret)
+ goto unlock;
+
+ for (cur = start; cur < end; cur += PAGE_SIZE)
+ hyp_page_ref_inc(hyp_virt_to_page(cur));
+
+unlock:
+ hyp_unlock_component();
+ host_unlock_component();
+
+ return ret;
+}
+
+void hyp_unpin_shared_mem(void *from, void *to)
+{
+ u64 cur, start = ALIGN_DOWN((u64)from, PAGE_SIZE);
+ u64 end = PAGE_ALIGN((u64)to);
+
+ host_lock_component();
+ hyp_lock_component();
+
+ for (cur = start; cur < end; cur += PAGE_SIZE)
+ hyp_page_ref_dec(hyp_virt_to_page(cur));
+
+ hyp_unlock_component();
+ host_unlock_component();
+}
+
+int __pkvm_host_share_ffa(u64 pfn, u64 nr_pages)
+{
+ int ret;
+ struct pkvm_mem_share share = {
+ .tx = {
+ .nr_pages = nr_pages,
+ .initiator = {
+ .id = PKVM_ID_HOST,
+ .addr = hyp_pfn_to_phys(pfn),
+ },
+ .completer = {
+ .id = PKVM_ID_FFA,
+ },
+ },
+ };
+
+ host_lock_component();
+ ret = do_share(&share);
+ host_unlock_component();
+
+ return ret;
+}
+
+int __pkvm_host_unshare_ffa(u64 pfn, u64 nr_pages)
+{
+ int ret;
+ struct pkvm_mem_share share = {
+ .tx = {
+ .nr_pages = nr_pages,
+ .initiator = {
+ .id = PKVM_ID_HOST,
+ .addr = hyp_pfn_to_phys(pfn),
+ },
+ .completer = {
+ .id = PKVM_ID_FFA,
+ },
+ },
+ };
+
+ host_lock_component();
+ ret = do_unshare(&share);
+ host_unlock_component();
+
+ return ret;
+}
diff --git a/arch/arm64/kvm/hyp/nvhe/mm.c b/arch/arm64/kvm/hyp/nvhe/mm.c
index 96193cb31a39..8850b591d775 100644
--- a/arch/arm64/kvm/hyp/nvhe/mm.c
+++ b/arch/arm64/kvm/hyp/nvhe/mm.c
@@ -14,6 +14,7 @@
#include <nvhe/early_alloc.h>
#include <nvhe/gfp.h>
#include <nvhe/memory.h>
+#include <nvhe/mem_protect.h>
#include <nvhe/mm.h>
#include <nvhe/spinlock.h>
@@ -25,6 +26,12 @@ unsigned int hyp_memblock_nr;
static u64 __io_map_base;
+struct hyp_fixmap_slot {
+ u64 addr;
+ kvm_pte_t *ptep;
+};
+static DEFINE_PER_CPU(struct hyp_fixmap_slot, fixmap_slots);
+
static int __pkvm_create_mappings(unsigned long start, unsigned long size,
unsigned long phys, enum kvm_pgtable_prot prot)
{
@@ -37,6 +44,27 @@ static int __pkvm_create_mappings(unsigned long start, unsigned long size,
return err;
}
+static int __pkvm_alloc_private_va_range(unsigned long start, size_t size)
+{
+ unsigned long cur;
+
+ hyp_assert_lock_held(&pkvm_pgd_lock);
+
+ if (!start || start < __io_map_base)
+ return -EINVAL;
+
+ /* The allocated size is always a multiple of PAGE_SIZE */
+ cur = start + PAGE_ALIGN(size);
+
+	/* Are we overflowing on the vmemmap? */
+ if (cur > __hyp_vmemmap)
+ return -ENOMEM;
+
+ __io_map_base = cur;
+
+ return 0;
+}
+
/**
* pkvm_alloc_private_va_range - Allocates a private VA range.
* @size: The size of the VA range to reserve.
@@ -49,27 +77,16 @@ static int __pkvm_create_mappings(unsigned long start, unsigned long size,
*/
int pkvm_alloc_private_va_range(size_t size, unsigned long *haddr)
{
- unsigned long base, addr;
- int ret = 0;
+ unsigned long addr;
+ int ret;
hyp_spin_lock(&pkvm_pgd_lock);
-
- /* Align the allocation based on the order of its size */
- addr = ALIGN(__io_map_base, PAGE_SIZE << get_order(size));
-
- /* The allocated size is always a multiple of PAGE_SIZE */
- base = addr + PAGE_ALIGN(size);
-
- /* Are we overflowing on the vmemmap ? */
- if (!addr || base > __hyp_vmemmap)
- ret = -ENOMEM;
- else {
- __io_map_base = base;
- *haddr = addr;
- }
-
+ addr = __io_map_base;
+ ret = __pkvm_alloc_private_va_range(addr, size);
hyp_spin_unlock(&pkvm_pgd_lock);
+ *haddr = addr;
+
return ret;
}
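A hedged sketch of the bump-pointer allocation performed by __pkvm_alloc_private_va_range(): the base only ever moves forward, the size is rounded up to whole pages, and the request fails rather than run into the region reserved above it. The names io_map_base and vmemmap_start below are illustrative stand-ins, not the hypervisor's symbols.

#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

static uint64_t io_map_base   = 0x40000000UL;	/* next free private VA */
static uint64_t vmemmap_start = 0x80000000UL;	/* hard upper limit */

/* Reserve 'size' bytes of private VA; return 0 and the base on success. */
static int alloc_private_va_range(size_t size, uint64_t *va)
{
	uint64_t start = io_map_base;
	uint64_t end = start + PAGE_ALIGN(size);

	/* Refuse wrap-around or collision with the vmemmap region. */
	if (end < start || end > vmemmap_start)
		return -ENOMEM;

	io_map_base = end;
	*va = start;
	return 0;
}

int main(void)
{
	uint64_t va;

	if (!alloc_private_va_range(3 * PAGE_SIZE + 1, &va))
		printf("range at %#lx, next base %#lx\n",
		       (unsigned long)va, (unsigned long)io_map_base);
	return 0;
}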
@@ -129,13 +146,36 @@ int pkvm_create_mappings(void *from, void *to, enum kvm_pgtable_prot prot)
return ret;
}
-int hyp_back_vmemmap(phys_addr_t phys, unsigned long size, phys_addr_t back)
+int hyp_back_vmemmap(phys_addr_t back)
{
- unsigned long start, end;
+ unsigned long i, start, size, end = 0;
+ int ret;
- hyp_vmemmap_range(phys, size, &start, &end);
+ for (i = 0; i < hyp_memblock_nr; i++) {
+ start = hyp_memory[i].base;
+ start = ALIGN_DOWN((u64)hyp_phys_to_page(start), PAGE_SIZE);
+ /*
+ * The beginning of the hyp_vmemmap region for the current
+ * memblock may already be backed by the page backing the end
+	 * of the previous region, so avoid mapping it twice.
+ */
+ start = max(start, end);
+
+ end = hyp_memory[i].base + hyp_memory[i].size;
+ end = PAGE_ALIGN((u64)hyp_phys_to_page(end));
+ if (start >= end)
+ continue;
+
+ size = end - start;
+ ret = __pkvm_create_mappings(start, size, back, PAGE_HYP);
+ if (ret)
+ return ret;
+
+ memset(hyp_phys_to_virt(back), 0, size);
+ back += size;
+ }
- return __pkvm_create_mappings(start, end - start, back, PAGE_HYP);
+ return 0;
}
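To illustrate how hyp_back_vmemmap() avoids mapping the same backing page twice, here is a simplified userspace model (toy memblock layout and struct-page size, not the real hyp_vmemmap arithmetic): each region's start in the vmemmap is clamped to the previous region's page-aligned end before being mapped.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE		4096UL
#define PAGE_ALIGN(x)		(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))
#define STRUCT_PAGE_SIZE	64UL	/* illustrative sizeof(struct hyp_page) */

struct memblock { uint64_t base, size; };

/* VA of the struct page describing physical address 'phys' (toy layout). */
static uint64_t phys_to_page_va(uint64_t phys)
{
	return 0xffff000000000000UL + (phys / PAGE_SIZE) * STRUCT_PAGE_SIZE;
}

int main(void)
{
	struct memblock mem[] = { { 0x80000000, 0x10000 }, { 0x80010000, 0x20000 } };
	uint64_t start, end = 0;

	for (unsigned int i = 0; i < 2; i++) {
		start = ALIGN_DOWN(phys_to_page_va(mem[i].base), PAGE_SIZE);
		/*
		 * The page backing the end of the previous range may already
		 * cover the start of this one: never map it twice.
		 */
		start = start > end ? start : end;
		end = PAGE_ALIGN(phys_to_page_va(mem[i].base + mem[i].size));
		if (start >= end)
			continue;
		printf("map vmemmap [%#lx, %#lx)\n",
		       (unsigned long)start, (unsigned long)end);
	}
	return 0;
}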
static void *__hyp_bp_vect_base;
@@ -189,6 +229,102 @@ int hyp_map_vectors(void)
return 0;
}
+void *hyp_fixmap_map(phys_addr_t phys)
+{
+ struct hyp_fixmap_slot *slot = this_cpu_ptr(&fixmap_slots);
+ kvm_pte_t pte, *ptep = slot->ptep;
+
+ pte = *ptep;
+ pte &= ~kvm_phys_to_pte(KVM_PHYS_INVALID);
+ pte |= kvm_phys_to_pte(phys) | KVM_PTE_VALID;
+ WRITE_ONCE(*ptep, pte);
+ dsb(ishst);
+
+ return (void *)slot->addr;
+}
+
+static void fixmap_clear_slot(struct hyp_fixmap_slot *slot)
+{
+ kvm_pte_t *ptep = slot->ptep;
+ u64 addr = slot->addr;
+
+ WRITE_ONCE(*ptep, *ptep & ~KVM_PTE_VALID);
+
+ /*
+ * Irritatingly, the architecture requires that we use inner-shareable
+ * broadcast TLB invalidation here in case another CPU speculates
+	 * through our fixmap and decides to create an "amalgamation of the
+ * values held in the TLB" due to the apparent lack of a
+ * break-before-make sequence.
+ *
+ * https://lore.kernel.org/kvm/20221017115209.2099-1-will@kernel.org/T/#mf10dfbaf1eaef9274c581b81c53758918c1d0f03
+ */
+ dsb(ishst);
+ __tlbi_level(vale2is, __TLBI_VADDR(addr, 0), KVM_PGTABLE_LAST_LEVEL);
+ dsb(ish);
+ isb();
+}
+
+void hyp_fixmap_unmap(void)
+{
+ fixmap_clear_slot(this_cpu_ptr(&fixmap_slots));
+}
+
+static int __create_fixmap_slot_cb(const struct kvm_pgtable_visit_ctx *ctx,
+ enum kvm_pgtable_walk_flags visit)
+{
+ struct hyp_fixmap_slot *slot = per_cpu_ptr(&fixmap_slots, (u64)ctx->arg);
+
+ if (!kvm_pte_valid(ctx->old) || ctx->level != KVM_PGTABLE_LAST_LEVEL)
+ return -EINVAL;
+
+ slot->addr = ctx->addr;
+ slot->ptep = ctx->ptep;
+
+ /*
+ * Clear the PTE, but keep the page-table page refcount elevated to
+ * prevent it from ever being freed. This lets us manipulate the PTEs
+ * by hand safely without ever needing to allocate memory.
+ */
+ fixmap_clear_slot(slot);
+
+ return 0;
+}
+
+static int create_fixmap_slot(u64 addr, u64 cpu)
+{
+ struct kvm_pgtable_walker walker = {
+ .cb = __create_fixmap_slot_cb,
+ .flags = KVM_PGTABLE_WALK_LEAF,
+ .arg = (void *)cpu,
+ };
+
+ return kvm_pgtable_walk(&pkvm_pgtable, addr, PAGE_SIZE, &walker);
+}
+
+int hyp_create_pcpu_fixmap(void)
+{
+ unsigned long addr, i;
+ int ret;
+
+ for (i = 0; i < hyp_nr_cpus; i++) {
+ ret = pkvm_alloc_private_va_range(PAGE_SIZE, &addr);
+ if (ret)
+ return ret;
+
+ ret = kvm_pgtable_hyp_map(&pkvm_pgtable, addr, PAGE_SIZE,
+ __hyp_pa(__hyp_bss_start), PAGE_HYP);
+ if (ret)
+ return ret;
+
+ ret = create_fixmap_slot(addr, i);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
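A minimal model of the per-CPU fixmap idea, under the assumption of a fake one-entry page table per CPU (the real code walks pkvm_pgtable and must also perform the TLB maintenance discussed above): mapping rewrites the reserved slot's PTE, unmapping clears its valid bit.

#include <stdint.h>
#include <stdio.h>

#define PTE_VALID	(1UL << 0)
#define NR_CPUS		4

/* One reserved mapping slot per CPU: a fixed VA and its page-table entry. */
struct fixmap_slot {
	uint64_t addr;		/* pre-reserved virtual address */
	uint64_t *ptep;		/* pointer to the last-level PTE */
};

static uint64_t fake_ptes[NR_CPUS];	/* stand-in for real page tables */
static struct fixmap_slot slots[NR_CPUS];

static uint64_t fixmap_map(unsigned int cpu, uint64_t phys)
{
	/* Point the slot's PTE at the new physical page and mark it valid. */
	*slots[cpu].ptep = phys | PTE_VALID;
	return slots[cpu].addr;
}

static void fixmap_unmap(unsigned int cpu)
{
	/*
	 * Clearing the valid bit is enough in this model; the hypervisor
	 * must also perform the break-before-make TLB invalidation.
	 */
	*slots[cpu].ptep &= ~PTE_VALID;
}

int main(void)
{
	for (unsigned int cpu = 0; cpu < NR_CPUS; cpu++)
		slots[cpu] = (struct fixmap_slot){ 0xfff0000000UL + cpu * 4096, &fake_ptes[cpu] };

	uint64_t va = fixmap_map(0, 0x80000000);
	printf("cpu0 slot at %#lx, pte=%#lx\n", (unsigned long)va, (unsigned long)fake_ptes[0]);
	fixmap_unmap(0);
	return 0;
}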
int hyp_create_idmap(u32 hyp_va_bits)
{
unsigned long start, end;
@@ -213,3 +349,75 @@ int hyp_create_idmap(u32 hyp_va_bits)
return __pkvm_create_mappings(start, end - start, start, PAGE_HYP_EXEC);
}
+
+int pkvm_create_stack(phys_addr_t phys, unsigned long *haddr)
+{
+ unsigned long addr, prev_base;
+ size_t size;
+ int ret;
+
+ hyp_spin_lock(&pkvm_pgd_lock);
+
+ prev_base = __io_map_base;
+ /*
+ * Efficient stack verification using the PAGE_SHIFT bit implies
+	 * Efficient stack overflow detection using the PAGE_SHIFT bit
+	 * requires the stack + guard page allocation to be aligned to its
+	 * own size, i.e. to 2 * PAGE_SIZE.
+ size = PAGE_SIZE * 2;
+ addr = ALIGN(__io_map_base, size);
+
+ ret = __pkvm_alloc_private_va_range(addr, size);
+ if (!ret) {
+ /*
+ * Since the stack grows downwards, map the stack to the page
+ * at the higher address and leave the lower guard page
+ * unbacked.
+ *
+ * Any valid stack address now has the PAGE_SHIFT bit as 1
+ * and addresses corresponding to the guard page have the
+ * PAGE_SHIFT bit as 0 - this is used for overflow detection.
+ */
+ ret = kvm_pgtable_hyp_map(&pkvm_pgtable, addr + PAGE_SIZE,
+ PAGE_SIZE, phys, PAGE_HYP);
+ if (ret)
+ __io_map_base = prev_base;
+ }
+ hyp_spin_unlock(&pkvm_pgd_lock);
+
+ *haddr = addr + size;
+
+ return ret;
+}
+
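The guard-page trick described in the comment can be shown with a few lines of plain C (illustrative addresses only): because the stack page sits in the upper half of a 2 * PAGE_SIZE aligned pair, a single test of bit PAGE_SHIFT distinguishes a valid stack pointer from one that has fallen into the guard page.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

/*
 * With the (guard page + stack page) pair aligned to 2 * PAGE_SIZE and the
 * stack occupying the higher page, bit PAGE_SHIFT of any address is 1 on
 * the stack page and 0 on the guard page below it.
 */
static bool stack_overflowed(uint64_t sp)
{
	return !(sp & (1UL << PAGE_SHIFT));
}

int main(void)
{
	uint64_t base = 0x100000;			/* 2 * PAGE_SIZE aligned */
	uint64_t stack_top = base + 2 * PAGE_SIZE;	/* initial SP */

	printf("%d\n", stack_overflowed(stack_top - 16));		/* 0: still on the stack page */
	printf("%d\n", stack_overflowed(base + PAGE_SIZE - 16));	/* 1: ran into the guard page */
	return 0;
}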
+static void *admit_host_page(void *arg)
+{
+ struct kvm_hyp_memcache *host_mc = arg;
+
+ if (!host_mc->nr_pages)
+ return NULL;
+
+ /*
+ * The host still owns the pages in its memcache, so we need to go
+ * through a full host-to-hyp donation cycle to change it. Fortunately,
+ * __pkvm_host_donate_hyp() takes care of races for us, so if it
+ * succeeds we're good to go.
+ */
+ if (__pkvm_host_donate_hyp(hyp_phys_to_pfn(host_mc->head), 1))
+ return NULL;
+
+ return pop_hyp_memcache(host_mc, hyp_phys_to_virt);
+}
+
+/* Refill our local memcache by popping pages from the one provided by the host. */
+int refill_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages,
+ struct kvm_hyp_memcache *host_mc)
+{
+ struct kvm_hyp_memcache tmp = *host_mc;
+ int ret;
+
+ ret = __topup_hyp_memcache(mc, min_pages, admit_host_page,
+ hyp_virt_to_phys, &tmp);
+ *host_mc = tmp;
+
+ return ret;
+}
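For context, a memcache of the kind refill_memcache() consumes is an intrusive, singly linked list threaded through the free pages themselves. The sketch below models it in userspace with ordinary pointers instead of the phys/virt conversions the hypervisor uses; push_page() and pop_page() are illustrative names, not the kernel helpers.

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE	4096

/* The cache is just the head of a list threaded through the pages. */
struct memcache {
	void *head;
	unsigned long nr_pages;
};

static void push_page(struct memcache *mc, void *page)
{
	*(void **)page = mc->head;	/* first word links to the previous head */
	mc->head = page;
	mc->nr_pages++;
}

static void *pop_page(struct memcache *mc)
{
	void *page = mc->head;

	if (!page)
		return NULL;
	mc->head = *(void **)page;
	mc->nr_pages--;
	return page;
}

int main(void)
{
	struct memcache mc = { NULL, 0 };

	for (int i = 0; i < 3; i++) {
		void *p = aligned_alloc(PAGE_SIZE, PAGE_SIZE);

		if (p)
			push_page(&mc, p);
	}

	while (mc.nr_pages)
		free(pop_page(&mc));
	return 0;
}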
diff --git a/arch/arm64/kvm/hyp/nvhe/page_alloc.c b/arch/arm64/kvm/hyp/nvhe/page_alloc.c
index d40f0b30b534..e691290d3765 100644
--- a/arch/arm64/kvm/hyp/nvhe/page_alloc.c
+++ b/arch/arm64/kvm/hyp/nvhe/page_alloc.c
@@ -93,11 +93,16 @@ static inline struct hyp_page *node_to_page(struct list_head *node)
static void __hyp_attach_page(struct hyp_pool *pool,
struct hyp_page *p)
{
+ phys_addr_t phys = hyp_page_to_phys(p);
unsigned short order = p->order;
struct hyp_page *buddy;
memset(hyp_page_to_virt(p), 0, PAGE_SIZE << p->order);
+ /* Skip coalescing for 'external' pages being freed into the pool. */
+ if (phys < pool->range_start || phys >= pool->range_end)
+ goto insert;
+
/*
* Only the first struct hyp_page of a high-order page (otherwise known
* as the 'head') should have p->order set. The non-head pages should
@@ -105,7 +110,7 @@ static void __hyp_attach_page(struct hyp_pool *pool,
* after coalescing, so make sure to mark it HYP_NO_ORDER proactively.
*/
p->order = HYP_NO_ORDER;
- for (; (order + 1) < pool->max_order; order++) {
+ for (; (order + 1) <= pool->max_order; order++) {
buddy = __find_buddy_avail(pool, p, order);
if (!buddy)
break;
@@ -116,6 +121,7 @@ static void __hyp_attach_page(struct hyp_pool *pool,
p = min(p, buddy);
}
+insert:
/* Mark the new head, and insert it */
p->order = order;
page_add_to_list(p, &pool->free_area[order]);
@@ -144,25 +150,6 @@ static struct hyp_page *__hyp_extract_page(struct hyp_pool *pool,
return p;
}
-static inline void hyp_page_ref_inc(struct hyp_page *p)
-{
- BUG_ON(p->refcount == USHRT_MAX);
- p->refcount++;
-}
-
-static inline int hyp_page_ref_dec_and_test(struct hyp_page *p)
-{
- BUG_ON(!p->refcount);
- p->refcount--;
- return (p->refcount == 0);
-}
-
-static inline void hyp_set_page_refcounted(struct hyp_page *p)
-{
- BUG_ON(p->refcount);
- p->refcount = 1;
-}
-
static void __hyp_put_page(struct hyp_pool *pool, struct hyp_page *p)
{
if (hyp_page_ref_dec_and_test(p))
@@ -216,9 +203,9 @@ void *hyp_alloc_pages(struct hyp_pool *pool, unsigned short order)
hyp_spin_lock(&pool->lock);
/* Look for a high-enough-order page */
- while (i < pool->max_order && list_empty(&pool->free_area[i]))
+ while (i <= pool->max_order && list_empty(&pool->free_area[i]))
i++;
- if (i >= pool->max_order) {
+ if (i > pool->max_order) {
hyp_spin_unlock(&pool->lock);
return NULL;
}
@@ -241,18 +228,17 @@ int hyp_pool_init(struct hyp_pool *pool, u64 pfn, unsigned int nr_pages,
int i;
hyp_spin_lock_init(&pool->lock);
- pool->max_order = min(MAX_ORDER, get_order((nr_pages + 1) << PAGE_SHIFT));
- for (i = 0; i < pool->max_order; i++)
+ pool->max_order = min(MAX_PAGE_ORDER,
+ get_order(nr_pages << PAGE_SHIFT));
+ for (i = 0; i <= pool->max_order; i++)
INIT_LIST_HEAD(&pool->free_area[i]);
pool->range_start = phys;
pool->range_end = phys + (nr_pages << PAGE_SHIFT);
/* Init the vmemmap portion */
p = hyp_phys_to_page(phys);
- for (i = 0; i < nr_pages; i++) {
- p[i].order = 0;
+ for (i = 0; i < nr_pages; i++)
hyp_set_page_refcounted(&p[i]);
- }
/* Attach the unused pages to the buddy tree */
for (i = reserved_pages; i < nr_pages; i++)
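The max_order changes above make the order range inclusive (0 .. max_order). As a reminder of the underlying buddy scheme, a speculative sketch (simplified, pfn-based rather than address-based): the buddy of a 2^order block is the block whose starting pfn differs only in bit 'order', and coalescing may continue up to and including max_order.

#include <stdint.h>
#include <stdio.h>

/*
 * For a block of 2^order pages starting at 'pfn', the buddy block is the one
 * whose start differs only in bit 'order'; merging two free buddies produces
 * a block of order + 1, up to and including 'max_order'.
 */
static uint64_t buddy_pfn(uint64_t pfn, unsigned int order)
{
	return pfn ^ (1UL << order);
}

int main(void)
{
	unsigned int max_order = 10;
	uint64_t pfn = 0x1234;

	for (unsigned int order = 0; order + 1 <= max_order; order++)
		printf("order %u: buddy of %#lx is %#lx\n", order,
		       (unsigned long)pfn, (unsigned long)buddy_pfn(pfn, order));
	return 0;
}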
diff --git a/arch/arm64/kvm/hyp/nvhe/pkvm.c b/arch/arm64/kvm/hyp/nvhe/pkvm.c
index 99c8d8b73e70..26dd9a20ad6e 100644
--- a/arch/arm64/kvm/hyp/nvhe/pkvm.c
+++ b/arch/arm64/kvm/hyp/nvhe/pkvm.c
@@ -7,8 +7,17 @@
#include <linux/kvm_host.h>
#include <linux/mm.h>
#include <nvhe/fixed_config.h>
+#include <nvhe/mem_protect.h>
+#include <nvhe/memory.h>
+#include <nvhe/pkvm.h>
#include <nvhe/trap_handler.h>
+/* Used by icache_is_aliasing(). */
+unsigned long __icache_flags;
+
+/* Used by kvm_get_vttbr(). */
+unsigned int kvm_arm_vmid_bits;
+
/*
* Set trap register values based on features in ID_AA64PFR0.
*/
@@ -18,42 +27,51 @@ static void pvm_init_traps_aa64pfr0(struct kvm_vcpu *vcpu)
u64 hcr_set = HCR_RW;
u64 hcr_clear = 0;
u64 cptr_set = 0;
+ u64 cptr_clear = 0;
/* Protected KVM does not support AArch32 guests. */
- BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL0),
- PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) != ID_AA64PFR0_ELx_64BIT_ONLY);
- BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1),
- PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) != ID_AA64PFR0_ELx_64BIT_ONLY);
+ BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0),
+ PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) != ID_AA64PFR0_EL1_ELx_64BIT_ONLY);
+ BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL1),
+ PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) != ID_AA64PFR0_EL1_ELx_64BIT_ONLY);
/*
* Linux guests assume support for floating-point and Advanced SIMD. Do
* not change the trapping behavior for these from the KVM default.
*/
- BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_FP),
+ BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_FP),
PVM_ID_AA64PFR0_ALLOW));
- BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_ASIMD),
+ BUILD_BUG_ON(!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AdvSIMD),
PVM_ID_AA64PFR0_ALLOW));
+ if (has_hvhe())
+ hcr_set |= HCR_E2H;
+
/* Trap RAS unless all current versions are supported */
- if (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_RAS), feature_ids) <
- ID_AA64PFR0_RAS_V1P1) {
+ if (FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_RAS), feature_ids) <
+ ID_AA64PFR0_EL1_RAS_V1P1) {
hcr_set |= HCR_TERR | HCR_TEA;
hcr_clear |= HCR_FIEN;
}
/* Trap AMU */
- if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_AMU), feature_ids)) {
+ if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AMU), feature_ids)) {
hcr_clear |= HCR_AMVOFFEN;
cptr_set |= CPTR_EL2_TAM;
}
/* Trap SVE */
- if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_SVE), feature_ids))
- cptr_set |= CPTR_EL2_TZ;
+ if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE), feature_ids)) {
+ if (has_hvhe())
+ cptr_clear |= CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN;
+ else
+ cptr_set |= CPTR_EL2_TZ;
+ }
vcpu->arch.hcr_el2 |= hcr_set;
vcpu->arch.hcr_el2 &= ~hcr_clear;
vcpu->arch.cptr_el2 |= cptr_set;
+ vcpu->arch.cptr_el2 &= ~cptr_clear;
}
/*
@@ -66,7 +84,7 @@ static void pvm_init_traps_aa64pfr1(struct kvm_vcpu *vcpu)
u64 hcr_clear = 0;
/* Memory Tagging: Trap and Treat as Untagged if not supported. */
- if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR1_MTE), feature_ids)) {
+ if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE), feature_ids)) {
hcr_set |= HCR_TID5;
hcr_clear |= HCR_DCT | HCR_ATA;
}
@@ -86,33 +104,41 @@ static void pvm_init_traps_aa64dfr0(struct kvm_vcpu *vcpu)
u64 cptr_set = 0;
/* Trap/constrain PMU */
- if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_PMUVER), feature_ids)) {
+ if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), feature_ids)) {
mdcr_set |= MDCR_EL2_TPM | MDCR_EL2_TPMCR;
mdcr_clear |= MDCR_EL2_HPME | MDCR_EL2_MTPME |
MDCR_EL2_HPMN_MASK;
}
/* Trap Debug */
- if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_DEBUGVER), feature_ids))
+ if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DebugVer), feature_ids))
mdcr_set |= MDCR_EL2_TDRA | MDCR_EL2_TDA | MDCR_EL2_TDE;
/* Trap OS Double Lock */
- if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_DOUBLELOCK), feature_ids))
+ if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DoubleLock), feature_ids))
mdcr_set |= MDCR_EL2_TDOSA;
/* Trap SPE */
- if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_PMSVER), feature_ids)) {
+ if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMSVer), feature_ids)) {
mdcr_set |= MDCR_EL2_TPMS;
mdcr_clear |= MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT;
}
/* Trap Trace Filter */
- if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_TRACE_FILT), feature_ids))
+ if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceFilt), feature_ids))
mdcr_set |= MDCR_EL2_TTRF;
/* Trap Trace */
- if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_TRACEVER), feature_ids))
- cptr_set |= CPTR_EL2_TTA;
+ if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_TraceVer), feature_ids)) {
+ if (has_hvhe())
+ cptr_set |= CPACR_EL1_TTA;
+ else
+ cptr_set |= CPTR_EL2_TTA;
+ }
+
+ /* Trap External Trace */
+ if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_ExtTrcBuff), feature_ids))
+ mdcr_clear |= MDCR_EL2_E2TB_MASK << MDCR_EL2_E2TB_SHIFT;
vcpu->arch.mdcr_el2 |= mdcr_set;
vcpu->arch.mdcr_el2 &= ~mdcr_clear;
@@ -128,7 +154,7 @@ static void pvm_init_traps_aa64mmfr0(struct kvm_vcpu *vcpu)
u64 mdcr_set = 0;
/* Trap Debug Communications Channel registers */
- if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_FGT), feature_ids))
+ if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_EL1_FGT), feature_ids))
mdcr_set |= MDCR_EL2_TDCC;
vcpu->arch.mdcr_el2 |= mdcr_set;
@@ -143,7 +169,7 @@ static void pvm_init_traps_aa64mmfr1(struct kvm_vcpu *vcpu)
u64 hcr_set = 0;
/* Trap LOR */
- if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR1_LOR), feature_ids))
+ if (!FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR1_EL1_LO), feature_ids))
hcr_set |= HCR_TLOR;
vcpu->arch.hcr_el2 |= hcr_set;
@@ -167,8 +193,10 @@ static void pvm_init_trap_regs(struct kvm_vcpu *vcpu)
/* Clear res0 and set res1 bits to trap potential new features. */
vcpu->arch.hcr_el2 &= ~(HCR_RES0);
vcpu->arch.mdcr_el2 &= ~(MDCR_EL2_RES0);
- vcpu->arch.cptr_el2 |= CPTR_NVHE_EL2_RES1;
- vcpu->arch.cptr_el2 &= ~(CPTR_NVHE_EL2_RES0);
+ if (!has_hvhe()) {
+ vcpu->arch.cptr_el2 |= CPTR_NVHE_EL2_RES1;
+ vcpu->arch.cptr_el2 &= ~(CPTR_NVHE_EL2_RES0);
+ }
}
/*
@@ -183,3 +211,430 @@ void __pkvm_vcpu_init_traps(struct kvm_vcpu *vcpu)
pvm_init_traps_aa64mmfr0(vcpu);
pvm_init_traps_aa64mmfr1(vcpu);
}
+
+/*
+ * Start the VM table handle at the offset defined instead of at 0.
+ * Mainly for sanity checking and debugging.
+ */
+#define HANDLE_OFFSET 0x1000
+
+static unsigned int vm_handle_to_idx(pkvm_handle_t handle)
+{
+ return handle - HANDLE_OFFSET;
+}
+
+static pkvm_handle_t idx_to_vm_handle(unsigned int idx)
+{
+ return idx + HANDLE_OFFSET;
+}
+
+/*
+ * Spinlock for protecting state related to the VM table. Protects writes
+ * to 'vm_table' and 'nr_table_entries' as well as reads and writes to
+ * 'last_hyp_vcpu_lookup'.
+ */
+static DEFINE_HYP_SPINLOCK(vm_table_lock);
+
+/*
+ * The table of VM entries for protected VMs in hyp.
+ * Allocated at hyp initialization and setup.
+ */
+static struct pkvm_hyp_vm **vm_table;
+
+void pkvm_hyp_vm_table_init(void *tbl)
+{
+ WARN_ON(vm_table);
+ vm_table = tbl;
+}
+
+/*
+ * Return the hyp vm structure corresponding to the handle.
+ */
+static struct pkvm_hyp_vm *get_vm_by_handle(pkvm_handle_t handle)
+{
+ unsigned int idx = vm_handle_to_idx(handle);
+
+ if (unlikely(idx >= KVM_MAX_PVMS))
+ return NULL;
+
+ return vm_table[idx];
+}
+
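A small sketch of the handle scheme used here (illustrative constants, not the hypervisor's): handles are table indices plus a fixed offset, so a stale or forged handle decodes to an out-of-range index and is rejected before vm_table is dereferenced.

#include <stdio.h>

#define HANDLE_OFFSET	0x1000
#define MAX_VMS		8

static void *vm_table[MAX_VMS];

static unsigned int handle_to_idx(unsigned int handle)
{
	return handle - HANDLE_OFFSET;
}

static void *get_vm_by_handle(unsigned int handle)
{
	unsigned int idx = handle_to_idx(handle);

	/* A stale or forged handle wraps the index and is rejected here. */
	if (idx >= MAX_VMS)
		return NULL;
	return vm_table[idx];
}

int main(void)
{
	static int dummy_vm;

	vm_table[0] = &dummy_vm;
	printf("%p %p\n", get_vm_by_handle(HANDLE_OFFSET), get_vm_by_handle(42));
	return 0;
}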
+struct pkvm_hyp_vcpu *pkvm_load_hyp_vcpu(pkvm_handle_t handle,
+ unsigned int vcpu_idx)
+{
+ struct pkvm_hyp_vcpu *hyp_vcpu = NULL;
+ struct pkvm_hyp_vm *hyp_vm;
+
+ hyp_spin_lock(&vm_table_lock);
+ hyp_vm = get_vm_by_handle(handle);
+ if (!hyp_vm || hyp_vm->nr_vcpus <= vcpu_idx)
+ goto unlock;
+
+ hyp_vcpu = hyp_vm->vcpus[vcpu_idx];
+ hyp_page_ref_inc(hyp_virt_to_page(hyp_vm));
+unlock:
+ hyp_spin_unlock(&vm_table_lock);
+ return hyp_vcpu;
+}
+
+void pkvm_put_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu)
+{
+ struct pkvm_hyp_vm *hyp_vm = pkvm_hyp_vcpu_to_hyp_vm(hyp_vcpu);
+
+ hyp_spin_lock(&vm_table_lock);
+ hyp_page_ref_dec(hyp_virt_to_page(hyp_vm));
+ hyp_spin_unlock(&vm_table_lock);
+}
+
+static void unpin_host_vcpu(struct kvm_vcpu *host_vcpu)
+{
+ if (host_vcpu)
+ hyp_unpin_shared_mem(host_vcpu, host_vcpu + 1);
+}
+
+static void unpin_host_vcpus(struct pkvm_hyp_vcpu *hyp_vcpus[],
+ unsigned int nr_vcpus)
+{
+ int i;
+
+ for (i = 0; i < nr_vcpus; i++)
+ unpin_host_vcpu(hyp_vcpus[i]->host_vcpu);
+}
+
+static void init_pkvm_hyp_vm(struct kvm *host_kvm, struct pkvm_hyp_vm *hyp_vm,
+ unsigned int nr_vcpus)
+{
+ hyp_vm->host_kvm = host_kvm;
+ hyp_vm->kvm.created_vcpus = nr_vcpus;
+ hyp_vm->kvm.arch.mmu.vtcr = host_mmu.arch.mmu.vtcr;
+}
+
+static int init_pkvm_hyp_vcpu(struct pkvm_hyp_vcpu *hyp_vcpu,
+ struct pkvm_hyp_vm *hyp_vm,
+ struct kvm_vcpu *host_vcpu,
+ unsigned int vcpu_idx)
+{
+ int ret = 0;
+
+ if (hyp_pin_shared_mem(host_vcpu, host_vcpu + 1))
+ return -EBUSY;
+
+ if (host_vcpu->vcpu_idx != vcpu_idx) {
+ ret = -EINVAL;
+ goto done;
+ }
+
+ hyp_vcpu->host_vcpu = host_vcpu;
+
+ hyp_vcpu->vcpu.kvm = &hyp_vm->kvm;
+ hyp_vcpu->vcpu.vcpu_id = READ_ONCE(host_vcpu->vcpu_id);
+ hyp_vcpu->vcpu.vcpu_idx = vcpu_idx;
+
+ hyp_vcpu->vcpu.arch.hw_mmu = &hyp_vm->kvm.arch.mmu;
+ hyp_vcpu->vcpu.arch.cflags = READ_ONCE(host_vcpu->arch.cflags);
+done:
+ if (ret)
+ unpin_host_vcpu(host_vcpu);
+ return ret;
+}
+
+static int find_free_vm_table_entry(struct kvm *host_kvm)
+{
+ int i;
+
+ for (i = 0; i < KVM_MAX_PVMS; ++i) {
+ if (!vm_table[i])
+ return i;
+ }
+
+ return -ENOMEM;
+}
+
+/*
+ * Allocate a VM table entry and insert a pointer to the new vm.
+ *
+ * Return a unique handle to the protected VM on success,
+ * negative error code on failure.
+ */
+static pkvm_handle_t insert_vm_table_entry(struct kvm *host_kvm,
+ struct pkvm_hyp_vm *hyp_vm)
+{
+ struct kvm_s2_mmu *mmu = &hyp_vm->kvm.arch.mmu;
+ int idx;
+
+ hyp_assert_lock_held(&vm_table_lock);
+
+ /*
+ * Initializing protected state might have failed, yet a malicious
+ * host could trigger this function. Thus, ensure that 'vm_table'
+ * exists.
+ */
+ if (unlikely(!vm_table))
+ return -EINVAL;
+
+ idx = find_free_vm_table_entry(host_kvm);
+ if (idx < 0)
+ return idx;
+
+ hyp_vm->kvm.arch.pkvm.handle = idx_to_vm_handle(idx);
+
+ /* VMID 0 is reserved for the host */
+ atomic64_set(&mmu->vmid.id, idx + 1);
+
+ mmu->arch = &hyp_vm->kvm.arch;
+ mmu->pgt = &hyp_vm->pgt;
+
+ vm_table[idx] = hyp_vm;
+ return hyp_vm->kvm.arch.pkvm.handle;
+}
+
+/*
+ * Deallocate and remove the VM table entry corresponding to the handle.
+ */
+static void remove_vm_table_entry(pkvm_handle_t handle)
+{
+ hyp_assert_lock_held(&vm_table_lock);
+ vm_table[vm_handle_to_idx(handle)] = NULL;
+}
+
+static size_t pkvm_get_hyp_vm_size(unsigned int nr_vcpus)
+{
+ return size_add(sizeof(struct pkvm_hyp_vm),
+ size_mul(sizeof(struct pkvm_hyp_vcpu *), nr_vcpus));
+}
+
+static void *map_donated_memory_noclear(unsigned long host_va, size_t size)
+{
+ void *va = (void *)kern_hyp_va(host_va);
+
+ if (!PAGE_ALIGNED(va))
+ return NULL;
+
+ if (__pkvm_host_donate_hyp(hyp_virt_to_pfn(va),
+ PAGE_ALIGN(size) >> PAGE_SHIFT))
+ return NULL;
+
+ return va;
+}
+
+static void *map_donated_memory(unsigned long host_va, size_t size)
+{
+ void *va = map_donated_memory_noclear(host_va, size);
+
+ if (va)
+ memset(va, 0, size);
+
+ return va;
+}
+
+static void __unmap_donated_memory(void *va, size_t size)
+{
+ WARN_ON(__pkvm_hyp_donate_host(hyp_virt_to_pfn(va),
+ PAGE_ALIGN(size) >> PAGE_SHIFT));
+}
+
+static void unmap_donated_memory(void *va, size_t size)
+{
+ if (!va)
+ return;
+
+ memset(va, 0, size);
+ __unmap_donated_memory(va, size);
+}
+
+static void unmap_donated_memory_noclear(void *va, size_t size)
+{
+ if (!va)
+ return;
+
+ __unmap_donated_memory(va, size);
+}
+
+/*
+ * Initialize the hypervisor copy of the protected VM state using the
+ * memory donated by the host.
+ *
+ * Unmaps the donated memory from the host at stage 2.
+ *
+ * host_kvm: A pointer to the host's struct kvm.
+ * vm_hva: The host va of the area being donated for the VM state.
+ * Must be page aligned.
+ * pgd_hva: The host va of the area being donated for the stage-2 PGD for
+ * the VM. Must be page aligned. Its size is implied by the VM's
+ * VTCR.
+ *
+ * Return a unique handle to the protected VM on success,
+ * negative error code on failure.
+ */
+int __pkvm_init_vm(struct kvm *host_kvm, unsigned long vm_hva,
+ unsigned long pgd_hva)
+{
+ struct pkvm_hyp_vm *hyp_vm = NULL;
+ size_t vm_size, pgd_size;
+ unsigned int nr_vcpus;
+ void *pgd = NULL;
+ int ret;
+
+ ret = hyp_pin_shared_mem(host_kvm, host_kvm + 1);
+ if (ret)
+ return ret;
+
+ nr_vcpus = READ_ONCE(host_kvm->created_vcpus);
+ if (nr_vcpus < 1) {
+ ret = -EINVAL;
+ goto err_unpin_kvm;
+ }
+
+ vm_size = pkvm_get_hyp_vm_size(nr_vcpus);
+ pgd_size = kvm_pgtable_stage2_pgd_size(host_mmu.arch.mmu.vtcr);
+
+ ret = -ENOMEM;
+
+ hyp_vm = map_donated_memory(vm_hva, vm_size);
+ if (!hyp_vm)
+ goto err_remove_mappings;
+
+ pgd = map_donated_memory_noclear(pgd_hva, pgd_size);
+ if (!pgd)
+ goto err_remove_mappings;
+
+ init_pkvm_hyp_vm(host_kvm, hyp_vm, nr_vcpus);
+
+ hyp_spin_lock(&vm_table_lock);
+ ret = insert_vm_table_entry(host_kvm, hyp_vm);
+ if (ret < 0)
+ goto err_unlock;
+
+ ret = kvm_guest_prepare_stage2(hyp_vm, pgd);
+ if (ret)
+ goto err_remove_vm_table_entry;
+ hyp_spin_unlock(&vm_table_lock);
+
+ return hyp_vm->kvm.arch.pkvm.handle;
+
+err_remove_vm_table_entry:
+ remove_vm_table_entry(hyp_vm->kvm.arch.pkvm.handle);
+err_unlock:
+ hyp_spin_unlock(&vm_table_lock);
+err_remove_mappings:
+ unmap_donated_memory(hyp_vm, vm_size);
+ unmap_donated_memory(pgd, pgd_size);
+err_unpin_kvm:
+ hyp_unpin_shared_mem(host_kvm, host_kvm + 1);
+ return ret;
+}
+
+/*
+ * Initialize the hypervisor copy of the protected vCPU state using the
+ * memory donated by the host.
+ *
+ * handle: The handle for the protected vm.
+ * host_vcpu: A pointer to the corresponding host vcpu.
+ * vcpu_hva: The host va of the area being donated for the vcpu state.
+ * Must be page aligned. The size of the area must be equal to
+ * the page-aligned size of 'struct pkvm_hyp_vcpu'.
+ * Return 0 on success, negative error code on failure.
+ */
+int __pkvm_init_vcpu(pkvm_handle_t handle, struct kvm_vcpu *host_vcpu,
+ unsigned long vcpu_hva)
+{
+ struct pkvm_hyp_vcpu *hyp_vcpu;
+ struct pkvm_hyp_vm *hyp_vm;
+ unsigned int idx;
+ int ret;
+
+ hyp_vcpu = map_donated_memory(vcpu_hva, sizeof(*hyp_vcpu));
+ if (!hyp_vcpu)
+ return -ENOMEM;
+
+ hyp_spin_lock(&vm_table_lock);
+
+ hyp_vm = get_vm_by_handle(handle);
+ if (!hyp_vm) {
+ ret = -ENOENT;
+ goto unlock;
+ }
+
+ idx = hyp_vm->nr_vcpus;
+ if (idx >= hyp_vm->kvm.created_vcpus) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ ret = init_pkvm_hyp_vcpu(hyp_vcpu, hyp_vm, host_vcpu, idx);
+ if (ret)
+ goto unlock;
+
+ hyp_vm->vcpus[idx] = hyp_vcpu;
+ hyp_vm->nr_vcpus++;
+unlock:
+ hyp_spin_unlock(&vm_table_lock);
+
+ if (ret)
+ unmap_donated_memory(hyp_vcpu, sizeof(*hyp_vcpu));
+
+ return ret;
+}
+
+static void
+teardown_donated_memory(struct kvm_hyp_memcache *mc, void *addr, size_t size)
+{
+ size = PAGE_ALIGN(size);
+ memset(addr, 0, size);
+
+ for (void *start = addr; start < addr + size; start += PAGE_SIZE)
+ push_hyp_memcache(mc, start, hyp_virt_to_phys);
+
+ unmap_donated_memory_noclear(addr, size);
+}
+
+int __pkvm_teardown_vm(pkvm_handle_t handle)
+{
+ struct kvm_hyp_memcache *mc;
+ struct pkvm_hyp_vm *hyp_vm;
+ struct kvm *host_kvm;
+ unsigned int idx;
+ size_t vm_size;
+ int err;
+
+ hyp_spin_lock(&vm_table_lock);
+ hyp_vm = get_vm_by_handle(handle);
+ if (!hyp_vm) {
+ err = -ENOENT;
+ goto err_unlock;
+ }
+
+ if (WARN_ON(hyp_page_count(hyp_vm))) {
+ err = -EBUSY;
+ goto err_unlock;
+ }
+
+ host_kvm = hyp_vm->host_kvm;
+
+ /* Ensure the VMID is clean before it can be reallocated */
+ __kvm_tlb_flush_vmid(&hyp_vm->kvm.arch.mmu);
+ remove_vm_table_entry(handle);
+ hyp_spin_unlock(&vm_table_lock);
+
+ /* Reclaim guest pages (including page-table pages) */
+ mc = &host_kvm->arch.pkvm.teardown_mc;
+ reclaim_guest_pages(hyp_vm, mc);
+ unpin_host_vcpus(hyp_vm->vcpus, hyp_vm->nr_vcpus);
+
+ /* Push the metadata pages to the teardown memcache */
+ for (idx = 0; idx < hyp_vm->nr_vcpus; ++idx) {
+ struct pkvm_hyp_vcpu *hyp_vcpu = hyp_vm->vcpus[idx];
+
+ teardown_donated_memory(mc, hyp_vcpu, sizeof(*hyp_vcpu));
+ }
+
+ vm_size = pkvm_get_hyp_vm_size(hyp_vm->kvm.created_vcpus);
+ teardown_donated_memory(mc, hyp_vm, vm_size);
+ hyp_unpin_shared_mem(host_kvm, host_kvm + 1);
+ return 0;
+
+err_unlock:
+ hyp_spin_unlock(&vm_table_lock);
+ return err;
+}
diff --git a/arch/arm64/kvm/hyp/nvhe/psci-relay.c b/arch/arm64/kvm/hyp/nvhe/psci-relay.c
index 08508783ec3d..d57bcb6ab94d 100644
--- a/arch/arm64/kvm/hyp/nvhe/psci-relay.c
+++ b/arch/arm64/kvm/hyp/nvhe/psci-relay.c
@@ -200,7 +200,7 @@ static int psci_system_suspend(u64 func_id, struct kvm_cpu_context *host_ctxt)
__hyp_pa(init_params), 0);
}
-asmlinkage void __noreturn kvm_host_psci_cpu_entry(bool is_cpu_on)
+asmlinkage void __noreturn __kvm_host_psci_cpu_entry(bool is_cpu_on)
{
struct psci_boot_args *boot_args;
struct kvm_cpu_context *host_ctxt;
@@ -273,9 +273,8 @@ static unsigned long psci_1_0_handler(u64 func_id, struct kvm_cpu_context *host_
}
}
-bool kvm_host_psci_handler(struct kvm_cpu_context *host_ctxt)
+bool kvm_host_psci_handler(struct kvm_cpu_context *host_ctxt, u32 func_id)
{
- DECLARE_REG(u64, func_id, host_ctxt, 0);
unsigned long ret;
switch (kvm_host_psci_config.version) {
diff --git a/arch/arm64/kvm/hyp/nvhe/setup.c b/arch/arm64/kvm/hyp/nvhe/setup.c
index e8d4ea2fcfa0..bc58d1b515af 100644
--- a/arch/arm64/kvm/hyp/nvhe/setup.c
+++ b/arch/arm64/kvm/hyp/nvhe/setup.c
@@ -11,11 +11,13 @@
#include <asm/kvm_pkvm.h>
#include <nvhe/early_alloc.h>
+#include <nvhe/ffa.h>
#include <nvhe/fixed_config.h>
#include <nvhe/gfp.h>
#include <nvhe/memory.h>
#include <nvhe/mem_protect.h>
#include <nvhe/mm.h>
+#include <nvhe/pkvm.h>
#include <nvhe/trap_handler.h>
unsigned long hyp_nr_cpus;
@@ -24,23 +26,29 @@ unsigned long hyp_nr_cpus;
(unsigned long)__per_cpu_start)
static void *vmemmap_base;
+static void *vm_table_base;
static void *hyp_pgt_base;
static void *host_s2_pgt_base;
+static void *ffa_proxy_pages;
static struct kvm_pgtable_mm_ops pkvm_pgtable_mm_ops;
static struct hyp_pool hpool;
static int divide_memory_pool(void *virt, unsigned long size)
{
- unsigned long vstart, vend, nr_pages;
+ unsigned long nr_pages;
hyp_early_alloc_init(virt, size);
- hyp_vmemmap_range(__hyp_pa(virt), size, &vstart, &vend);
- nr_pages = (vend - vstart) >> PAGE_SHIFT;
+ nr_pages = hyp_vmemmap_pages(sizeof(struct hyp_page));
vmemmap_base = hyp_early_alloc_contig(nr_pages);
if (!vmemmap_base)
return -ENOMEM;
+ nr_pages = hyp_vm_table_pages();
+ vm_table_base = hyp_early_alloc_contig(nr_pages);
+ if (!vm_table_base)
+ return -ENOMEM;
+
nr_pages = hyp_s1_pgtable_pages();
hyp_pgt_base = hyp_early_alloc_contig(nr_pages);
if (!hyp_pgt_base)
@@ -51,6 +59,11 @@ static int divide_memory_pool(void *virt, unsigned long size)
if (!host_s2_pgt_base)
return -ENOMEM;
+ nr_pages = hyp_ffa_proxy_pages();
+ ffa_proxy_pages = hyp_early_alloc_contig(nr_pages);
+ if (!ffa_proxy_pages)
+ return -ENOMEM;
+
return 0;
}
@@ -78,7 +91,7 @@ static int recreate_hyp_mappings(phys_addr_t phys, unsigned long size,
if (ret)
return ret;
- ret = hyp_back_vmemmap(phys, size, hyp_virt_to_phys(vmemmap_base));
+ ret = hyp_back_vmemmap(hyp_virt_to_phys(vmemmap_base));
if (ret)
return ret;
@@ -100,7 +113,6 @@ static int recreate_hyp_mappings(phys_addr_t phys, unsigned long size,
for (i = 0; i < hyp_nr_cpus; i++) {
struct kvm_nvhe_init_params *params = per_cpu_ptr(&kvm_init_params, i);
- unsigned long hyp_addr;
start = (void *)kern_hyp_va(per_cpu_base[i]);
end = start + PAGE_ALIGN(hyp_percpu_size);
@@ -108,50 +120,23 @@ static int recreate_hyp_mappings(phys_addr_t phys, unsigned long size,
if (ret)
return ret;
- /*
- * Allocate a contiguous HYP private VA range for the stack
- * and guard page. The allocation is also aligned based on
- * the order of its size.
- */
- ret = pkvm_alloc_private_va_range(PAGE_SIZE * 2, &hyp_addr);
- if (ret)
- return ret;
-
- /*
- * Since the stack grows downwards, map the stack to the page
- * at the higher address and leave the lower guard page
- * unbacked.
- *
- * Any valid stack address now has the PAGE_SHIFT bit as 1
- * and addresses corresponding to the guard page have the
- * PAGE_SHIFT bit as 0 - this is used for overflow detection.
- */
- hyp_spin_lock(&pkvm_pgd_lock);
- ret = kvm_pgtable_hyp_map(&pkvm_pgtable, hyp_addr + PAGE_SIZE,
- PAGE_SIZE, params->stack_pa, PAGE_HYP);
- hyp_spin_unlock(&pkvm_pgd_lock);
+ ret = pkvm_create_stack(params->stack_pa, &params->stack_hyp_va);
if (ret)
return ret;
-
- /* Update stack_hyp_va to end of the stack's private VA range */
- params->stack_hyp_va = hyp_addr + (2 * PAGE_SIZE);
}
/*
- * Map the host's .bss and .rodata sections RO in the hypervisor, but
- * transfer the ownership from the host to the hypervisor itself to
- * make sure it can't be donated or shared with another entity.
+ * Map the host sections RO in the hypervisor, but transfer the
+ * ownership from the host to the hypervisor itself to make sure they
+ * can't be donated or shared with another entity.
*
* The ownership transition requires matching changes in the host
* stage-2. This will be done later (see finalize_host_mappings()) once
* the hyp_vmemmap is addressable.
*/
prot = pkvm_mkstate(PAGE_HYP_RO, PKVM_PAGE_SHARED_OWNED);
- ret = pkvm_create_mappings(__start_rodata, __end_rodata, prot);
- if (ret)
- return ret;
-
- ret = pkvm_create_mappings(__hyp_bss_end, __bss_stop, prot);
+ ret = pkvm_create_mappings(&kvm_vgic_global_state,
+ &kvm_vgic_global_state + 1, prot);
if (ret)
return ret;
@@ -186,33 +171,20 @@ static void hpool_put_page(void *addr)
hyp_put_page(&hpool, addr);
}
-static int finalize_host_mappings_walker(u64 addr, u64 end, u32 level,
- kvm_pte_t *ptep,
- enum kvm_pgtable_walk_flags flag,
- void * const arg)
+static int fix_host_ownership_walker(const struct kvm_pgtable_visit_ctx *ctx,
+ enum kvm_pgtable_walk_flags visit)
{
- struct kvm_pgtable_mm_ops *mm_ops = arg;
enum kvm_pgtable_prot prot;
enum pkvm_page_state state;
- kvm_pte_t pte = *ptep;
phys_addr_t phys;
- if (!kvm_pte_valid(pte))
- return 0;
-
- /*
- * Fix-up the refcount for the page-table pages as the early allocator
- * was unable to access the hyp_vmemmap and so the buddy allocator has
- * initialised the refcount to '1'.
- */
- mm_ops->get_page(ptep);
- if (flag != KVM_PGTABLE_WALK_LEAF)
+ if (!kvm_pte_valid(ctx->old))
return 0;
- if (level != (KVM_PGTABLE_MAX_LEVELS - 1))
+ if (ctx->level != KVM_PGTABLE_LAST_LEVEL)
return -EINVAL;
- phys = kvm_pte_to_phys(pte);
+ phys = kvm_pte_to_phys(ctx->old);
if (!addr_is_memory(phys))
return -EINVAL;
@@ -220,10 +192,10 @@ static int finalize_host_mappings_walker(u64 addr, u64 end, u32 level,
* Adjust the host stage-2 mappings to match the ownership attributes
* configured in the hypervisor stage-1.
*/
- state = pkvm_getstate(kvm_pgtable_hyp_pte_prot(pte));
+ state = pkvm_getstate(kvm_pgtable_hyp_pte_prot(ctx->old));
switch (state) {
case PKVM_PAGE_OWNED:
- return host_stage2_set_owner_locked(phys, PAGE_SIZE, pkvm_hyp_id);
+ return host_stage2_set_owner_locked(phys, PAGE_SIZE, PKVM_ID_HYP);
case PKVM_PAGE_SHARED_OWNED:
prot = pkvm_mkstate(PKVM_HOST_MEM_PROT, PKVM_PAGE_SHARED_BORROWED);
break;
@@ -237,12 +209,25 @@ static int finalize_host_mappings_walker(u64 addr, u64 end, u32 level,
return host_stage2_idmap_locked(phys, PAGE_SIZE, prot);
}
-static int finalize_host_mappings(void)
+static int fix_hyp_pgtable_refcnt_walker(const struct kvm_pgtable_visit_ctx *ctx,
+ enum kvm_pgtable_walk_flags visit)
+{
+ /*
+ * Fix-up the refcount for the page-table pages as the early allocator
+ * was unable to access the hyp_vmemmap and so the buddy allocator has
+ * initialised the refcount to '1'.
+ */
+ if (kvm_pte_valid(ctx->old))
+ ctx->mm_ops->get_page(ctx->ptep);
+
+ return 0;
+}
+
+static int fix_host_ownership(void)
{
struct kvm_pgtable_walker walker = {
- .cb = finalize_host_mappings_walker,
- .flags = KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST,
- .arg = pkvm_pgtable.mm_ops,
+ .cb = fix_host_ownership_walker,
+ .flags = KVM_PGTABLE_WALK_LEAF,
};
int i, ret;
@@ -258,6 +243,18 @@ static int finalize_host_mappings(void)
return 0;
}
+static int fix_hyp_pgtable_refcnt(void)
+{
+ struct kvm_pgtable_walker walker = {
+ .cb = fix_hyp_pgtable_refcnt_walker,
+ .flags = KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST,
+ .arg = pkvm_pgtable.mm_ops,
+ };
+
+ return kvm_pgtable_walk(&pkvm_pgtable, 0, BIT(pkvm_pgtable.ia_bits),
+ &walker);
+}
+
void __noreturn __pkvm_init_finalise(void)
{
struct kvm_host_data *host_data = this_cpu_ptr(&kvm_host_data);
@@ -287,10 +284,23 @@ void __noreturn __pkvm_init_finalise(void)
};
pkvm_pgtable.mm_ops = &pkvm_pgtable_mm_ops;
- ret = finalize_host_mappings();
+ ret = fix_host_ownership();
+ if (ret)
+ goto out;
+
+ ret = fix_hyp_pgtable_refcnt();
+ if (ret)
+ goto out;
+
+ ret = hyp_create_pcpu_fixmap();
+ if (ret)
+ goto out;
+
+ ret = hyp_ffa_init(ffa_proxy_pages);
if (ret)
goto out;
+ pkvm_hyp_vm_table_init(vm_table_base);
out:
/*
* We tail-called to here from handle___pkvm_init() and will not return,
diff --git a/arch/arm64/kvm/hyp/nvhe/stacktrace.c b/arch/arm64/kvm/hyp/nvhe/stacktrace.c
new file mode 100644
index 000000000000..ed6b58b19cfa
--- /dev/null
+++ b/arch/arm64/kvm/hyp/nvhe/stacktrace.c
@@ -0,0 +1,158 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * KVM nVHE hypervisor stack tracing support.
+ *
+ * Copyright (C) 2022 Google LLC
+ */
+#include <asm/kvm_asm.h>
+#include <asm/kvm_hyp.h>
+#include <asm/memory.h>
+#include <asm/percpu.h>
+
+DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack)
+ __aligned(16);
+
+DEFINE_PER_CPU(struct kvm_nvhe_stacktrace_info, kvm_stacktrace_info);
+
+/*
+ * hyp_prepare_backtrace - Prepare non-protected nVHE backtrace.
+ *
+ * @fp : frame pointer at which to start the unwinding.
+ * @pc : program counter at which to start the unwinding.
+ *
+ * Save the information needed by the host to unwind the non-protected
+ * nVHE hypervisor stack in EL1.
+ */
+static void hyp_prepare_backtrace(unsigned long fp, unsigned long pc)
+{
+ struct kvm_nvhe_stacktrace_info *stacktrace_info = this_cpu_ptr(&kvm_stacktrace_info);
+ struct kvm_nvhe_init_params *params = this_cpu_ptr(&kvm_init_params);
+
+ stacktrace_info->stack_base = (unsigned long)(params->stack_hyp_va - PAGE_SIZE);
+ stacktrace_info->overflow_stack_base = (unsigned long)this_cpu_ptr(overflow_stack);
+ stacktrace_info->fp = fp;
+ stacktrace_info->pc = pc;
+}
+
+#ifdef CONFIG_PROTECTED_NVHE_STACKTRACE
+#include <asm/stacktrace/nvhe.h>
+
+DEFINE_PER_CPU(unsigned long [NVHE_STACKTRACE_SIZE/sizeof(long)], pkvm_stacktrace);
+
+static struct stack_info stackinfo_get_overflow(void)
+{
+ unsigned long low = (unsigned long)this_cpu_ptr(overflow_stack);
+ unsigned long high = low + OVERFLOW_STACK_SIZE;
+
+ return (struct stack_info) {
+ .low = low,
+ .high = high,
+ };
+}
+
+static struct stack_info stackinfo_get_hyp(void)
+{
+ struct kvm_nvhe_init_params *params = this_cpu_ptr(&kvm_init_params);
+ unsigned long high = params->stack_hyp_va;
+ unsigned long low = high - PAGE_SIZE;
+
+ return (struct stack_info) {
+ .low = low,
+ .high = high,
+ };
+}
+
+static int unwind_next(struct unwind_state *state)
+{
+ return unwind_next_frame_record(state);
+}
+
+static void notrace unwind(struct unwind_state *state,
+ stack_trace_consume_fn consume_entry,
+ void *cookie)
+{
+ while (1) {
+ int ret;
+
+ if (!consume_entry(cookie, state->pc))
+ break;
+ ret = unwind_next(state);
+ if (ret < 0)
+ break;
+ }
+}
+
+/*
+ * pkvm_save_backtrace_entry - Saves a protected nVHE HYP stacktrace entry
+ *
+ * @arg : index of the entry in the stacktrace buffer
+ * @where : the program counter corresponding to the stack frame
+ *
+ * Save the return address of a stack frame to the shared stacktrace buffer.
+ * The host can access this shared buffer from EL1 to dump the backtrace.
+ */
+static bool pkvm_save_backtrace_entry(void *arg, unsigned long where)
+{
+ unsigned long *stacktrace = this_cpu_ptr(pkvm_stacktrace);
+ int *idx = (int *)arg;
+
+ /*
+	 * Need 2 free slots: 1 for the current entry and 1 for the
+ * delimiter.
+ */
+ if (*idx > ARRAY_SIZE(pkvm_stacktrace) - 2)
+ return false;
+
+ stacktrace[*idx] = where;
+ stacktrace[++*idx] = 0UL;
+
+ return true;
+}
+
+/*
+ * pkvm_save_backtrace - Saves the protected nVHE HYP stacktrace
+ *
+ * @fp : frame pointer at which to start the unwinding.
+ * @pc : program counter at which to start the unwinding.
+ *
+ * Save the unwound stack addresses to the shared stacktrace buffer.
+ * The host can access this shared buffer from EL1 to dump the backtrace.
+ */
+static void pkvm_save_backtrace(unsigned long fp, unsigned long pc)
+{
+ struct stack_info stacks[] = {
+ stackinfo_get_overflow(),
+ stackinfo_get_hyp(),
+ };
+ struct unwind_state state = {
+ .stacks = stacks,
+ .nr_stacks = ARRAY_SIZE(stacks),
+ };
+ int idx = 0;
+
+ kvm_nvhe_unwind_init(&state, fp, pc);
+
+ unwind(&state, pkvm_save_backtrace_entry, &idx);
+}
+#else /* !CONFIG_PROTECTED_NVHE_STACKTRACE */
+static void pkvm_save_backtrace(unsigned long fp, unsigned long pc)
+{
+}
+#endif /* CONFIG_PROTECTED_NVHE_STACKTRACE */
+
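The shared stacktrace buffer convention is easy to model on its own (hypothetical buffer size, plain C): each saved program counter is followed by a terminating 0, which is why two free slots are required per append.

#include <stdbool.h>
#include <stdio.h>

#define TRACE_ENTRIES	16

static unsigned long trace[TRACE_ENTRIES];

/* Append one return address and keep the buffer 0-terminated. */
static bool save_entry(int *idx, unsigned long pc)
{
	/* Need two free slots: one for the entry, one for the delimiter. */
	if (*idx > TRACE_ENTRIES - 2)
		return false;

	trace[*idx] = pc;
	trace[++*idx] = 0UL;
	return true;
}

int main(void)
{
	unsigned long frames[] = { 0x1000, 0x1040, 0x1080 };
	int idx = 0;

	for (unsigned int i = 0; i < 3 && save_entry(&idx, frames[i]); i++)
		;

	for (int i = 0; trace[i]; i++)
		printf("pc: %#lx\n", trace[i]);
	return 0;
}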
+/*
+ * kvm_nvhe_prepare_backtrace - prepare to dump the nVHE backtrace
+ *
+ * @fp : frame pointer at which to start the unwinding.
+ * @pc : program counter at which to start the unwinding.
+ *
+ * Saves the information needed by the host to dump the nVHE hypervisor
+ * backtrace.
+ */
+void kvm_nvhe_prepare_backtrace(unsigned long fp, unsigned long pc)
+{
+ if (is_protected_kvm_enabled())
+ pkvm_save_backtrace(fp, pc);
+ else
+ hyp_prepare_backtrace(fp, pc);
+}
diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
index 6db801db8f27..c50f8459e4fc 100644
--- a/arch/arm64/kvm/hyp/nvhe/switch.c
+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
@@ -34,6 +34,8 @@ DEFINE_PER_CPU(struct kvm_host_data, kvm_host_data);
DEFINE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
DEFINE_PER_CPU(unsigned long, kvm_hyp_vector);
+extern void kvm_nvhe_prepare_backtrace(unsigned long fp, unsigned long pc);
+
static void __activate_traps(struct kvm_vcpu *vcpu)
{
u64 val;
@@ -42,29 +44,28 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
__activate_traps_common(vcpu);
val = vcpu->arch.cptr_el2;
- val |= CPTR_EL2_TTA | CPTR_EL2_TAM;
- if (!update_fp_enabled(vcpu)) {
- val |= CPTR_EL2_TFP | CPTR_EL2_TZ;
- __activate_traps_fpsimd32(vcpu);
+ val |= CPTR_EL2_TAM; /* Same bit irrespective of E2H */
+ val |= has_hvhe() ? CPACR_EL1_TTA : CPTR_EL2_TTA;
+ if (cpus_have_final_cap(ARM64_SME)) {
+ if (has_hvhe())
+ val &= ~(CPACR_EL1_SMEN_EL1EN | CPACR_EL1_SMEN_EL0EN);
+ else
+ val |= CPTR_EL2_TSM;
}
- if (cpus_have_final_cap(ARM64_SME))
- val |= CPTR_EL2_TSM;
- write_sysreg(val, cptr_el2);
- write_sysreg(__this_cpu_read(kvm_hyp_vector), vbar_el2);
+ if (!guest_owns_fp_regs(vcpu)) {
+ if (has_hvhe())
+ val &= ~(CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN |
+ CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN);
+ else
+ val |= CPTR_EL2_TFP | CPTR_EL2_TZ;
- if (cpus_have_final_cap(ARM64_SME)) {
- val = read_sysreg_s(SYS_HFGRTR_EL2);
- val &= ~(HFGxTR_EL2_nTPIDR2_EL0_MASK |
- HFGxTR_EL2_nSMPRI_EL1_MASK);
- write_sysreg_s(val, SYS_HFGRTR_EL2);
-
- val = read_sysreg_s(SYS_HFGWTR_EL2);
- val &= ~(HFGxTR_EL2_nTPIDR2_EL0_MASK |
- HFGxTR_EL2_nSMPRI_EL1_MASK);
- write_sysreg_s(val, SYS_HFGWTR_EL2);
+ __activate_traps_fpsimd32(vcpu);
}
+ kvm_write_cptr_el2(val);
+ write_sysreg(__this_cpu_read(kvm_hyp_vector), vbar_el2);
+
if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
struct kvm_cpu_context *ctxt = &vcpu->arch.ctxt;
@@ -83,7 +84,6 @@ static void __activate_traps(struct kvm_vcpu *vcpu)
static void __deactivate_traps(struct kvm_vcpu *vcpu)
{
extern char __kvm_hyp_host_vector[];
- u64 cptr;
___deactivate_traps(vcpu);
@@ -108,27 +108,7 @@ static void __deactivate_traps(struct kvm_vcpu *vcpu)
write_sysreg(this_cpu_ptr(&kvm_init_params)->hcr_el2, hcr_el2);
- if (cpus_have_final_cap(ARM64_SME)) {
- u64 val;
-
- val = read_sysreg_s(SYS_HFGRTR_EL2);
- val |= HFGxTR_EL2_nTPIDR2_EL0_MASK |
- HFGxTR_EL2_nSMPRI_EL1_MASK;
- write_sysreg_s(val, SYS_HFGRTR_EL2);
-
- val = read_sysreg_s(SYS_HFGWTR_EL2);
- val |= HFGxTR_EL2_nTPIDR2_EL0_MASK |
- HFGxTR_EL2_nSMPRI_EL1_MASK;
- write_sysreg_s(val, SYS_HFGWTR_EL2);
- }
-
- cptr = CPTR_EL2_DEFAULT;
- if (vcpu_has_sve(vcpu) && (vcpu->arch.flags & KVM_ARM64_FP_ENABLED))
- cptr |= CPTR_EL2_TZ;
- if (cpus_have_final_cap(ARM64_SME))
- cptr &= ~CPTR_EL2_TSM;
-
- write_sysreg(cptr, cptr_el2);
+ kvm_reset_cptr_el2(vcpu);
write_sysreg(__kvm_hyp_host_vector, vbar_el2);
}
@@ -141,7 +121,7 @@ static void __hyp_vgic_save_state(struct kvm_vcpu *vcpu)
}
}
-/* Restore VGICv3 state on non_VEH systems */
+/* Restore VGICv3 state on non-VHE systems */
static void __hyp_vgic_restore_state(struct kvm_vcpu *vcpu)
{
if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
@@ -210,7 +190,9 @@ static const exit_handler_fn hyp_exit_handlers[] = {
[ESR_ELx_EC_FP_ASIMD] = kvm_hyp_handle_fpsimd,
[ESR_ELx_EC_IABT_LOW] = kvm_hyp_handle_iabt_low,
[ESR_ELx_EC_DABT_LOW] = kvm_hyp_handle_dabt_low,
+ [ESR_ELx_EC_WATCHPT_LOW] = kvm_hyp_handle_watchpt_low,
[ESR_ELx_EC_PAC] = kvm_hyp_handle_ptrauth,
+ [ESR_ELx_EC_MOPS] = kvm_hyp_handle_mops,
};
static const exit_handler_fn pvm_exit_handlers[] = {
@@ -220,7 +202,9 @@ static const exit_handler_fn pvm_exit_handlers[] = {
[ESR_ELx_EC_FP_ASIMD] = kvm_hyp_handle_fpsimd,
[ESR_ELx_EC_IABT_LOW] = kvm_hyp_handle_iabt_low,
[ESR_ELx_EC_DABT_LOW] = kvm_hyp_handle_dabt_low,
+ [ESR_ELx_EC_WATCHPT_LOW] = kvm_hyp_handle_watchpt_low,
[ESR_ELx_EC_PAC] = kvm_hyp_handle_ptrauth,
+ [ESR_ELx_EC_MOPS] = kvm_hyp_handle_mops,
};
static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu)
@@ -254,7 +238,7 @@ static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code)
* KVM_ARM_VCPU_INIT, however, this is likely not possible for
* protected VMs.
*/
- vcpu->arch.target = -1;
+ vcpu_clear_flag(vcpu, VCPU_INITIALIZED);
*exit_code &= BIT(ARM_EXIT_WITH_SERROR_BIT);
*exit_code |= ARM_EXCEPTION_IL;
}
@@ -296,6 +280,17 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
*/
__debug_save_host_buffers_nvhe(vcpu);
+ /*
+ * We're about to restore some new MMU state. Make sure
+ * ongoing page-table walks that have started before we
+ * trapped to EL2 have completed. This also synchronises the
+ * above disabling of SPE and TRBE.
+ *
+ * See DDI0487I.a D8.1.5 "Out-of-context translation regimes",
+ * rule R_LFHQG and subsequent information statements.
+ */
+ dsb(nsh);
+
__kvm_adjust_pc(vcpu);
/*
@@ -330,12 +325,19 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
__timer_disable_traps(vcpu);
__hyp_vgic_save_state(vcpu);
+ /*
+ * Same thing as before the guest run: we're about to switch
+ * the MMU context, so let's make sure we don't have any
+ * ongoing EL1&0 translations.
+ */
+ dsb(nsh);
+
__deactivate_traps(vcpu);
__load_host_stage2();
__sysreg_restore_state_nvhe(host_ctxt);
- if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED)
+ if (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED)
__fpsimd_save_fpexc32(vcpu);
__debug_switch_to_host(vcpu);
@@ -375,6 +377,10 @@ asmlinkage void __noreturn hyp_panic(void)
__sysreg_restore_state_nvhe(host_ctxt);
}
+ /* Prepare to dump kvm nvhe hyp stacktrace */
+ kvm_nvhe_prepare_backtrace((unsigned long)__builtin_frame_address(0),
+ _THIS_IP_);
+
__hyp_do_panic(host_ctxt, spsr, elr, par);
unreachable();
}
@@ -386,5 +392,5 @@ asmlinkage void __noreturn hyp_panic_bad_stack(void)
asmlinkage void kvm_unexpected_el2_exception(void)
{
- return __kvm_unexpected_el2_exception();
+ __kvm_unexpected_el2_exception();
}
diff --git a/arch/arm64/kvm/hyp/nvhe/sys_regs.c b/arch/arm64/kvm/hyp/nvhe/sys_regs.c
index 6b94c3e6ff26..edd969a1f36b 100644
--- a/arch/arm64/kvm/hyp/nvhe/sys_regs.c
+++ b/arch/arm64/kvm/hyp/nvhe/sys_regs.c
@@ -26,6 +26,7 @@ u64 id_aa64isar2_el1_sys_val;
u64 id_aa64mmfr0_el1_sys_val;
u64 id_aa64mmfr1_el1_sys_val;
u64 id_aa64mmfr2_el1_sys_val;
+u64 id_aa64smfr0_el1_sys_val;
/*
* Inject an unknown/undefined exception to an AArch64 guest while most of its
@@ -38,9 +39,7 @@ static void inject_undef64(struct kvm_vcpu *vcpu)
*vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR);
*vcpu_cpsr(vcpu) = read_sysreg_el2(SYS_SPSR);
- vcpu->arch.flags |= (KVM_ARM64_EXCEPT_AA64_EL1 |
- KVM_ARM64_EXCEPT_AA64_ELx_SYNC |
- KVM_ARM64_PENDING_EXCEPTION);
+ kvm_pend_exception(vcpu, EXCEPT_AA64_EL1_SYNC);
__kvm_adjust_pc(vcpu);
@@ -86,19 +85,12 @@ static u64 get_restricted_features_unsigned(u64 sys_reg_val,
static u64 get_pvm_id_aa64pfr0(const struct kvm_vcpu *vcpu)
{
- const struct kvm *kvm = (const struct kvm *)kern_hyp_va(vcpu->kvm);
u64 set_mask = 0;
u64 allow_mask = PVM_ID_AA64PFR0_ALLOW;
set_mask |= get_restricted_features_unsigned(id_aa64pfr0_el1_sys_val,
PVM_ID_AA64PFR0_RESTRICT_UNSIGNED);
- /* Spectre and Meltdown mitigation in KVM */
- set_mask |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_CSV2),
- (u64)kvm->arch.pfr0_csv2);
- set_mask |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_CSV3),
- (u64)kvm->arch.pfr0_csv3);
-
return (id_aa64pfr0_el1_sys_val & allow_mask) | set_mask;
}
@@ -108,7 +100,7 @@ static u64 get_pvm_id_aa64pfr1(const struct kvm_vcpu *vcpu)
u64 allow_mask = PVM_ID_AA64PFR1_ALLOW;
if (!kvm_has_mte(kvm))
- allow_mask &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_MTE);
+ allow_mask &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE);
return id_aa64pfr1_el1_sys_val & allow_mask;
}
@@ -283,8 +275,8 @@ static bool pvm_access_id_aarch32(struct kvm_vcpu *vcpu,
* No support for AArch32 guests, therefore, pKVM has no sanitized copy
* of AArch32 feature id registers.
*/
- BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1),
- PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) > ID_AA64PFR0_ELx_64BIT_ONLY);
+ BUILD_BUG_ON(FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL1),
+ PVM_ID_AA64PFR0_RESTRICT_UNSIGNED) > ID_AA64PFR0_EL1_ELx_64BIT_ONLY);
return pvm_access_raz_wi(vcpu, p, r);
}
diff --git a/arch/arm64/kvm/hyp/nvhe/timer-sr.c b/arch/arm64/kvm/hyp/nvhe/timer-sr.c
index 9072e71693ba..3aaab20ae5b4 100644
--- a/arch/arm64/kvm/hyp/nvhe/timer-sr.c
+++ b/arch/arm64/kvm/hyp/nvhe/timer-sr.c
@@ -9,6 +9,7 @@
#include <linux/kvm_host.h>
#include <asm/kvm_hyp.h>
+#include <asm/kvm_mmu.h>
void __kvm_timer_set_cntvoff(u64 cntvoff)
{
@@ -16,33 +17,46 @@ void __kvm_timer_set_cntvoff(u64 cntvoff)
}
/*
- * Should only be called on non-VHE systems.
+ * Should only be called on non-VHE or hVHE setups.
* VHE systems use EL2 timers and configure EL1 timers in kvm_timer_init_vhe().
*/
void __timer_disable_traps(struct kvm_vcpu *vcpu)
{
- u64 val;
+ u64 val, shift = 0;
+
+ if (has_hvhe())
+ shift = 10;
/* Allow physical timer/counter access for the host */
val = read_sysreg(cnthctl_el2);
- val |= CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN;
+ val |= (CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN) << shift;
write_sysreg(val, cnthctl_el2);
}
/*
- * Should only be called on non-VHE systems.
+ * Should only be called on non-VHE or hVHE setups.
* VHE systems use EL2 timers and configure EL1 timers in kvm_timer_init_vhe().
*/
void __timer_enable_traps(struct kvm_vcpu *vcpu)
{
- u64 val;
+ u64 clr = 0, set = 0;
/*
* Disallow physical timer access for the guest
- * Physical counter access is allowed
+ * Physical counter access is allowed if no offset is enforced
+	 * or when running protected (we don't offset anything in this case).
*/
- val = read_sysreg(cnthctl_el2);
- val &= ~CNTHCTL_EL1PCEN;
- val |= CNTHCTL_EL1PCTEN;
- write_sysreg(val, cnthctl_el2);
+ clr = CNTHCTL_EL1PCEN;
+ if (is_protected_kvm_enabled() ||
+ !kern_hyp_va(vcpu->kvm)->arch.timer_data.poffset)
+ set |= CNTHCTL_EL1PCTEN;
+ else
+ clr |= CNTHCTL_EL1PCTEN;
+
+ if (has_hvhe()) {
+ clr <<= 10;
+ set <<= 10;
+ }
+
+ sysreg_clear_set(cnthctl_el2, clr, set);
}
diff --git a/arch/arm64/kvm/hyp/nvhe/tlb.c b/arch/arm64/kvm/hyp/nvhe/tlb.c
index d296d617f589..a60fb13e2192 100644
--- a/arch/arm64/kvm/hyp/nvhe/tlb.c
+++ b/arch/arm64/kvm/hyp/nvhe/tlb.c
@@ -15,8 +15,31 @@ struct tlb_inv_context {
};
static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
- struct tlb_inv_context *cxt)
+ struct tlb_inv_context *cxt,
+ bool nsh)
{
+ /*
+ * We have two requirements:
+ *
+ * - ensure that the page table updates are visible to all
+ * CPUs, for which a dsb(DOMAIN-st) is what we need, DOMAIN
+ * being either ish or nsh, depending on the invalidation
+ * type.
+ *
+ * - complete any speculative page table walk started before
+ * we trapped to EL2 so that we can mess with the MM
+ * registers out of context, for which dsb(nsh) is enough
+ *
+ * The composition of these two barriers is a dsb(DOMAIN), and
+ * the 'nsh' parameter tracks the distinction between
+ * Inner-Shareable and Non-Shareable, as specified by the
+ * callers.
+ */
+ if (nsh)
+ dsb(nsh);
+ else
+ dsb(ish);
+
if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
u64 val;
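
For illustration only, a userspace-compilable (arm64-only) restatement of the entry barrier added above; dsb() here is an inline-asm stand-in, not the kernel macro. A dsb(ish) both publishes prior page-table updates to other CPUs and completes local speculative walks, so it subsumes the dsb(nsh) that the local-only flavour needs.

#include <stdbool.h>

/* arm64-only: emit a DSB of the requested domain. */
#define dsb(domain)	asm volatile("dsb " #domain ::: "memory")

static inline void pre_tlbi_barrier(bool nsh)
{
	if (nsh)
		dsb(nsh);	/* local invalidation: order against this CPU only */
	else
		dsb(ish);	/* broadcast invalidation: order across the Inner Shareable domain */
}

int main(void)
{
	pre_tlbi_barrier(true);
	pre_tlbi_barrier(false);
	return 0;
}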
@@ -60,10 +83,8 @@ void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
{
struct tlb_inv_context cxt;
- dsb(ishst);
-
/* Switch to requested VMID */
- __tlb_switch_to_guest(mmu, &cxt);
+ __tlb_switch_to_guest(mmu, &cxt, false);
/*
* We could do so much better if we had the VA as well.
@@ -84,27 +105,61 @@ void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu,
dsb(ish);
isb();
+ __tlb_switch_to_host(&cxt);
+}
+
+void __kvm_tlb_flush_vmid_ipa_nsh(struct kvm_s2_mmu *mmu,
+ phys_addr_t ipa, int level)
+{
+ struct tlb_inv_context cxt;
+
+ /* Switch to requested VMID */
+ __tlb_switch_to_guest(mmu, &cxt, true);
+
/*
- * If the host is running at EL1 and we have a VPIPT I-cache,
- * then we must perform I-cache maintenance at EL2 in order for
- * it to have an effect on the guest. Since the guest cannot hit
- * I-cache lines allocated with a different VMID, we don't need
- * to worry about junk out of guest reset (we nuke the I-cache on
- * VMID rollover), but we do need to be careful when remapping
- * executable pages for the same guest. This can happen when KSM
- * takes a CoW fault on an executable page, copies the page into
- * a page that was previously mapped in the guest and then needs
- * to invalidate the guest view of the I-cache for that page
- * from EL1. To solve this, we invalidate the entire I-cache when
- * unmapping a page from a guest if we have a VPIPT I-cache but
- * the host is running at EL1. As above, we could do better if
- * we had the VA.
- *
- * The moral of this story is: if you have a VPIPT I-cache, then
- * you should be running with VHE enabled.
+ * We could do so much better if we had the VA as well.
+ * Instead, we invalidate Stage-2 for this IPA, and the
+ * whole of Stage-1. Weep...
+ */
+ ipa >>= 12;
+ __tlbi_level(ipas2e1, ipa, level);
+
+ /*
+ * We have to ensure completion of the invalidation at Stage-2,
+ * since a table walk on another CPU could refill a TLB with a
+ * complete (S1 + S2) walk based on the old Stage-2 mapping if
+ * the Stage-1 invalidation happened first.
+ */
+ dsb(nsh);
+ __tlbi(vmalle1);
+ dsb(nsh);
+ isb();
+
+ __tlb_switch_to_host(&cxt);
+}
+
+void __kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
+ phys_addr_t start, unsigned long pages)
+{
+ struct tlb_inv_context cxt;
+ unsigned long stride;
+
+ /*
+ * Since the range of addresses may not be mapped at
+	 * the same level, assume the worst-case stride of PAGE_SIZE.
*/
- if (icache_is_vpipt())
- icache_inval_all_pou();
+ stride = PAGE_SIZE;
+ start = round_down(start, stride);
+
+ /* Switch to requested VMID */
+ __tlb_switch_to_guest(mmu, &cxt, false);
+
+ __flush_s2_tlb_range_op(ipas2e1is, start, pages, stride, 0);
+
+ dsb(ish);
+ __tlbi(vmalle1is);
+ dsb(ish);
+ isb();
__tlb_switch_to_host(&cxt);
}
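
A minimal model (not __flush_s2_tlb_range_op(), which can use TLBI range operations where the hardware provides them) of what the PAGE_SIZE worst-case stride means: the range is walked one page at a time, and each step stands in for the Stage-2 invalidation of that IPA.

#include <stdint.h>
#include <stdio.h>

#define MODEL_PAGE_SHIFT	12
#define MODEL_PAGE_SIZE		(1ull << MODEL_PAGE_SHIFT)

/* Walk [start, start + pages * PAGE_SIZE) and name each invalidation. */
static void model_flush_s2_range(uint64_t start, unsigned long pages)
{
	uint64_t end = start + (uint64_t)pages * MODEL_PAGE_SIZE;

	for (uint64_t ipa = start; ipa < end; ipa += MODEL_PAGE_SIZE)
		printf("tlbi ipas2e1is, ipa %#llx\n",
		       (unsigned long long)(ipa >> MODEL_PAGE_SHIFT));
}

int main(void)
{
	/* e.g. four pages starting at IPA 0x40000000 */
	model_flush_s2_range(0x40000000, 4);
	return 0;
}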
@@ -113,10 +168,8 @@ void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
{
struct tlb_inv_context cxt;
- dsb(ishst);
-
/* Switch to requested VMID */
- __tlb_switch_to_guest(mmu, &cxt);
+ __tlb_switch_to_guest(mmu, &cxt, false);
__tlbi(vmalls12e1is);
dsb(ish);
@@ -130,7 +183,7 @@ void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu)
struct tlb_inv_context cxt;
/* Switch to requested VMID */
- __tlb_switch_to_guest(mmu, &cxt);
+ __tlb_switch_to_guest(mmu, &cxt, false);
__tlbi(vmalle1);
asm volatile("ic iallu");
@@ -142,20 +195,8 @@ void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu)
void __kvm_flush_vm_context(void)
{
- dsb(ishst);
+ /* Same remark as in __tlb_switch_to_guest() */
+ dsb(ish);
__tlbi(alle1is);
-
- /*
- * VIPT and PIPT caches are not affected by VMID, so no maintenance
- * is necessary across a VMID rollover.
- *
- * VPIPT caches constrain lookup and maintenance to the active VMID,
- * so we need to invalidate lines with a stale VMID to avoid an ABA
- * race after multiple rollovers.
- *
- */
- if (icache_is_vpipt())
- asm volatile("ic ialluis");
-
dsb(ish);
}