author     Marc Zyngier <maz@kernel.org>  2019-07-30 10:50:38 +0100
committer  Marc Zyngier <maz@kernel.org>  2019-10-26 10:43:53 +0100
commit     37553941c670c3ad160b25843e6cdcbee2b3c6eb (patch)
tree       00fb402c79f8be22175a38bcb6fcd7c072d08a5a /arch/arm64/kvm/hyp/tlb.c
parent     arm64: KVM: Reorder system register restoration and stage-2 activation (diff)
arm64: KVM: Disable EL1 PTW when invalidating S2 TLBs
When erratum 1319367 is being worked around, special care must be taken not
to allow the page table walker to populate TLBs while we have the stage-2
translation enabled (which would otherwise result in a bizarre mix of the
host S1 and the guest S2).

We enforce this by setting TCR_EL1.EPD{0,1} before restoring the S2
configuration, and clear the same bits after having disabled S2.

Reviewed-by: James Morse <james.morse@arm.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
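For reference, the EPD bits the patch toggles live in the TCR_EL1 layout; the
definitions below follow the architecture (and the kernel's own masks in
arch/arm64/include/asm/pgtable-hwdef.h) and are quoted here as an illustrative
sketch rather than as part of this commit:

/*
 * Illustrative sketch, not part of this patch: TCR_EL1.EPD0 (bit 7) and
 * TCR_EL1.EPD1 (bit 23) disable table walks via TTBR0_EL1/TTBR1_EL1.
 * Setting both guarantees that no host Stage-1 translation can be
 * fetched into the TLBs while the guest's VMID is live in VTTBR_EL2.
 */
#define TCR_EPD0_SHIFT		7
#define TCR_EPD0_MASK		(UL(1) << TCR_EPD0_SHIFT)
#define TCR_EPD1_SHIFT		23
#define TCR_EPD1_MASK		(UL(1) << TCR_EPD1_SHIFT)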
Diffstat (limited to 'arch/arm64/kvm/hyp/tlb.c')
 arch/arm64/kvm/hyp/tlb.c | 23 +++++++++++++++++++++++
 1 file changed, 23 insertions(+), 0 deletions(-)
diff --git a/arch/arm64/kvm/hyp/tlb.c b/arch/arm64/kvm/hyp/tlb.c
index eb0efc5557f3..c2bc17ca6430 100644
--- a/arch/arm64/kvm/hyp/tlb.c
+++ b/arch/arm64/kvm/hyp/tlb.c
@@ -63,6 +63,22 @@ static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm,
static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm,
struct tlb_inv_context *cxt)
{
+ if (cpus_have_const_cap(ARM64_WORKAROUND_1319367)) {
+ u64 val;
+
+ /*
+ * For CPUs that are affected by ARM 1319367, we need to
+ * avoid a host Stage-1 walk while we have the guest's
+ * VMID set in the VTTBR in order to invalidate TLBs.
+ * We're guaranteed that the S1 MMU is enabled, so we can
+ * simply set the EPD bits to avoid any further TLB fill.
+ */
+ val = cxt->tcr = read_sysreg_el1(SYS_TCR);
+ val |= TCR_EPD1_MASK | TCR_EPD0_MASK;
+ write_sysreg_el1(val, SYS_TCR);
+ isb();
+ }
+
__load_guest_stage2(kvm);
isb();
}
@@ -100,6 +116,13 @@ static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm,
struct tlb_inv_context *cxt)
{
write_sysreg(0, vttbr_el2);
+
+ if (cpus_have_const_cap(ARM64_WORKAROUND_1319367)) {
+ /* Ensure write of the host VMID */
+ isb();
+ /* Restore the host's TCR_EL1 */
+ write_sysreg_el1(cxt->tcr, SYS_TCR);
+ }
}
static void __hyp_text __tlb_switch_to_host(struct kvm *kvm,
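For context, these switch helpers bracket the actual TLB maintenance: the
guest path installs the S2 configuration (with the EPD bits set first on
affected CPUs), the maintenance is issued, and the host path clears VTTBR_EL2
and restores TCR_EL1. A simplified sketch of a caller, loosely based on
__kvm_tlb_flush_vmid_ipa() in this file around the time of this commit and
not the verbatim code:

/* Simplified sketch of a caller; extra barriers and the non-VHE
 * Stage-1 invalidation are omitted. */
void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
{
	struct tlb_inv_context cxt;

	dsb(ishst);

	/* Switch to the guest's VMID; on affected CPUs the EPD bits in
	 * TCR_EL1 are set first so the host S1 walker cannot refill TLBs. */
	kvm = kern_hyp_va(kvm);
	__tlb_switch_to_guest(kvm, &cxt);

	/* Invalidate the Stage-2 entry for this IPA */
	__tlbi(ipas2e1is, ipa >> 12);
	dsb(ish);
	isb();

	/* Back to the host: VTTBR_EL2 cleared, then TCR_EL1 restored */
	__tlb_switch_to_host(kvm, &cxt);
}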