author     Marc Zyngier <maz@kernel.org>  2025-05-20 15:41:16 +0100
committer  Marc Zyngier <maz@kernel.org>  2025-05-21 09:53:08 +0100
commit     d43548f422f27219eff5ce1897336af2c4f15091 (patch)
tree       ac243af94ceb55f7a6c908f5c0781543516ad27a
parent     KVM: arm64: Document NV caps and vcpu flags (diff)
KVM: arm64: nv: Hold mmu_lock when invalidating VNCR SW-TLB before translating
When translating a VNCR translation fault, we start by marking the
current SW-managed TLB as invalid, so that we can populate it in
place. This is, however, done without the mmu_lock held.

A consequence of this is that another CPU dealing with TLBI emulation
can observe a translation still flagged as valid, but with invalid
walk results (such as pgshift being 0). Bad things can result from
this, such as a BUG() in pgshift_level_to_ttl().

Fix it by taking the mmu_lock for write to perform this local
invalidation, and use invalidate_vncr() instead of open-coding the
write to the 'valid' flag.

Fixes: 069a05e535496 ("KVM: arm64: nv: Handle VNCR_EL2-triggered faults")
Reviewed-by: Oliver Upton <oliver.upton@linux.dev>
Link: https://lore.kernel.org/r/20250520144116.3667978-1-maz@kernel.org
Signed-off-by: Marc Zyngier <maz@kernel.org>
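To make the race concrete, here is a small userspace sketch, not kernel code: the pthread rwlock stands in for kvm->mmu_lock, and the struct below is an illustrative stand-in for the real struct vncr_tlb. One thread plays the fault path (invalidate and reset the entry under the write lock), the other plays TLBI emulation (only ever looks at the entry while holding the lock), so a reader can never see valid == true paired with a wiped-out walk result. Build with `cc -pthread`.

/* Userspace analogy of the locking fix; all names here are illustrative. */
#include <assert.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct sw_tlb {
	pthread_rwlock_t lock;		/* stand-in for kvm->mmu_lock */
	bool valid;
	unsigned int pgshift;		/* stand-in for the S1 walk result */
};

static struct sw_tlb tlb = {
	.lock    = PTHREAD_RWLOCK_INITIALIZER,
	.valid   = true,
	.pgshift = 12,
};

/* "Fault path": invalidate and reset the entry under the write lock. */
static void *fault_path(void *unused)
{
	pthread_rwlock_wrlock(&tlb.lock);
	tlb.valid = false;		/* analogue of invalidate_vncr() */
	tlb.pgshift = 0;		/* reset the walk state in place */
	pthread_rwlock_unlock(&tlb.lock);
	return NULL;
}

/* "TLBI emulation": only trusts the entry while holding the lock. */
static void *tlbi_path(void *unused)
{
	pthread_rwlock_rdlock(&tlb.lock);
	if (tlb.valid)
		assert(tlb.pgshift != 0);	/* never valid with bogus state */
	pthread_rwlock_unlock(&tlb.lock);
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, fault_path, NULL);
	pthread_create(&b, NULL, tlbi_path, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	printf("no torn 'valid but empty' state observed\n");
	return 0;
}

Without the lock on the writer side (the pre-fix code), the reader could observe the flag and the payload in an inconsistent order, which is exactly the "valid but pgshift == 0" state the commit message describes.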
-rw-r--r--  arch/arm64/kvm/nested.c  25
1 file changed, 18 insertions, 7 deletions
diff --git a/arch/arm64/kvm/nested.c b/arch/arm64/kvm/nested.c
index 6a9fd4e0e789..56b732003caa 100644
--- a/arch/arm64/kvm/nested.c
+++ b/arch/arm64/kvm/nested.c
@@ -1179,13 +1179,24 @@ static int kvm_translate_vncr(struct kvm_vcpu *vcpu)
 	vt = vcpu->arch.vncr_tlb;
 
-	vt->wi = (struct s1_walk_info) {
-		.regime = TR_EL20,
-		.as_el0 = false,
-		.pan = false,
-	};
-	vt->wr = (struct s1_walk_result){};
-	vt->valid = false;
+	/*
+	 * If we're about to walk the EL2 S1 PTs, we must invalidate the
+	 * current TLB, as it could be sampled from another vcpu doing a
+	 * TLBI *IS. A real CPU wouldn't do that, but we only keep a single
+	 * translation, so not much of a choice.
+	 *
+	 * We also prepare the next walk whilst we're at it.
+	 */
+	scoped_guard(write_lock, &vcpu->kvm->mmu_lock) {
+		invalidate_vncr(vt);
+
+		vt->wi = (struct s1_walk_info) {
+			.regime = TR_EL20,
+			.as_el0 = false,
+			.pan = false,
+		};
+		vt->wr = (struct s1_walk_result){};
+	}
 
 	guard(srcu)(&vcpu->kvm->srcu);
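A note on the helpers used in the hunk: guard() and scoped_guard() come from the kernel's scope-based cleanup machinery (<linux/cleanup.h>), which acquires a lock and releases it automatically when the scope ends, so the lock cannot be leaked on an early return. In effect (this is a behavioural sketch, not the actual macro expansion), the scoped_guard(write_lock, ...) block above amounts to:

	/* sketch of what the scoped_guard() block does */
	write_lock(&vcpu->kvm->mmu_lock);
	invalidate_vncr(vt);
	vt->wi = (struct s1_walk_info) {
		.regime = TR_EL20,
		.as_el0 = false,
		.pan = false,
	};
	vt->wr = (struct s1_walk_result){};
	write_unlock(&vcpu->kvm->mmu_lock);

The guard(srcu)(&vcpu->kvm->srcu) line likewise takes the SRCU read lock and drops it automatically at the end of the enclosing scope.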