author		Oliver Upton <oliver.upton@linux.dev>	2025-05-23 12:47:18 -0700
committer	Marc Zyngier <maz@kernel.org>		2025-05-30 09:11:28 +0100
commit		761aabe76e6b1eb8850e72141cb026c7057e46fd (patch)
tree		c588a01da0a90d2d43178cfa0b7f2c9cd0526801
parent		KVM: arm64: Mask out non-VA bits from TLBI VA* on VNCR invalidation (diff)
KVM: arm64: Use lock guard in vgic_v4_set_forwarding()
The locking dance is about to get more interesting; switch the its_lock
over to a lock guard to make it a bit easier to handle.

Tested-by: Sweet Tea Dorminy <sweettea-kernel@dorminy.me>
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
Link: https://lore.kernel.org/r/20250523194722.4066715-2-oliver.upton@linux.dev
Signed-off-by: Marc Zyngier <maz@kernel.org>
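For context (not part of the commit message or the diff below): guard(mutex)() comes from the kernel's scope-based cleanup helpers in <linux/cleanup.h>. It acquires the mutex immediately and releases it automatically when the enclosing scope is left, which is why the early returns in the patch no longer need an out: label and an explicit mutex_unlock(). A minimal sketch of the pattern, using a hypothetical example_lock and example_lookup() purely for illustration:

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(example_lock);	/* hypothetical lock, not from the patch */

static int example_lookup(int key)
{
	guard(mutex)(&example_lock);	/* mutex_lock() now, mutex_unlock() at scope exit */

	if (key < 0)
		return -EINVAL;		/* lock is dropped automatically here */

	return 0;			/* ...and here, no goto/unlock label needed */
}

The diff below applies this transformation to kvm_vgic_v4_set_forwarding(): each goto out becomes a plain return, and the out: label with its mutex_unlock() call goes away.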
-rw-r--r--	arch/arm64/kvm/vgic/vgic-v4.c	10
1 file changed, 4 insertions(+), 6 deletions(-)
diff --git a/arch/arm64/kvm/vgic/vgic-v4.c b/arch/arm64/kvm/vgic/vgic-v4.c
index c7de6154627c..8b25e7650998 100644
--- a/arch/arm64/kvm/vgic/vgic-v4.c
+++ b/arch/arm64/kvm/vgic/vgic-v4.c
@@ -444,7 +444,7 @@ int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int virq,
 	if (IS_ERR(its))
 		return 0;
 
-	mutex_lock(&its->its_lock);
+	guard(mutex)(&its->its_lock);
 
 	/*
 	 * Perform the actual DevID/EventID -> LPI translation.
@@ -455,11 +455,11 @@ int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int virq,
 	 */
 	if (vgic_its_resolve_lpi(kvm, its, irq_entry->msi.devid,
 				 irq_entry->msi.data, &irq))
-		goto out;
+		return 0;
 
 	/* Silently exit if the vLPI is already mapped */
 	if (irq->hw)
-		goto out;
+		return 0;
 
 	/*
 	 * Emit the mapping request. If it fails, the ITS probably
@@ -479,7 +479,7 @@ int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int virq,
 
 	ret = its_map_vlpi(virq, &map);
 	if (ret)
-		goto out;
+		return ret;
 
 	irq->hw		= true;
 	irq->host_irq	= virq;
@@ -503,8 +503,6 @@ int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int virq,
 		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
 	}
 
-out:
-	mutex_unlock(&its->its_lock);
 	return ret;
 }
 