author     Christoffer Dall <cdall@linaro.org>             2016-10-16 22:19:11 +0200
committer  Christoffer Dall <christoffer.dall@linaro.org>  2017-11-06 16:23:10 +0100
commit     006df0f34930e18d0aa52f05705bdfe1fc565943 (patch)
tree       97d97e06b56113a033ed63ce7ad16096779804f5  /virt/kvm/arm/vgic/vgic-mmio-v3.c
parent     KVM: arm/arm64: Guard kvm_vgic_map_is_active against !vgic_initialized (diff)
KVM: arm/arm64: Support calling vgic_update_irq_pending from irq context
We are about to optimize our timer handling logic, which involves injecting irqs to the vgic directly from the irq handler.

Unfortunately, the injection path can take any AP list lock and irq lock, and we must therefore make sure to use spin_lock_irqsave wherever interrupts are enabled and we are taking any of those locks, to avoid deadlocking between process context and the ISR.

This changes a lot of the VGIC code, but the good news is that the changes are mostly mechanical.

Acked-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Christoffer Dall <cdall@linaro.org>
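For illustration only (this sketch is not part of the patch; example_lock and example_update() are made-up names), the mechanical conversion applied throughout the VGIC code follows this pattern:

/*
 * Illustrative sketch only -- example_lock and example_update() are
 * hypothetical and do not appear in this patch.
 */
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);

static void example_update(void)
{
	unsigned long flags;

	/*
	 * Before this series: spin_lock(&example_lock);
	 * If an interrupt handler that also takes example_lock fires on
	 * this CPU while the lock is held, the CPU deadlocks.  Saving the
	 * irq state and disabling local interrupts for the critical
	 * section avoids that, and the same code is then safe to call
	 * from both process and irq context.
	 */
	spin_lock_irqsave(&example_lock, flags);
	/* ... modify state that is shared with the irq handler ... */
	spin_unlock_irqrestore(&example_lock, flags);
}

Where the critical section is closed by a helper rather than by the caller, as with vgic_queue_irq_unlock() in this file, the saved flags are passed along so the helper can perform the matching spin_unlock_irqrestore().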
Diffstat (limited to 'virt/kvm/arm/vgic/vgic-mmio-v3.c')
-rw-r--r--  virt/kvm/arm/vgic/vgic-mmio-v3.c  17
1 file changed, 10 insertions(+), 7 deletions(-)
diff --git a/virt/kvm/arm/vgic/vgic-mmio-v3.c b/virt/kvm/arm/vgic/vgic-mmio-v3.c
index 408ef06638fc..83786108829e 100644
--- a/virt/kvm/arm/vgic/vgic-mmio-v3.c
+++ b/virt/kvm/arm/vgic/vgic-mmio-v3.c
@@ -129,6 +129,7 @@ static void vgic_mmio_write_irouter(struct kvm_vcpu *vcpu,
{
int intid = VGIC_ADDR_TO_INTID(addr, 64);
struct vgic_irq *irq;
+ unsigned long flags;
/* The upper word is WI for us since we don't implement Aff3. */
if (addr & 4)
@@ -139,13 +140,13 @@ static void vgic_mmio_write_irouter(struct kvm_vcpu *vcpu,
if (!irq)
return;
- spin_lock(&irq->irq_lock);
+ spin_lock_irqsave(&irq->irq_lock, flags);
/* We only care about and preserve Aff0, Aff1 and Aff2. */
irq->mpidr = val & GENMASK(23, 0);
irq->target_vcpu = kvm_mpidr_to_vcpu(vcpu->kvm, irq->mpidr);
- spin_unlock(&irq->irq_lock);
+ spin_unlock_irqrestore(&irq->irq_lock, flags);
vgic_put_irq(vcpu->kvm, irq);
}
@@ -241,11 +242,12 @@ static void vgic_v3_uaccess_write_pending(struct kvm_vcpu *vcpu,
{
u32 intid = VGIC_ADDR_TO_INTID(addr, 1);
int i;
+ unsigned long flags;
for (i = 0; i < len * 8; i++) {
struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
- spin_lock(&irq->irq_lock);
+ spin_lock_irqsave(&irq->irq_lock, flags);
if (test_bit(i, &val)) {
/*
* pending_latch is set irrespective of irq type
@@ -253,10 +255,10 @@ static void vgic_v3_uaccess_write_pending(struct kvm_vcpu *vcpu,
* restore irq config before pending info.
*/
irq->pending_latch = true;
- vgic_queue_irq_unlock(vcpu->kvm, irq);
+ vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
} else {
irq->pending_latch = false;
- spin_unlock(&irq->irq_lock);
+ spin_unlock_irqrestore(&irq->irq_lock, flags);
}
vgic_put_irq(vcpu->kvm, irq);
@@ -799,6 +801,7 @@ void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg)
int sgi, c;
int vcpu_id = vcpu->vcpu_id;
bool broadcast;
+ unsigned long flags;
sgi = (reg & ICC_SGI1R_SGI_ID_MASK) >> ICC_SGI1R_SGI_ID_SHIFT;
broadcast = reg & BIT_ULL(ICC_SGI1R_IRQ_ROUTING_MODE_BIT);
@@ -837,10 +840,10 @@ void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg)
irq = vgic_get_irq(vcpu->kvm, c_vcpu, sgi);
- spin_lock(&irq->irq_lock);
+ spin_lock_irqsave(&irq->irq_lock, flags);
irq->pending_latch = true;
- vgic_queue_irq_unlock(vcpu->kvm, irq);
+ vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
vgic_put_irq(vcpu->kvm, irq);
}
}