author	Zenghui Yu <yuzenghui@huawei.com>	2020-07-20 17:23:28 +0800
committer	Marc Zyngier <maz@kernel.org>	2020-07-27 08:55:03 +0100
commit	3af9571cd585efafc2facbd8dbd407317ff898cf (patch)
tree	2bc966928fa712ca99b2fc0344ec678f50780ee2 /drivers/irqchip
parent	irqchip/irq-bcm7038-l1: Guard uses of cpu_logical_map (diff)
irqchip/gic-v4.1: Ensure accessing the correct RD when writing INVALLR
The GICv4.1 spec tells us that it's CONSTRAINED UNPREDICTABLE to issue a
register-based invalidation operation for a vPEID not mapped to that RD,
or another RD within the same CommonLPIAff group.

To follow this rule, commit f3a059219bc7 ("irqchip/gic-v4.1: Ensure mutual
exclusion between vPE affinity change and RD access") tried to address the
race between the RD accesses and the vPE affinity change, but somehow
forgot to take GICR_INVALLR into account. Let's take the vpe_lock before
evaluating vpe->col_idx to fix it.

Fixes: f3a059219bc7 ("irqchip/gic-v4.1: Ensure mutual exclusion between vPE affinity change and RD access")
Signed-off-by: Zenghui Yu <yuzenghui@huawei.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Cc: stable@vger.kernel.org
Link: https://lore.kernel.org/r/20200720092328.708-1-yuzenghui@huawei.com
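For context, vpe_to_cpuid_lock()/vpe_to_cpuid_unlock() are the helpers introduced by f3a059219bc7 to serialize RD accesses against vPE affinity changes: they hold the per-vPE vpe_lock across the read of vpe->col_idx, so the redistributor the caller resolves cannot change underneath it. A rough sketch of the pattern they implement (paraphrased, not quoted from the tree; see drivers/irqchip/irq-gic-v3-its.c for the exact bodies):

static u16 vpe_to_cpuid_lock(struct its_vpe *vpe, unsigned long *flags)
{
	/* Pin the vPE's current residency before reading col_idx */
	raw_spin_lock_irqsave(&vpe->vpe_lock, *flags);
	return vpe->col_idx;
}

static void vpe_to_cpuid_unlock(struct its_vpe *vpe, unsigned long flags)
{
	raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags);
}

Any register-based invalidation that targets a redistributor via vpe->col_idx must sit inside this lock/unlock pair; the GICR_INVLPIR path already did after f3a059219bc7, and this patch brings the GICR_INVALLR path in line.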
Diffstat (limited to 'drivers/irqchip')
-rw-r--r--	drivers/irqchip/irq-gic-v3-its.c	10
1 file changed, 7 insertions(+), 3 deletions(-)
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index cd685f521c77..e3beca05e60f 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -4079,18 +4079,22 @@ static void its_vpe_4_1_deschedule(struct its_vpe *vpe,
 static void its_vpe_4_1_invall(struct its_vpe *vpe)
 {
 	void __iomem *rdbase;
+	unsigned long flags;
 	u64 val;
+	int cpu;
 
 	val  = GICR_INVALLR_V;
 	val |= FIELD_PREP(GICR_INVALLR_VPEID, vpe->vpe_id);
 
 	/* Target the redistributor this vPE is currently known on */
-	raw_spin_lock(&gic_data_rdist_cpu(vpe->col_idx)->rd_lock);
-	rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
+	cpu = vpe_to_cpuid_lock(vpe, &flags);
+	raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock);
+	rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base;
 	gic_write_lpir(val, rdbase + GICR_INVALLR);
 
 	wait_for_syncr(rdbase);
-	raw_spin_unlock(&gic_data_rdist_cpu(vpe->col_idx)->rd_lock);
+	raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock);
+	vpe_to_cpuid_unlock(vpe, flags);
 }
 
 static int its_vpe_4_1_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
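For reference, its_vpe_4_1_invall() as it reads with this patch applied (reconstructed from the hunk above):

static void its_vpe_4_1_invall(struct its_vpe *vpe)
{
	void __iomem *rdbase;
	unsigned long flags;
	u64 val;
	int cpu;

	val  = GICR_INVALLR_V;
	val |= FIELD_PREP(GICR_INVALLR_VPEID, vpe->vpe_id);

	/* Target the redistributor this vPE is currently known on */
	cpu = vpe_to_cpuid_lock(vpe, &flags);
	raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock);
	rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base;
	gic_write_lpir(val, rdbase + GICR_INVALLR);

	wait_for_syncr(rdbase);
	raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock);
	vpe_to_cpuid_unlock(vpe, flags);
}

Note the lock ordering: the per-vPE vpe_lock (taken inside vpe_to_cpuid_lock()) is acquired before the per-RD rd_lock, matching the ordering used elsewhere for register-based invalidations.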