author		Marc Zyngier <maz@kernel.org>	2022-05-17 10:37:06 +0100
committer	Marc Zyngier <maz@kernel.org>	2022-05-17 10:37:06 +0100
commit		492449ae4f0ad96948c3e029ca00736a7f1b3d77 (patch)
tree		fb37185cef4cc5a3d625f1f2d4714129fe59cdee
parent		Merge branch irq/misc-5.19 into irq/irqchip-next (diff)
parent		irqchip/gic-v3: Fix priority mask handling (diff)
download	linux-dev-492449ae4f0ad96948c3e029ca00736a7f1b3d77.tar.xz
		linux-dev-492449ae4f0ad96948c3e029ca00736a7f1b3d77.zip
Merge branch irq/gic-v3-nmi-fixes-5.19 into irq/irqchip-next
* irq/gic-v3-nmi-fixes-5.19:
  : .
  : GICv3 pseudo-NMI fixes from Mark Rutland:
  :
  : "These patches fix a couple of issues with the way GICv3 pseudo-NMIs
  : are handled:
  :
  : * The first patch adds a barrier we missed from NMI handling due to
  :   an oversight.
  :
  : * The second patch refactors some logic around reads from ICC_IAR1_EL1
  :   and adds commentary to explain what's going on.
  :
  : * The third patch descends into madness, reworking gic_handle_irq() to
  :   consistently manage ICC_PMR_EL1 + DAIF and avoid cases where these
  :   can be left in an inconsistent state while softirqs are processed."
  : .
  irqchip/gic-v3: Fix priority mask handling
  irqchip/gic-v3: Refactor ISB + EOIR at ack time
  irqchip/gic-v3: Ensure pseudo-NMIs have an ISB between ack and handling

Signed-off-by: Marc Zyngier <maz@kernel.org>
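Before the diff itself, it may help to see the ack-time contract the series converges on. The following is a minimal sketch, not the patched code verbatim; eoi_mode_one and handle() are illustrative placeholders, while gic_read_iar(), write_gicreg() and ICC_EOIR1_EL1 are the real identifiers used in the diff below:

	u32 irqnr = gic_read_iar();	/* acknowledge via ICC_IAR1_EL1 */

	if (eoi_mode_one)		/* EOImode==1: priority drop now, */
		write_gicreg(irqnr, ICC_EOIR1_EL1);	/* deactivate later via DIR */

	isb();	/* mandatory: IAR can report an IRQ signalled after exception
		 * entry, and CPU-coupled devices (e.g. PMUs) may have written
		 * system registers that only become visible after a context
		 * synchronization event */

	handle(irqnr);			/* IRQ or pseudo-NMI handler */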
-rw-r--r--	arch/arm/include/asm/arch_gicv3.h	7
-rw-r--r--	arch/arm64/include/asm/arch_gicv3.h	6
-rw-r--r--	drivers/irqchip/irq-gic-v3.c		183
3 files changed, 121 insertions(+), 75 deletions(-)
diff --git a/arch/arm/include/asm/arch_gicv3.h b/arch/arm/include/asm/arch_gicv3.h
index 413abfb42989..f82a819eb0db 100644
--- a/arch/arm/include/asm/arch_gicv3.h
+++ b/arch/arm/include/asm/arch_gicv3.h
@@ -48,6 +48,7 @@ static inline u32 read_ ## a64(void)	\
 	return read_sysreg(a32); 	\
 }					\
 
+CPUIF_MAP(ICC_EOIR1, ICC_EOIR1_EL1)
 CPUIF_MAP(ICC_PMR, ICC_PMR_EL1)
 CPUIF_MAP(ICC_AP0R0, ICC_AP0R0_EL1)
 CPUIF_MAP(ICC_AP0R1, ICC_AP0R1_EL1)
@@ -63,12 +64,6 @@ CPUIF_MAP(ICC_AP1R3, ICC_AP1R3_EL1)
 
 /* Low-level accessors */
 
-static inline void gic_write_eoir(u32 irq)
-{
-	write_sysreg(irq, ICC_EOIR1);
-	isb();
-}
-
 static inline void gic_write_dir(u32 val)
 {
 	write_sysreg(val, ICC_DIR);
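A note on the one-line addition in the first hunk: on 32-bit ARM the GICv3 CPU interface is reached via CP15 accessors, and CPUIF_MAP() generates sysreg-style wrappers around them. Based on the macro body visible in the hunk context, the new mapping expands to roughly the following pair, which is what lets the common driver code's write_gicreg(..., ICC_EOIR1_EL1) resolve on arm32 now that the open-coded gic_write_eoir() above is gone:

	static inline void write_ICC_EOIR1_EL1(u32 val)
	{
		write_sysreg(val, ICC_EOIR1);	/* CP15 access on arm32 */
	}

	static inline u32 read_ICC_EOIR1_EL1(void)
	{
		return read_sysreg(ICC_EOIR1);
	}

Unlike the removed helper, the generated writer has no implicit isb(), so the driver now places the barrier explicitly where it is needed.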
diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h
index 8bd5afc7b692..48d4473e8eee 100644
--- a/arch/arm64/include/asm/arch_gicv3.h
+++ b/arch/arm64/include/asm/arch_gicv3.h
@@ -26,12 +26,6 @@
  * sets the GP register's most significant bits to 0 with an explicit cast.
  */
 
-static inline void gic_write_eoir(u32 irq)
-{
-	write_sysreg_s(irq, SYS_ICC_EOIR1_EL1);
-	isb();
-}
-
 static __always_inline void gic_write_dir(u32 irq)
 {
 	write_sysreg_s(irq, SYS_ICC_DIR_EL1);
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index 9d220c6c053b..2be8dea6b6b0 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -559,7 +559,8 @@ static void gic_irq_nmi_teardown(struct irq_data *d)
 
 static void gic_eoi_irq(struct irq_data *d)
 {
-	gic_write_eoir(gic_irq(d));
+	write_gicreg(gic_irq(d), ICC_EOIR1_EL1);
+	isb();
 }
 
 static void gic_eoimode1_eoi_irq(struct irq_data *d)
@@ -639,82 +640,101 @@ static void gic_deactivate_unhandled(u32 irqnr)
 		if (irqnr < 8192)
 			gic_write_dir(irqnr);
 	} else {
-		gic_write_eoir(irqnr);
+		write_gicreg(irqnr, ICC_EOIR1_EL1);
+		isb();
 	}
 }
 
-static inline void gic_handle_nmi(u32 irqnr, struct pt_regs *regs)
+/*
+ * Follow a read of the IAR with any HW maintenance that needs to happen prior
+ * to invoking the relevant IRQ handler. We must do two things:
+ *
+ * (1) Ensure instruction ordering between a read of IAR and subsequent
+ *     instructions in the IRQ handler using an ISB.
+ *
+ *     It is possible for the IAR to report an IRQ which was signalled *after*
+ *     the CPU took an IRQ exception as multiple interrupts can race to be
+ *     recognized by the GIC, earlier interrupts could be withdrawn, and/or
+ *     later interrupts could be prioritized by the GIC.
+ *
+ *     For devices which are tightly coupled to the CPU, such as PMUs, a
+ *     context synchronization event is necessary to ensure that system
+ *     register state is not stale, as these may have been indirectly written
+ *     *after* exception entry.
+ *
+ * (2) Deactivate the interrupt when EOI mode 1 is in use.
+ */
+static inline void gic_complete_ack(u32 irqnr)
 {
-	bool irqs_enabled = interrupts_enabled(regs);
-	int err;
-
-	if (irqs_enabled)
-		nmi_enter();
-
 	if (static_branch_likely(&supports_deactivate_key))
-		gic_write_eoir(irqnr);
-	/*
-	 * Leave the PSR.I bit set to prevent other NMIs to be
-	 * received while handling this one.
-	 * PSR.I will be restored when we ERET to the
-	 * interrupted context.
-	 */
-	err = generic_handle_domain_nmi(gic_data.domain, irqnr);
-	if (err)
-		gic_deactivate_unhandled(irqnr);
+		write_gicreg(irqnr, ICC_EOIR1_EL1);
 
-	if (irqs_enabled)
-		nmi_exit();
+	isb();
 }
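The comment above gic_complete_ack() is the crux of the series. To make its PMU example concrete, here is a hypothetical overflow handler (illustration only, not code from this patch) showing what the ISB protects:

	/*
	 * Without gic_complete_ack()'s ISB, the sysreg read below could be
	 * reordered before the IAR read completes, observing stale overflow
	 * state written by the PMU around exception entry and making a real
	 * interrupt look spurious.
	 */
	static irqreturn_t pmu_irq_handler(int irq, void *dev)
	{
		u64 ovf = read_sysreg(pmovsclr_el0);	/* overflow flags */

		if (!ovf)
			return IRQ_NONE;

		write_sysreg(ovf, pmovsclr_el0);	/* write-1-to-clear */
		return IRQ_HANDLED;
	}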
 
-static u32 do_read_iar(struct pt_regs *regs)
+static bool gic_rpr_is_nmi_prio(void)
 {
-	u32 iar;
+	if (!gic_supports_nmi())
+		return false;
 
-	if (gic_supports_nmi() && unlikely(!interrupts_enabled(regs))) {
-		u64 pmr;
+	return unlikely(gic_read_rpr() == GICD_INT_RPR_PRI(GICD_INT_NMI_PRI));
+}
 
-		/*
-		 * We were in a context with IRQs disabled. However, the
-		 * entry code has set PMR to a value that allows any
-		 * interrupt to be acknowledged, and not just NMIs. This can
-		 * lead to surprising effects if the NMI has been retired in
-		 * the meantime, and that there is an IRQ pending. The IRQ
-		 * would then be taken in NMI context, something that nobody
-		 * wants to debug twice.
-		 *
-		 * Until we sort this, drop PMR again to a level that will
-		 * actually only allow NMIs before reading IAR, and then
-		 * restore it to what it was.
-		 */
-		pmr = gic_read_pmr();
-		gic_pmr_mask_irqs();
-		isb();
+static bool gic_irqnr_is_special(u32 irqnr)
+{
+	return irqnr >= 1020 && irqnr <= 1023;
+}
 
-		iar = gic_read_iar();
+static void __gic_handle_irq(u32 irqnr, struct pt_regs *regs)
+{
+	if (gic_irqnr_is_special(irqnr))
+		return;
 
-		gic_write_pmr(pmr);
-	} else {
-		iar = gic_read_iar();
+	gic_complete_ack(irqnr);
+
+	if (generic_handle_domain_irq(gic_data.domain, irqnr)) {
+		WARN_ONCE(true, "Unexpected interrupt (irqnr %u)\n", irqnr);
+		gic_deactivate_unhandled(irqnr);
 	}
+}
+
+static void __gic_handle_nmi(u32 irqnr, struct pt_regs *regs)
+{
+	if (gic_irqnr_is_special(irqnr))
+		return;
+
+	gic_complete_ack(irqnr);
 
-	return iar;
+	if (generic_handle_domain_nmi(gic_data.domain, irqnr)) {
+		WARN_ONCE(true, "Unexpected pseudo-NMI (irqnr %u)\n", irqnr);
+		gic_deactivate_unhandled(irqnr);
+	}
 }
 
-static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
+/*
+ * An exception has been taken from a context with IRQs enabled, and this could
+ * be an IRQ or an NMI.
+ *
+ * The entry code called us with DAIF.IF set to keep NMIs masked. We must clear
+ * DAIF.IF (and update ICC_PMR_EL1 to mask regular IRQs) prior to returning,
+ * after handling any NMI but before handling any IRQ.
+ *
+ * The entry code has performed IRQ entry, and if an NMI is detected we must
+ * perform NMI entry/exit around invoking the handler.
+ */
+static void __gic_handle_irq_from_irqson(struct pt_regs *regs)
 {
+	bool is_nmi;
 	u32 irqnr;
 
-	irqnr = do_read_iar(regs);
+	irqnr = gic_read_iar();
 
-	/* Check for special IDs first */
-	if ((irqnr >= 1020 && irqnr <= 1023))
-		return;
+	is_nmi = gic_rpr_is_nmi_prio();
 
-	if (gic_supports_nmi() &&
-	    unlikely(gic_read_rpr() == GICD_INT_RPR_PRI(GICD_INT_NMI_PRI))) {
-		gic_handle_nmi(irqnr, regs);
-		return;
+	if (is_nmi) {
+		nmi_enter();
+		__gic_handle_nmi(irqnr, regs);
+		nmi_exit();
 	}
 
 	if (gic_prio_masking_enabled()) {
@@ -722,15 +742,52 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs
 		gic_arch_enable_irqs();
 	}
 
-	if (static_branch_likely(&supports_deactivate_key))
-		gic_write_eoir(irqnr);
-	else
-		isb();
+	if (!is_nmi)
+		__gic_handle_irq(irqnr, regs);
+}
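With __gic_handle_irq_from_irqson() now complete, the ordering it enforces can be summarized as follows (annotation in comment form; not part of the patch):

	/*
	 * NMI flow:  ack -> nmi_enter() -> __gic_handle_nmi() -> nmi_exit(),
	 *            all before PMR/DAIF are opened up, so the pseudo-NMI
	 *            runs with NMIs-only masking intact.
	 *
	 * IRQ flow:  ack -> gic_pmr_mask_irqs() + gic_arch_enable_irqs()
	 *            -> __gic_handle_irq(), so the handler (and any softirq
	 *            processing it triggers) sees a consistent PMR + DAIF
	 *            state, which is the inconsistency this series fixes.
	 */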
 
-	if (generic_handle_domain_irq(gic_data.domain, irqnr)) {
-		WARN_ONCE(true, "Unexpected interrupt received!\n");
-		gic_deactivate_unhandled(irqnr);
-	}
+/*
+ * An exception has been taken from a context with IRQs disabled, which can only
+ * be an NMI.
+ *
+ * The entry code called us with DAIF.IF set to keep NMIs masked. We must leave
+ * DAIF.IF (and ICC_PMR_EL1) unchanged.
+ *
+ * The entry code has performed NMI entry.
+ */
+static void __gic_handle_irq_from_irqsoff(struct pt_regs *regs)
+{
+	u64 pmr;
+	u32 irqnr;
+
+	/*
+	 * We were in a context with IRQs disabled. However, the
+	 * entry code has set PMR to a value that allows any
+	 * interrupt to be acknowledged, and not just NMIs. This can
+	 * lead to surprising effects if the NMI has been retired in
+	 * the meantime, and that there is an IRQ pending. The IRQ
+	 * would then be taken in NMI context, something that nobody
+	 * wants to debug twice.
+	 *
+	 * Until we sort this, drop PMR again to a level that will
+	 * actually only allow NMIs before reading IAR, and then
+	 * restore it to what it was.
+	 */
+	pmr = gic_read_pmr();
+	gic_pmr_mask_irqs();
+	isb();
+	irqnr = gic_read_iar();
+	gic_write_pmr(pmr);
+
+	__gic_handle_nmi(irqnr, regs);
+}
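The PMR bracket above deserves one extra observation: the isb() between gic_pmr_mask_irqs() and gic_read_iar() is what makes the re-masking effective. A restated sketch with the failure mode spelled out (annotation only, same identifiers as the patch):

	pmr = gic_read_pmr();	/* preserve the interrupted context's mask */
	gic_pmr_mask_irqs();	/* allow only NMIs to be acknowledged */
	isb();			/* the GIC must observe the new PMR before the
				 * IAR read; otherwise a regular IRQ that became
				 * pending could still be returned here and end
				 * up handled in NMI context */
	irqnr = gic_read_iar();
	gic_write_pmr(pmr);	/* restore before invoking the handler */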
+
+static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
+{
+	if (unlikely(gic_supports_nmi() && !interrupts_enabled(regs)))
+		__gic_handle_irq_from_irqsoff(regs);
+	else
+		__gic_handle_irq_from_irqson(regs);
 }
 
 static u32 gic_get_pribits(void)
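Finally, a reminder of when the NMI paths above are live at all: gic_supports_nmi() is only true when pseudo-NMI support is compiled in and requested at boot. To the best of my recollection (this gating is not part of the diff), that means CONFIG_ARM64_PSEUDO_NMI=y plus the command-line opt-in:

	irqchip.gicv3_pseudo_nmi=1

Without it, gic_handle_irq() always takes the __gic_handle_irq_from_irqson() path and gic_rpr_is_nmi_prio() short-circuits to false.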