Diffstat (limited to 'drivers/irqchip/irq-gic.c')
-rw-r--r-- drivers/irqchip/irq-gic.c | 247
1 file changed, 145 insertions(+), 102 deletions(-)
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index a27ba2cc1dce..6053245a4754 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -83,9 +83,6 @@ struct gic_chip_data {
#endif
struct irq_domain *domain;
unsigned int gic_irqs;
-#ifdef CONFIG_GIC_NON_BANKED
- void __iomem *(*get_base)(union gic_base *);
-#endif
};
#ifdef CONFIG_BL_SWITCHER
@@ -124,36 +121,30 @@ static struct gic_chip_data gic_data[CONFIG_ARM_GIC_MAX_NR] __read_mostly;
static struct gic_kvm_info gic_v2_kvm_info;
+static DEFINE_PER_CPU(u32, sgi_intid);
+
#ifdef CONFIG_GIC_NON_BANKED
-static void __iomem *gic_get_percpu_base(union gic_base *base)
-{
- return raw_cpu_read(*base->percpu_base);
-}
+static DEFINE_STATIC_KEY_FALSE(frankengic_key);
-static void __iomem *gic_get_common_base(union gic_base *base)
+static void enable_frankengic(void)
{
- return base->common_base;
+ static_branch_enable(&frankengic_key);
}
-static inline void __iomem *gic_data_dist_base(struct gic_chip_data *data)
+static inline void __iomem *__get_base(union gic_base *base)
{
- return data->get_base(&data->dist_base);
-}
+ if (static_branch_unlikely(&frankengic_key))
+ return raw_cpu_read(*base->percpu_base);
-static inline void __iomem *gic_data_cpu_base(struct gic_chip_data *data)
-{
- return data->get_base(&data->cpu_base);
+ return base->common_base;
}
-static inline void gic_set_base_accessor(struct gic_chip_data *data,
- void __iomem *(*f)(union gic_base *))
-{
- data->get_base = f;
-}
+#define gic_data_dist_base(d) __get_base(&(d)->dist_base)
+#define gic_data_cpu_base(d) __get_base(&(d)->cpu_base)
#else
#define gic_data_dist_base(d) ((d)->dist_base.common_base)
#define gic_data_cpu_base(d) ((d)->cpu_base.common_base)
-#define gic_set_base_accessor(d, f)
+#define enable_frankengic() do { } while(0)
#endif
static inline void __iomem *gic_dist_base(struct irq_data *d)
@@ -226,16 +217,26 @@ static void gic_unmask_irq(struct irq_data *d)
static void gic_eoi_irq(struct irq_data *d)
{
- writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI);
+ u32 hwirq = gic_irq(d);
+
+ if (hwirq < 16)
+ hwirq = this_cpu_read(sgi_intid);
+
+ writel_relaxed(hwirq, gic_cpu_base(d) + GIC_CPU_EOI);
}
static void gic_eoimode1_eoi_irq(struct irq_data *d)
{
+ u32 hwirq = gic_irq(d);
+
/* Do not deactivate an IRQ forwarded to a vcpu. */
if (irqd_is_forwarded_to_vcpu(d))
return;
- writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_DEACTIVATE);
+ if (hwirq < 16)
+ hwirq = this_cpu_read(sgi_intid);
+
+ writel_relaxed(hwirq, gic_cpu_base(d) + GIC_CPU_DEACTIVATE);
}
static int gic_irq_set_irqchip_state(struct irq_data *d,
@@ -295,7 +296,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
/* Interrupt configuration for SGIs can't be changed */
if (gicirq < 16)
- return -EINVAL;
+ return type != IRQ_TYPE_EDGE_RISING ? -EINVAL : 0;
/* SPIs have restrictions on the supported types */
if (gicirq >= 32 && type != IRQ_TYPE_LEVEL_HIGH &&
@@ -315,7 +316,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
{
/* Only interrupts on the primary GIC can be forwarded to a vcpu. */
- if (cascading_gic_irq(d))
+ if (cascading_gic_irq(d) || gic_irq(d) < 16)
return -EINVAL;
if (vcpu)
@@ -325,27 +326,10 @@ static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
return 0;
}
-#ifdef CONFIG_SMP
-static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
- bool force)
+static int gic_retrigger(struct irq_data *data)
{
- void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + gic_irq(d);
- unsigned int cpu;
-
- if (!force)
- cpu = cpumask_any_and(mask_val, cpu_online_mask);
- else
- cpu = cpumask_first(mask_val);
-
- if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids)
- return -EINVAL;
-
- writeb_relaxed(gic_cpu_map[cpu], reg);
- irq_data_update_effective_affinity(d, cpumask_of(cpu));
-
- return IRQ_SET_MASK_OK_DONE;
+ return !gic_irq_set_irqchip_state(data, IRQCHIP_STATE_PENDING, true);
}
-#endif
static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
{
@@ -357,31 +341,33 @@ static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
irqstat = readl_relaxed(cpu_base + GIC_CPU_INTACK);
irqnr = irqstat & GICC_IAR_INT_ID_MASK;
- if (likely(irqnr > 15 && irqnr < 1020)) {
- if (static_branch_likely(&supports_deactivate_key))
- writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
- isb();
- handle_domain_irq(gic->domain, irqnr, regs);
- continue;
- }
- if (irqnr < 16) {
+ if (unlikely(irqnr >= 1020))
+ break;
+
+ if (static_branch_likely(&supports_deactivate_key))
writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
- if (static_branch_likely(&supports_deactivate_key))
- writel_relaxed(irqstat, cpu_base + GIC_CPU_DEACTIVATE);
-#ifdef CONFIG_SMP
+ isb();
+
+ /*
+ * Ensure any shared data written by the CPU sending the IPI
+ * is read after we've read the ACK register on the GIC.
+ *
+ * Pairs with the write barrier in gic_ipi_send_mask
+ */
+ if (irqnr <= 15) {
+ smp_rmb();
+
/*
- * Ensure any shared data written by the CPU sending
- * the IPI is read after we've read the ACK register
- * on the GIC.
- *
- * Pairs with the write barrier in gic_raise_softirq
+ * The GIC encodes the source CPU in GICC_IAR,
+ * so deactivation fails unless the value is written
+ * back as-is to GICC_EOI. Stash the INTID
+ * away for gic_eoi_irq() to write back. This only
+ * works because we don't nest SGIs...
*/
- smp_rmb();
- handle_IPI(irqnr, regs);
-#endif
- continue;
+ this_cpu_write(sgi_intid, irqstat);
}
- break;
+
+ handle_domain_irq(gic->domain, irqnr, regs);
} while (1);
}
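
Note on the per-CPU sgi_intid stash above: on GICv2, acknowledging an SGI returns not just the interrupt ID but also the ID of the CPU that requested it, and the end-of-interrupt write must carry the same pair back for the deactivation to take effect. The sketch below is a standalone illustration of that GICC_IAR layout; the field positions come from the GICv2 architecture specification rather than from this patch, and the example value is made up.

/*
 * Illustrative only: GICC_IAR field layout. For SGIs (INTID 0-15) the
 * register also reports the requesting CPU, and the same raw value has
 * to be written back to GICC_EOIR, which is why the patch stashes it in
 * the per-CPU sgi_intid variable instead of using gic_irq(d).
 */
#include <stdio.h>

#define GICC_IAR_INTID_MASK	0x3ffU	/* bits [9:0]: interrupt ID */
#define GICC_IAR_CPUID_MASK	0x1c00U	/* bits [12:10]: requesting CPU (SGIs only) */

int main(void)
{
	unsigned int iar = 0x0c05;	/* example: SGI 5 signalled by CPU 3 */
	unsigned int intid = iar & GICC_IAR_INTID_MASK;
	unsigned int src = (iar & GICC_IAR_CPUID_MASK) >> 10;

	printf("INTID %u from CPU %u\n", intid, src);
	if (intid < 16)
		printf("EOI must be written with 0x%04x, not just %u\n", iar, intid);
	return 0;
}
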
@@ -417,6 +403,7 @@ static const struct irq_chip gic_chip = {
.irq_unmask = gic_unmask_irq,
.irq_eoi = gic_eoi_irq,
.irq_set_type = gic_set_type,
+ .irq_retrigger = gic_retrigger,
.irq_get_irqchip_state = gic_irq_get_irqchip_state,
.irq_set_irqchip_state = gic_irq_set_irqchip_state,
.flags = IRQCHIP_SET_TYPE_MASKED |
@@ -728,11 +715,6 @@ static int gic_notifier(struct notifier_block *self, unsigned long cmd, void *v)
int i;
for (i = 0; i < CONFIG_ARM_GIC_MAX_NR; i++) {
-#ifdef CONFIG_GIC_NON_BANKED
- /* Skip over unused GICs */
- if (!gic_data[i].get_base)
- continue;
-#endif
switch (cmd) {
case CPU_PM_ENTER:
gic_cpu_save(&gic_data[i]);
@@ -795,14 +777,34 @@ static int gic_pm_init(struct gic_chip_data *gic)
#endif
#ifdef CONFIG_SMP
-static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
+static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
+ bool force)
+{
+ void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + gic_irq(d);
+ unsigned int cpu;
+
+ if (!force)
+ cpu = cpumask_any_and(mask_val, cpu_online_mask);
+ else
+ cpu = cpumask_first(mask_val);
+
+ if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids)
+ return -EINVAL;
+
+ writeb_relaxed(gic_cpu_map[cpu], reg);
+ irq_data_update_effective_affinity(d, cpumask_of(cpu));
+
+ return IRQ_SET_MASK_OK_DONE;
+}
+
+static void gic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask)
{
int cpu;
unsigned long flags, map = 0;
if (unlikely(nr_cpu_ids == 1)) {
/* Only one CPU? let's do a self-IPI... */
- writel_relaxed(2 << 24 | irq,
+ writel_relaxed(2 << 24 | d->hwirq,
gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
return;
}
@@ -820,10 +822,41 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
dmb(ishst);
/* this always happens on GIC0 */
- writel_relaxed(map << 16 | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
+ writel_relaxed(map << 16 | d->hwirq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
gic_unlock_irqrestore(flags);
}
+
+static int gic_starting_cpu(unsigned int cpu)
+{
+ gic_cpu_init(&gic_data[0]);
+ return 0;
+}
+
+static __init void gic_smp_init(void)
+{
+ struct irq_fwspec sgi_fwspec = {
+ .fwnode = gic_data[0].domain->fwnode,
+ .param_count = 1,
+ };
+ int base_sgi;
+
+ cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GIC_STARTING,
+ "irqchip/arm/gic:starting",
+ gic_starting_cpu, NULL);
+
+ base_sgi = __irq_domain_alloc_irqs(gic_data[0].domain, -1, 8,
+ NUMA_NO_NODE, &sgi_fwspec,
+ false, NULL);
+ if (WARN_ON(base_sgi <= 0))
+ return;
+
+ set_smp_ipi_range(base_sgi, 8);
+}
+#else
+#define gic_smp_init() do { } while(0)
+#define gic_set_affinity NULL
+#define gic_ipi_send_mask NULL
#endif
#ifdef CONFIG_BL_SWITCHER
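
For reference, the GIC_DIST_SOFTINT (GICD_SGIR) value written by gic_ipi_send_mask() packs a target-list filter, a CPU target mask and the SGI number into one word; the 2 << 24 in the nr_cpu_ids == 1 path selects the "requesting CPU only" filter. The snippet below is a minimal sketch of that encoding under the GICv2 field layout; build_sgir() is a hypothetical helper for illustration, not something added by this patch.

/*
 * Illustrative only: GICD_SGIR encoding. Field positions follow the
 * GICv2 spec; the patch itself writes "map << 16 | d->hwirq" with the
 * target mask built from gic_cpu_map[].
 */
#define SGIR_TARGET_LIST	0U	/* bits [25:24] == 0: use the CPU target list */
#define SGIR_SELF_ONLY		2U	/* bits [25:24] == 2: requesting CPU only (the 2 << 24 case) */

static unsigned int build_sgir(unsigned int filter, unsigned int cpu_mask, unsigned int sgi)
{
	return (filter << 24) | ((cpu_mask & 0xff) << 16) | (sgi & 0xf);
}

int main(void)
{
	/* SGI 7 to CPUs 0 and 2: target-list filter, mask 0b101 */
	unsigned int val = build_sgir(SGIR_TARGET_LIST, 0x05, 7);
	return val == 0x00050007 ? 0 : 1;
}
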
@@ -969,17 +1002,30 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hw)
{
struct gic_chip_data *gic = d->host_data;
+ struct irq_data *irqd = irq_desc_get_irq_data(irq_to_desc(irq));
- if (hw < 32) {
+ switch (hw) {
+ case 0 ... 15:
+ irq_set_percpu_devid(irq);
+ irq_domain_set_info(d, irq, hw, &gic->chip, d->host_data,
+ handle_percpu_devid_fasteoi_ipi,
+ NULL, NULL);
+ break;
+ case 16 ... 31:
irq_set_percpu_devid(irq);
irq_domain_set_info(d, irq, hw, &gic->chip, d->host_data,
handle_percpu_devid_irq, NULL, NULL);
- } else {
+ break;
+ default:
irq_domain_set_info(d, irq, hw, &gic->chip, d->host_data,
handle_fasteoi_irq, NULL, NULL);
irq_set_probe(irq);
- irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq)));
+ irqd_set_single_target(irqd);
+ break;
}
+
+ /* Prevents SW retriggers which mess up the ACK/EOI ordering */
+ irqd_set_handle_enforce_irqctx(irqd);
return 0;
}
@@ -992,19 +1038,26 @@ static int gic_irq_domain_translate(struct irq_domain *d,
unsigned long *hwirq,
unsigned int *type)
{
+ if (fwspec->param_count == 1 && fwspec->param[0] < 16) {
+ *hwirq = fwspec->param[0];
+ *type = IRQ_TYPE_EDGE_RISING;
+ return 0;
+ }
+
if (is_of_node(fwspec->fwnode)) {
if (fwspec->param_count < 3)
return -EINVAL;
- /* Get the interrupt number and add 16 to skip over SGIs */
- *hwirq = fwspec->param[1] + 16;
-
- /*
- * For SPIs, we need to add 16 more to get the GIC irq
- * ID number
- */
- if (!fwspec->param[0])
- *hwirq += 16;
+ switch (fwspec->param[0]) {
+ case 0: /* SPI */
+ *hwirq = fwspec->param[1] + 32;
+ break;
+ case 1: /* PPI */
+ *hwirq = fwspec->param[1] + 16;
+ break;
+ default:
+ return -EINVAL;
+ }
*type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
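
With this change, a one-cell fwspec whose value is below 16 identifies an SGI directly (hwirq 0-15, always edge-rising), while the usual three-cell DT bindings keep their meaning: param[0] == 0 is an SPI (hwirq = param[1] + 32) and param[0] == 1 is a PPI (hwirq = param[1] + 16). Below is a small standalone sketch of that numbering, mirroring the switch statement above; dt_to_hwirq() is a simplified stand-in, not a kernel function.

/*
 * Illustrative only: the hwirq space the translate code produces.
 * SGIs occupy hwirq 0-15, PPIs 16-31, SPIs 32 and up.
 */
#include <stdio.h>

static long dt_to_hwirq(unsigned int cell0, unsigned int cell1)
{
	switch (cell0) {
	case 0:	return cell1 + 32;	/* SPI */
	case 1:	return cell1 + 16;	/* PPI */
	default: return -1;		/* -EINVAL in the real code */
	}
}

int main(void)
{
	printf("SPI 29 -> hwirq %ld\n", dt_to_hwirq(0, 29));	/* 61 */
	printf("PPI 13 -> hwirq %ld\n", dt_to_hwirq(1, 13));	/* 29 */
	return 0;
}
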
@@ -1027,12 +1080,6 @@ static int gic_irq_domain_translate(struct irq_domain *d,
return -EINVAL;
}
-static int gic_starting_cpu(unsigned int cpu)
-{
- gic_cpu_init(&gic_data[0]);
- return 0;
-}
-
static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
unsigned int nr_irqs, void *arg)
{
@@ -1079,10 +1126,10 @@ static void gic_init_chip(struct gic_chip_data *gic, struct device *dev,
gic->chip.irq_set_vcpu_affinity = gic_irq_set_vcpu_affinity;
}
-#ifdef CONFIG_SMP
- if (gic == &gic_data[0])
+ if (gic == &gic_data[0]) {
gic->chip.irq_set_affinity = gic_set_affinity;
-#endif
+ gic->chip.ipi_send_mask = gic_ipi_send_mask;
+ }
}
static int gic_init_bases(struct gic_chip_data *gic,
@@ -1112,7 +1159,7 @@ static int gic_init_bases(struct gic_chip_data *gic,
gic->raw_cpu_base + offset;
}
- gic_set_base_accessor(gic, gic_get_percpu_base);
+ enable_frankengic();
} else {
/* Normal, sane GIC... */
WARN(gic->percpu_offset,
@@ -1120,7 +1167,6 @@ static int gic_init_bases(struct gic_chip_data *gic,
gic->percpu_offset);
gic->dist_base.common_base = gic->raw_dist_base;
gic->cpu_base.common_base = gic->raw_cpu_base;
- gic_set_base_accessor(gic, gic_get_common_base);
}
/*
@@ -1199,12 +1245,7 @@ static int __init __gic_init_bases(struct gic_chip_data *gic,
*/
for (i = 0; i < NR_GIC_CPU_IF; i++)
gic_cpu_map[i] = 0xff;
-#ifdef CONFIG_SMP
- set_smp_cross_call(gic_raise_softirq);
-#endif
- cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GIC_STARTING,
- "irqchip/arm/gic:starting",
- gic_starting_cpu, NULL);
+
set_handle_irq(gic_handle_irq);
if (static_branch_likely(&supports_deactivate_key))
pr_info("GIC: Using split EOI/Deactivate mode\n");
@@ -1221,6 +1262,8 @@ static int __init __gic_init_bases(struct gic_chip_data *gic,
ret = gic_init_bases(gic, handle);
if (ret)
kfree(name);
+ else if (gic == &gic_data[0])
+ gic_smp_init();
return ret;
}