Diffstat (limited to 'drivers/irqchip')
-rw-r--r--  drivers/irqchip/Kconfig | 16
-rw-r--r--  drivers/irqchip/Makefile | 3
-rw-r--r--  drivers/irqchip/irq-aspeed-i2c-ic.c | 4
-rw-r--r--  drivers/irqchip/irq-brcmstb-l2.c | 171
-rw-r--r--  drivers/irqchip/irq-gic-common.c | 5
-rw-r--r--  drivers/irqchip/irq-gic-common.h | 2
-rw-r--r--  drivers/irqchip/irq-gic-v3-its.c | 390
-rw-r--r--  drivers/irqchip/irq-gic-v3.c | 50
-rw-r--r--  drivers/irqchip/irq-gic.c | 71
-rw-r--r--  drivers/irqchip/irq-meson-gpio.c | 419
-rw-r--r--  drivers/irqchip/irq-mips-gic.c | 226
-rw-r--r--  drivers/irqchip/irq-omap-intc.c | 16
-rw-r--r--  drivers/irqchip/irq-ompic.c | 202
-rw-r--r--  drivers/irqchip/irq-renesas-intc-irqpin.c | 9
-rw-r--r--  drivers/irqchip/irq-sni-exiu.c | 227
-rw-r--r--  drivers/irqchip/irq-stm32-exti.c | 206
16 files changed, 1695 insertions(+), 322 deletions(-)
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 9d8a1dd2e2c2..53380bd72ea4 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -1,3 +1,5 @@
+menu "IRQ chip support"
+
config IRQCHIP
def_bool y
depends on OF_IRQ
@@ -151,6 +153,9 @@ config CLPS711X_IRQCHIP
select SPARSE_IRQ
default y
+config OMPIC
+ bool
+
config OR1K_PIC
bool
select IRQ_DOMAIN
@@ -304,6 +309,7 @@ config EZNPS_GIC
config STM32_EXTI
bool
select IRQ_DOMAIN
+ select GENERIC_IRQ_CHIP
config QCOM_IRQ_COMBINER
bool "QCOM IRQ combiner support"
@@ -321,3 +327,13 @@ config IRQ_UNIPHIER_AIDET
select IRQ_DOMAIN_HIERARCHY
help
Support for the UniPhier AIDET (ARM Interrupt Detector).
+
+config MESON_IRQ_GPIO
+ bool "Meson GPIO Interrupt Multiplexer"
+ depends on ARCH_MESON
+ select IRQ_DOMAIN
+ select IRQ_DOMAIN_HIERARCHY
+ help
+ Support Meson SoC Family GPIO Interrupt Multiplexer
+
+endmenu
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index b842dfdc903f..dae7282bfdef 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -18,6 +18,7 @@ obj-$(CONFIG_DW_APB_ICTL) += irq-dw-apb-ictl.o
obj-$(CONFIG_METAG) += irq-metag-ext.o
obj-$(CONFIG_METAG_PERFCOUNTER_IRQS) += irq-metag.o
obj-$(CONFIG_CLPS711X_IRQCHIP) += irq-clps711x.o
+obj-$(CONFIG_OMPIC) += irq-ompic.o
obj-$(CONFIG_OR1K_PIC) += irq-or1k-pic.o
obj-$(CONFIG_ORION_IRQCHIP) += irq-orion.o
obj-$(CONFIG_OMAP_IRQCHIP) += irq-omap-intc.o
@@ -80,3 +81,5 @@ obj-$(CONFIG_ARCH_ASPEED) += irq-aspeed-vic.o irq-aspeed-i2c-ic.o
obj-$(CONFIG_STM32_EXTI) += irq-stm32-exti.o
obj-$(CONFIG_QCOM_IRQ_COMBINER) += qcom-irq-combiner.o
obj-$(CONFIG_IRQ_UNIPHIER_AIDET) += irq-uniphier-aidet.o
+obj-$(CONFIG_ARCH_SYNQUACER) += irq-sni-exiu.o
+obj-$(CONFIG_MESON_IRQ_GPIO) += irq-meson-gpio.o
diff --git a/drivers/irqchip/irq-aspeed-i2c-ic.c b/drivers/irqchip/irq-aspeed-i2c-ic.c
index 815b88dd18f2..f20200af0992 100644
--- a/drivers/irqchip/irq-aspeed-i2c-ic.c
+++ b/drivers/irqchip/irq-aspeed-i2c-ic.c
@@ -76,8 +76,8 @@ static int __init aspeed_i2c_ic_of_init(struct device_node *node,
return -ENOMEM;
i2c_ic->base = of_iomap(node, 0);
- if (IS_ERR(i2c_ic->base)) {
- ret = PTR_ERR(i2c_ic->base);
+ if (!i2c_ic->base) {
+ ret = -ENOMEM;
goto err_free_ic;
}
diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c
index b009b916a292..691d20eb0bec 100644
--- a/drivers/irqchip/irq-brcmstb-l2.c
+++ b/drivers/irqchip/irq-brcmstb-l2.c
@@ -1,7 +1,7 @@
/*
* Generic Broadcom Set Top Box Level 2 Interrupt controller driver
*
- * Copyright (C) 2014 Broadcom Corporation
+ * Copyright (C) 2014-2017 Broadcom
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -31,35 +31,82 @@
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
-/* Register offsets in the L2 interrupt controller */
-#define CPU_STATUS 0x00
-#define CPU_SET 0x04
-#define CPU_CLEAR 0x08
-#define CPU_MASK_STATUS 0x0c
-#define CPU_MASK_SET 0x10
-#define CPU_MASK_CLEAR 0x14
+struct brcmstb_intc_init_params {
+ irq_flow_handler_t handler;
+ int cpu_status;
+ int cpu_clear;
+ int cpu_mask_status;
+ int cpu_mask_set;
+ int cpu_mask_clear;
+};
+
+/* Register offsets in the L2 latched interrupt controller */
+static const struct brcmstb_intc_init_params l2_edge_intc_init = {
+ .handler = handle_edge_irq,
+ .cpu_status = 0x00,
+ .cpu_clear = 0x08,
+ .cpu_mask_status = 0x0c,
+ .cpu_mask_set = 0x10,
+ .cpu_mask_clear = 0x14
+};
+
+/* Register offsets in the L2 level interrupt controller */
+static const struct brcmstb_intc_init_params l2_lvl_intc_init = {
+ .handler = handle_level_irq,
+ .cpu_status = 0x00,
+ .cpu_clear = -1, /* Register not present */
+ .cpu_mask_status = 0x04,
+ .cpu_mask_set = 0x08,
+ .cpu_mask_clear = 0x0C
+};
/* L2 intc private data structure */
struct brcmstb_l2_intc_data {
- int parent_irq;
- void __iomem *base;
struct irq_domain *domain;
+ struct irq_chip_generic *gc;
+ int status_offset;
+ int mask_offset;
bool can_wake;
u32 saved_mask; /* for suspend/resume */
};
+/**
+ * brcmstb_l2_mask_and_ack - Mask and ack pending interrupt
+ * @d: irq_data
+ *
+ * Chip has separate enable/disable registers instead of a single mask
+ * register and pending interrupt is acknowledged by setting a bit.
+ *
+ * Note: This function is generic and could easily be added to the
+ * generic irqchip implementation if there ever becomes a will to do so.
+ * Perhaps with a name like irq_gc_mask_disable_and_ack_set().
+ *
+ * e.g.: https://patchwork.kernel.org/patch/9831047/
+ */
+static void brcmstb_l2_mask_and_ack(struct irq_data *d)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct irq_chip_type *ct = irq_data_get_chip_type(d);
+ u32 mask = d->mask;
+
+ irq_gc_lock(gc);
+ irq_reg_writel(gc, mask, ct->regs.disable);
+ *ct->mask_cache &= ~mask;
+ irq_reg_writel(gc, mask, ct->regs.ack);
+ irq_gc_unlock(gc);
+}
+
static void brcmstb_l2_intc_irq_handle(struct irq_desc *desc)
{
struct brcmstb_l2_intc_data *b = irq_desc_get_handler_data(desc);
- struct irq_chip_generic *gc = irq_get_domain_generic_chip(b->domain, 0);
struct irq_chip *chip = irq_desc_get_chip(desc);
unsigned int irq;
u32 status;
chained_irq_enter(chip, desc);
- status = irq_reg_readl(gc, CPU_STATUS) &
- ~(irq_reg_readl(gc, CPU_MASK_STATUS));
+ status = irq_reg_readl(b->gc, b->status_offset) &
+ ~(irq_reg_readl(b->gc, b->mask_offset));
if (status == 0) {
raw_spin_lock(&desc->lock);
@@ -70,10 +117,8 @@ static void brcmstb_l2_intc_irq_handle(struct irq_desc *desc)
do {
irq = ffs(status) - 1;
- /* ack at our level */
- irq_reg_writel(gc, 1 << irq, CPU_CLEAR);
status &= ~(1 << irq);
- generic_handle_irq(irq_find_mapping(b->domain, irq));
+ generic_handle_irq(irq_linear_revmap(b->domain, irq));
} while (status);
out:
chained_irq_exit(chip, desc);
@@ -82,16 +127,17 @@ out:
static void brcmstb_l2_intc_suspend(struct irq_data *d)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct irq_chip_type *ct = irq_data_get_chip_type(d);
struct brcmstb_l2_intc_data *b = gc->private;
irq_gc_lock(gc);
/* Save the current mask */
- b->saved_mask = irq_reg_readl(gc, CPU_MASK_STATUS);
+ b->saved_mask = irq_reg_readl(gc, ct->regs.mask);
if (b->can_wake) {
/* Program the wakeup mask */
- irq_reg_writel(gc, ~gc->wake_active, CPU_MASK_SET);
- irq_reg_writel(gc, gc->wake_active, CPU_MASK_CLEAR);
+ irq_reg_writel(gc, ~gc->wake_active, ct->regs.disable);
+ irq_reg_writel(gc, gc->wake_active, ct->regs.enable);
}
irq_gc_unlock(gc);
}
@@ -99,49 +145,56 @@ static void brcmstb_l2_intc_suspend(struct irq_data *d)
static void brcmstb_l2_intc_resume(struct irq_data *d)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct irq_chip_type *ct = irq_data_get_chip_type(d);
struct brcmstb_l2_intc_data *b = gc->private;
irq_gc_lock(gc);
- /* Clear unmasked non-wakeup interrupts */
- irq_reg_writel(gc, ~b->saved_mask & ~gc->wake_active, CPU_CLEAR);
+ if (ct->chip.irq_ack) {
+ /* Clear unmasked non-wakeup interrupts */
+ irq_reg_writel(gc, ~b->saved_mask & ~gc->wake_active,
+ ct->regs.ack);
+ }
/* Restore the saved mask */
- irq_reg_writel(gc, b->saved_mask, CPU_MASK_SET);
- irq_reg_writel(gc, ~b->saved_mask, CPU_MASK_CLEAR);
+ irq_reg_writel(gc, b->saved_mask, ct->regs.disable);
+ irq_reg_writel(gc, ~b->saved_mask, ct->regs.enable);
irq_gc_unlock(gc);
}
static int __init brcmstb_l2_intc_of_init(struct device_node *np,
- struct device_node *parent)
+ struct device_node *parent,
+ const struct brcmstb_intc_init_params
+ *init_params)
{
unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
struct brcmstb_l2_intc_data *data;
- struct irq_chip_generic *gc;
struct irq_chip_type *ct;
int ret;
unsigned int flags;
+ int parent_irq;
+ void __iomem *base;
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- data->base = of_iomap(np, 0);
- if (!data->base) {
+ base = of_iomap(np, 0);
+ if (!base) {
pr_err("failed to remap intc L2 registers\n");
ret = -ENOMEM;
goto out_free;
}
/* Disable all interrupts by default */
- writel(0xffffffff, data->base + CPU_MASK_SET);
+ writel(0xffffffff, base + init_params->cpu_mask_set);
/* Wakeup interrupts may be retained from S5 (cold boot) */
data->can_wake = of_property_read_bool(np, "brcm,irq-can-wake");
- if (!data->can_wake)
- writel(0xffffffff, data->base + CPU_CLEAR);
+ if (!data->can_wake && (init_params->cpu_clear >= 0))
+ writel(0xffffffff, base + init_params->cpu_clear);
- data->parent_irq = irq_of_parse_and_map(np, 0);
- if (!data->parent_irq) {
+ parent_irq = irq_of_parse_and_map(np, 0);
+ if (!parent_irq) {
pr_err("failed to find parent interrupt\n");
ret = -EINVAL;
goto out_unmap;
@@ -163,29 +216,39 @@ static int __init brcmstb_l2_intc_of_init(struct device_node *np,
/* Allocate a single Generic IRQ chip for this node */
ret = irq_alloc_domain_generic_chips(data->domain, 32, 1,
- np->full_name, handle_edge_irq, clr, 0, flags);
+ np->full_name, init_params->handler, clr, 0, flags);
if (ret) {
pr_err("failed to allocate generic irq chip\n");
goto out_free_domain;
}
/* Set the IRQ chaining logic */
- irq_set_chained_handler_and_data(data->parent_irq,
+ irq_set_chained_handler_and_data(parent_irq,
brcmstb_l2_intc_irq_handle, data);
- gc = irq_get_domain_generic_chip(data->domain, 0);
- gc->reg_base = data->base;
- gc->private = data;
- ct = gc->chip_types;
-
- ct->chip.irq_ack = irq_gc_ack_set_bit;
- ct->regs.ack = CPU_CLEAR;
+ data->gc = irq_get_domain_generic_chip(data->domain, 0);
+ data->gc->reg_base = base;
+ data->gc->private = data;
+ data->status_offset = init_params->cpu_status;
+ data->mask_offset = init_params->cpu_mask_status;
+
+ ct = data->gc->chip_types;
+
+ if (init_params->cpu_clear >= 0) {
+ ct->regs.ack = init_params->cpu_clear;
+ ct->chip.irq_ack = irq_gc_ack_set_bit;
+ ct->chip.irq_mask_ack = brcmstb_l2_mask_and_ack;
+ } else {
+ /* No Ack - but still slightly more efficient to define this */
+ ct->chip.irq_mask_ack = irq_gc_mask_disable_reg;
+ }
ct->chip.irq_mask = irq_gc_mask_disable_reg;
- ct->regs.disable = CPU_MASK_SET;
+ ct->regs.disable = init_params->cpu_mask_set;
+ ct->regs.mask = init_params->cpu_mask_status;
ct->chip.irq_unmask = irq_gc_unmask_enable_reg;
- ct->regs.enable = CPU_MASK_CLEAR;
+ ct->regs.enable = init_params->cpu_mask_clear;
ct->chip.irq_suspend = brcmstb_l2_intc_suspend;
ct->chip.irq_resume = brcmstb_l2_intc_resume;
@@ -195,21 +258,35 @@ static int __init brcmstb_l2_intc_of_init(struct device_node *np,
/* This IRQ chip can wake the system, set all child interrupts
* in wake_enabled mask
*/
- gc->wake_enabled = 0xffffffff;
+ data->gc->wake_enabled = 0xffffffff;
ct->chip.irq_set_wake = irq_gc_set_wake;
}
pr_info("registered L2 intc (mem: 0x%p, parent irq: %d)\n",
- data->base, data->parent_irq);
+ base, parent_irq);
return 0;
out_free_domain:
irq_domain_remove(data->domain);
out_unmap:
- iounmap(data->base);
+ iounmap(base);
out_free:
kfree(data);
return ret;
}
-IRQCHIP_DECLARE(brcmstb_l2_intc, "brcm,l2-intc", brcmstb_l2_intc_of_init);
+
+int __init brcmstb_l2_edge_intc_of_init(struct device_node *np,
+ struct device_node *parent)
+{
+ return brcmstb_l2_intc_of_init(np, parent, &l2_edge_intc_init);
+}
+IRQCHIP_DECLARE(brcmstb_l2_intc, "brcm,l2-intc", brcmstb_l2_edge_intc_of_init);
+
+int __init brcmstb_l2_lvl_intc_of_init(struct device_node *np,
+ struct device_node *parent)
+{
+ return brcmstb_l2_intc_of_init(np, parent, &l2_lvl_intc_init);
+}
+IRQCHIP_DECLARE(bcm7271_l2_intc, "brcm,bcm7271-l2-intc",
+ brcmstb_l2_lvl_intc_of_init);
diff --git a/drivers/irqchip/irq-gic-common.c b/drivers/irqchip/irq-gic-common.c
index 9ae71804b5dd..30017df5b54c 100644
--- a/drivers/irqchip/irq-gic-common.c
+++ b/drivers/irqchip/irq-gic-common.c
@@ -40,8 +40,9 @@ void gic_enable_quirks(u32 iidr, const struct gic_quirk *quirks,
for (; quirks->desc; quirks++) {
if (quirks->iidr != (quirks->mask & iidr))
continue;
- quirks->init(data);
- pr_info("GIC: enabling workaround for %s\n", quirks->desc);
+ if (quirks->init(data))
+ pr_info("GIC: enabling workaround for %s\n",
+ quirks->desc);
}
}
diff --git a/drivers/irqchip/irq-gic-common.h b/drivers/irqchip/irq-gic-common.h
index 205e5fddf6da..3919cd7c5285 100644
--- a/drivers/irqchip/irq-gic-common.h
+++ b/drivers/irqchip/irq-gic-common.h
@@ -23,7 +23,7 @@
struct gic_quirk {
const char *desc;
- void (*init)(void *data);
+ bool (*init)(void *data);
u32 iidr;
u32 mask;
};
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index e88395605e32..4039e64cd342 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -83,6 +83,8 @@ struct its_baser {
u32 psz;
};
+struct its_device;
+
/*
* The ITS structure - contains most of the infrastructure, with the
* top-level MSI domain, the command queue, the collections, and the
@@ -97,12 +99,18 @@ struct its_node {
struct its_cmd_block *cmd_write;
struct its_baser tables[GITS_BASER_NR_REGS];
struct its_collection *collections;
+ struct fwnode_handle *fwnode_handle;
+ u64 (*get_msi_base)(struct its_device *its_dev);
struct list_head its_device_list;
u64 flags;
+ unsigned long list_nr;
u32 ite_size;
u32 device_ids;
int numa_node;
+ unsigned int msi_domain_flags;
+ u32 pre_its_base; /* for Socionext Synquacer */
bool is_v4;
+ int vlpi_redist_offset;
};
#define ITS_ITT_ALIGN SZ_256
@@ -152,12 +160,6 @@ static DEFINE_SPINLOCK(its_lock);
static struct rdists *gic_rdists;
static struct irq_domain *its_parent;
-/*
- * We have a maximum number of 16 ITSs in the whole system if we're
- * using the ITSList mechanism
- */
-#define ITS_LIST_MAX 16
-
static unsigned long its_list_map;
static u16 vmovp_seq_num;
static DEFINE_RAW_SPINLOCK(vmovp_lock);
@@ -272,10 +274,12 @@ struct its_cmd_block {
#define ITS_CMD_QUEUE_SZ SZ_64K
#define ITS_CMD_QUEUE_NR_ENTRIES (ITS_CMD_QUEUE_SZ / sizeof(struct its_cmd_block))
-typedef struct its_collection *(*its_cmd_builder_t)(struct its_cmd_block *,
+typedef struct its_collection *(*its_cmd_builder_t)(struct its_node *,
+ struct its_cmd_block *,
struct its_cmd_desc *);
-typedef struct its_vpe *(*its_cmd_vbuilder_t)(struct its_cmd_block *,
+typedef struct its_vpe *(*its_cmd_vbuilder_t)(struct its_node *,
+ struct its_cmd_block *,
struct its_cmd_desc *);
static void its_mask_encode(u64 *raw_cmd, u64 val, int h, int l)
@@ -379,7 +383,8 @@ static inline void its_fixup_cmd(struct its_cmd_block *cmd)
cmd->raw_cmd[3] = cpu_to_le64(cmd->raw_cmd[3]);
}
-static struct its_collection *its_build_mapd_cmd(struct its_cmd_block *cmd,
+static struct its_collection *its_build_mapd_cmd(struct its_node *its,
+ struct its_cmd_block *cmd,
struct its_cmd_desc *desc)
{
unsigned long itt_addr;
@@ -399,7 +404,8 @@ static struct its_collection *its_build_mapd_cmd(struct its_cmd_block *cmd,
return NULL;
}
-static struct its_collection *its_build_mapc_cmd(struct its_cmd_block *cmd,
+static struct its_collection *its_build_mapc_cmd(struct its_node *its,
+ struct its_cmd_block *cmd,
struct its_cmd_desc *desc)
{
its_encode_cmd(cmd, GITS_CMD_MAPC);
@@ -412,7 +418,8 @@ static struct its_collection *its_build_mapc_cmd(struct its_cmd_block *cmd,
return desc->its_mapc_cmd.col;
}
-static struct its_collection *its_build_mapti_cmd(struct its_cmd_block *cmd,
+static struct its_collection *its_build_mapti_cmd(struct its_node *its,
+ struct its_cmd_block *cmd,
struct its_cmd_desc *desc)
{
struct its_collection *col;
@@ -431,7 +438,8 @@ static struct its_collection *its_build_mapti_cmd(struct its_cmd_block *cmd,
return col;
}
-static struct its_collection *its_build_movi_cmd(struct its_cmd_block *cmd,
+static struct its_collection *its_build_movi_cmd(struct its_node *its,
+ struct its_cmd_block *cmd,
struct its_cmd_desc *desc)
{
struct its_collection *col;
@@ -449,7 +457,8 @@ static struct its_collection *its_build_movi_cmd(struct its_cmd_block *cmd,
return col;
}
-static struct its_collection *its_build_discard_cmd(struct its_cmd_block *cmd,
+static struct its_collection *its_build_discard_cmd(struct its_node *its,
+ struct its_cmd_block *cmd,
struct its_cmd_desc *desc)
{
struct its_collection *col;
@@ -466,7 +475,8 @@ static struct its_collection *its_build_discard_cmd(struct its_cmd_block *cmd,
return col;
}
-static struct its_collection *its_build_inv_cmd(struct its_cmd_block *cmd,
+static struct its_collection *its_build_inv_cmd(struct its_node *its,
+ struct its_cmd_block *cmd,
struct its_cmd_desc *desc)
{
struct its_collection *col;
@@ -483,7 +493,8 @@ static struct its_collection *its_build_inv_cmd(struct its_cmd_block *cmd,
return col;
}
-static struct its_collection *its_build_int_cmd(struct its_cmd_block *cmd,
+static struct its_collection *its_build_int_cmd(struct its_node *its,
+ struct its_cmd_block *cmd,
struct its_cmd_desc *desc)
{
struct its_collection *col;
@@ -500,7 +511,8 @@ static struct its_collection *its_build_int_cmd(struct its_cmd_block *cmd,
return col;
}
-static struct its_collection *its_build_clear_cmd(struct its_cmd_block *cmd,
+static struct its_collection *its_build_clear_cmd(struct its_node *its,
+ struct its_cmd_block *cmd,
struct its_cmd_desc *desc)
{
struct its_collection *col;
@@ -517,7 +529,8 @@ static struct its_collection *its_build_clear_cmd(struct its_cmd_block *cmd,
return col;
}
-static struct its_collection *its_build_invall_cmd(struct its_cmd_block *cmd,
+static struct its_collection *its_build_invall_cmd(struct its_node *its,
+ struct its_cmd_block *cmd,
struct its_cmd_desc *desc)
{
its_encode_cmd(cmd, GITS_CMD_INVALL);
@@ -528,7 +541,8 @@ static struct its_collection *its_build_invall_cmd(struct its_cmd_block *cmd,
return NULL;
}
-static struct its_vpe *its_build_vinvall_cmd(struct its_cmd_block *cmd,
+static struct its_vpe *its_build_vinvall_cmd(struct its_node *its,
+ struct its_cmd_block *cmd,
struct its_cmd_desc *desc)
{
its_encode_cmd(cmd, GITS_CMD_VINVALL);
@@ -539,17 +553,20 @@ static struct its_vpe *its_build_vinvall_cmd(struct its_cmd_block *cmd,
return desc->its_vinvall_cmd.vpe;
}
-static struct its_vpe *its_build_vmapp_cmd(struct its_cmd_block *cmd,
+static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
+ struct its_cmd_block *cmd,
struct its_cmd_desc *desc)
{
unsigned long vpt_addr;
+ u64 target;
vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page));
+ target = desc->its_vmapp_cmd.col->target_address + its->vlpi_redist_offset;
its_encode_cmd(cmd, GITS_CMD_VMAPP);
its_encode_vpeid(cmd, desc->its_vmapp_cmd.vpe->vpe_id);
its_encode_valid(cmd, desc->its_vmapp_cmd.valid);
- its_encode_target(cmd, desc->its_vmapp_cmd.col->target_address);
+ its_encode_target(cmd, target);
its_encode_vpt_addr(cmd, vpt_addr);
its_encode_vpt_size(cmd, LPI_NRBITS - 1);
@@ -558,7 +575,8 @@ static struct its_vpe *its_build_vmapp_cmd(struct its_cmd_block *cmd,
return desc->its_vmapp_cmd.vpe;
}
-static struct its_vpe *its_build_vmapti_cmd(struct its_cmd_block *cmd,
+static struct its_vpe *its_build_vmapti_cmd(struct its_node *its,
+ struct its_cmd_block *cmd,
struct its_cmd_desc *desc)
{
u32 db;
@@ -580,7 +598,8 @@ static struct its_vpe *its_build_vmapti_cmd(struct its_cmd_block *cmd,
return desc->its_vmapti_cmd.vpe;
}
-static struct its_vpe *its_build_vmovi_cmd(struct its_cmd_block *cmd,
+static struct its_vpe *its_build_vmovi_cmd(struct its_node *its,
+ struct its_cmd_block *cmd,
struct its_cmd_desc *desc)
{
u32 db;
@@ -602,14 +621,18 @@ static struct its_vpe *its_build_vmovi_cmd(struct its_cmd_block *cmd,
return desc->its_vmovi_cmd.vpe;
}
-static struct its_vpe *its_build_vmovp_cmd(struct its_cmd_block *cmd,
+static struct its_vpe *its_build_vmovp_cmd(struct its_node *its,
+ struct its_cmd_block *cmd,
struct its_cmd_desc *desc)
{
+ u64 target;
+
+ target = desc->its_vmovp_cmd.col->target_address + its->vlpi_redist_offset;
its_encode_cmd(cmd, GITS_CMD_VMOVP);
its_encode_seq_num(cmd, desc->its_vmovp_cmd.seq_num);
its_encode_its_list(cmd, desc->its_vmovp_cmd.its_list);
its_encode_vpeid(cmd, desc->its_vmovp_cmd.vpe->vpe_id);
- its_encode_target(cmd, desc->its_vmovp_cmd.col->target_address);
+ its_encode_target(cmd, target);
its_fixup_cmd(cmd);
@@ -688,9 +711,9 @@ static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd)
dsb(ishst);
}
-static void its_wait_for_range_completion(struct its_node *its,
- struct its_cmd_block *from,
- struct its_cmd_block *to)
+static int its_wait_for_range_completion(struct its_node *its,
+ struct its_cmd_block *from,
+ struct its_cmd_block *to)
{
u64 rd_idx, from_idx, to_idx;
u32 count = 1000000; /* 1s! */
@@ -711,12 +734,15 @@ static void its_wait_for_range_completion(struct its_node *its,
count--;
if (!count) {
- pr_err_ratelimited("ITS queue timeout\n");
- return;
+ pr_err_ratelimited("ITS queue timeout (%llu %llu %llu)\n",
+ from_idx, to_idx, rd_idx);
+ return -1;
}
cpu_relax();
udelay(1);
}
+
+ return 0;
}
/* Warning, macro hell follows */
@@ -736,7 +762,7 @@ void name(struct its_node *its, \
raw_spin_unlock_irqrestore(&its->lock, flags); \
return; \
} \
- sync_obj = builder(cmd, desc); \
+ sync_obj = builder(its, cmd, desc); \
its_flush_cmd(its, cmd); \
\
if (sync_obj) { \
@@ -744,7 +770,7 @@ void name(struct its_node *its, \
if (!sync_cmd) \
goto post; \
\
- buildfn(sync_cmd, sync_obj); \
+ buildfn(its, sync_cmd, sync_obj); \
its_flush_cmd(its, sync_cmd); \
} \
\
@@ -752,10 +778,12 @@ post: \
next_cmd = its_post_commands(its); \
raw_spin_unlock_irqrestore(&its->lock, flags); \
\
- its_wait_for_range_completion(its, cmd, next_cmd); \
+ if (its_wait_for_range_completion(its, cmd, next_cmd)) \
+ pr_err_ratelimited("ITS cmd %ps failed\n", builder); \
}
-static void its_build_sync_cmd(struct its_cmd_block *sync_cmd,
+static void its_build_sync_cmd(struct its_node *its,
+ struct its_cmd_block *sync_cmd,
struct its_collection *sync_col)
{
its_encode_cmd(sync_cmd, GITS_CMD_SYNC);
@@ -767,7 +795,8 @@ static void its_build_sync_cmd(struct its_cmd_block *sync_cmd,
static BUILD_SINGLE_CMD_FUNC(its_send_single_command, its_cmd_builder_t,
struct its_collection, its_build_sync_cmd)
-static void its_build_vsync_cmd(struct its_cmd_block *sync_cmd,
+static void its_build_vsync_cmd(struct its_node *its,
+ struct its_cmd_block *sync_cmd,
struct its_vpe *sync_vpe)
{
its_encode_cmd(sync_cmd, GITS_CMD_VSYNC);
@@ -899,21 +928,16 @@ static void its_send_vmovi(struct its_device *dev, u32 id)
its_send_single_vcommand(dev->its, its_build_vmovi_cmd, &desc);
}
-static void its_send_vmapp(struct its_vpe *vpe, bool valid)
+static void its_send_vmapp(struct its_node *its,
+ struct its_vpe *vpe, bool valid)
{
struct its_cmd_desc desc;
- struct its_node *its;
desc.its_vmapp_cmd.vpe = vpe;
desc.its_vmapp_cmd.valid = valid;
+ desc.its_vmapp_cmd.col = &its->collections[vpe->col_idx];
- list_for_each_entry(its, &its_nodes, entry) {
- if (!its->is_v4)
- continue;
-
- desc.its_vmapp_cmd.col = &its->collections[vpe->col_idx];
- its_send_single_vcommand(its, its_build_vmapp_cmd, &desc);
- }
+ its_send_single_vcommand(its, its_build_vmapp_cmd, &desc);
}
static void its_send_vmovp(struct its_vpe *vpe)
@@ -951,6 +975,9 @@ static void its_send_vmovp(struct its_vpe *vpe)
if (!its->is_v4)
continue;
+ if (!vpe->its_vm->vlpi_count[its->list_nr])
+ continue;
+
desc.its_vmovp_cmd.col = &its->collections[col_id];
its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
}
@@ -958,18 +985,12 @@ static void its_send_vmovp(struct its_vpe *vpe)
raw_spin_unlock_irqrestore(&vmovp_lock, flags);
}
-static void its_send_vinvall(struct its_vpe *vpe)
+static void its_send_vinvall(struct its_node *its, struct its_vpe *vpe)
{
struct its_cmd_desc desc;
- struct its_node *its;
desc.its_vinvall_cmd.vpe = vpe;
-
- list_for_each_entry(its, &its_nodes, entry) {
- if (!its->is_v4)
- continue;
- its_send_single_vcommand(its, its_build_vinvall_cmd, &desc);
- }
+ its_send_single_vcommand(its, its_build_vinvall_cmd, &desc);
}
/*
@@ -991,9 +1012,15 @@ static void lpi_write_config(struct irq_data *d, u8 clr, u8 set)
if (irqd_is_forwarded_to_vcpu(d)) {
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
u32 event = its_get_event_id(d);
+ struct its_vlpi_map *map;
prop_page = its_dev->event_map.vm->vprop_page;
- hwirq = its_dev->event_map.vlpi_maps[event].vintid;
+ map = &its_dev->event_map.vlpi_maps[event];
+ hwirq = map->vintid;
+
+ /* Remember the updated property */
+ map->properties &= ~clr;
+ map->properties |= set | LPI_PROP_GROUP1;
} else {
prop_page = gic_rdists->prop_page;
hwirq = d->hwirq;
@@ -1099,6 +1126,13 @@ static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
return IRQ_SET_MASK_OK_DONE;
}
+static u64 its_irq_get_msi_base(struct its_device *its_dev)
+{
+ struct its_node *its = its_dev->its;
+
+ return its->phys_base + GITS_TRANSLATER;
+}
+
static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
@@ -1106,7 +1140,7 @@ static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
u64 addr;
its = its_dev->its;
- addr = its->phys_base + GITS_TRANSLATER;
+ addr = its->get_msi_base(its_dev);
msg->address_lo = lower_32_bits(addr);
msg->address_hi = upper_32_bits(addr);
@@ -1133,6 +1167,60 @@ static int its_irq_set_irqchip_state(struct irq_data *d,
return 0;
}
+static void its_map_vm(struct its_node *its, struct its_vm *vm)
+{
+ unsigned long flags;
+
+ /* Not using the ITS list? Everything is always mapped. */
+ if (!its_list_map)
+ return;
+
+ raw_spin_lock_irqsave(&vmovp_lock, flags);
+
+ /*
+ * If the VM wasn't mapped yet, iterate over the vpes and get
+ * them mapped now.
+ */
+ vm->vlpi_count[its->list_nr]++;
+
+ if (vm->vlpi_count[its->list_nr] == 1) {
+ int i;
+
+ for (i = 0; i < vm->nr_vpes; i++) {
+ struct its_vpe *vpe = vm->vpes[i];
+ struct irq_data *d = irq_get_irq_data(vpe->irq);
+
+ /* Map the VPE to the first possible CPU */
+ vpe->col_idx = cpumask_first(cpu_online_mask);
+ its_send_vmapp(its, vpe, true);
+ its_send_vinvall(its, vpe);
+ irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
+ }
+ }
+
+ raw_spin_unlock_irqrestore(&vmovp_lock, flags);
+}
+
+static void its_unmap_vm(struct its_node *its, struct its_vm *vm)
+{
+ unsigned long flags;
+
+ /* Not using the ITS list? Everything is always mapped. */
+ if (!its_list_map)
+ return;
+
+ raw_spin_lock_irqsave(&vmovp_lock, flags);
+
+ if (!--vm->vlpi_count[its->list_nr]) {
+ int i;
+
+ for (i = 0; i < vm->nr_vpes; i++)
+ its_send_vmapp(its, vm->vpes[i], false);
+ }
+
+ raw_spin_unlock_irqrestore(&vmovp_lock, flags);
+}
+
static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info)
{
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
@@ -1168,12 +1256,23 @@ static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info)
/* Already mapped, move it around */
its_send_vmovi(its_dev, event);
} else {
+ /* Ensure all the VPEs are mapped on this ITS */
+ its_map_vm(its_dev->its, info->map->vm);
+
+ /*
+ * Flag the interrupt as forwarded so that we can
+ * start poking the virtual property table.
+ */
+ irqd_set_forwarded_to_vcpu(d);
+
+ /* Write out the property to the prop table */
+ lpi_write_config(d, 0xff, info->map->properties);
+
/* Drop the physical mapping */
its_send_discard(its_dev, event);
/* and install the virtual one */
its_send_vmapti(its_dev, event);
- irqd_set_forwarded_to_vcpu(d);
/* Increment the number of VLPIs */
its_dev->event_map.nr_vlpis++;
@@ -1229,6 +1328,9 @@ static int its_vlpi_unmap(struct irq_data *d)
LPI_PROP_ENABLED |
LPI_PROP_GROUP1));
+ /* Potentially unmap the VM from this ITS */
+ its_unmap_vm(its_dev->its, its_dev->event_map.vm);
+
/*
* Drop the refcount and make the device available again if
* this was the last VLPI.
@@ -1669,23 +1771,14 @@ static void its_free_tables(struct its_node *its)
static int its_alloc_tables(struct its_node *its)
{
- u64 typer = gic_read_typer(its->base + GITS_TYPER);
- u32 ids = GITS_TYPER_DEVBITS(typer);
u64 shr = GITS_BASER_InnerShareable;
u64 cache = GITS_BASER_RaWaWb;
u32 psz = SZ_64K;
int err, i;
- if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375) {
- /*
- * erratum 22375: only alloc 8MB table size
- * erratum 24313: ignore memory access type
- */
- cache = GITS_BASER_nCnB;
- ids = 0x14; /* 20 bits, 8MB */
- }
-
- its->device_ids = ids;
+ if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375)
+ /* erratum 24313: ignore memory access type */
+ cache = GITS_BASER_nCnB;
for (i = 0; i < GITS_BASER_NR_REGS; i++) {
struct its_baser *baser = its->tables + i;
@@ -2209,8 +2302,8 @@ static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
return 0;
}
-static void its_irq_domain_activate(struct irq_domain *domain,
- struct irq_data *d)
+static int its_irq_domain_activate(struct irq_domain *domain,
+ struct irq_data *d, bool early)
{
struct its_device *its_dev = irq_data_get_irq_chip_data(d);
u32 event = its_get_event_id(d);
@@ -2228,6 +2321,7 @@ static void its_irq_domain_activate(struct irq_domain *domain,
/* Map the GIC IRQ and event to the device */
its_send_mapti(its_dev, d->hwirq, event);
+ return 0;
}
static void its_irq_domain_deactivate(struct irq_domain *domain,
@@ -2394,6 +2488,8 @@ static int its_vpe_set_affinity(struct irq_data *d,
its_vpe_db_proxy_move(vpe, from, cpu);
}
+ irq_data_update_effective_affinity(d, cpumask_of(cpu));
+
return IRQ_SET_MASK_OK_DONE;
}
@@ -2461,6 +2557,26 @@ static void its_vpe_deschedule(struct its_vpe *vpe)
}
}
+static void its_vpe_invall(struct its_vpe *vpe)
+{
+ struct its_node *its;
+
+ list_for_each_entry(its, &its_nodes, entry) {
+ if (!its->is_v4)
+ continue;
+
+ if (its_list_map && !vpe->its_vm->vlpi_count[its->list_nr])
+ continue;
+
+ /*
+ * Sending a VINVALL to a single ITS is enough, as all
+ * we need is to reach the redistributors.
+ */
+ its_send_vinvall(its, vpe);
+ return;
+ }
+}
+
static int its_vpe_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
{
struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
@@ -2476,7 +2592,7 @@ static int its_vpe_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
return 0;
case INVALL_VPE:
- its_send_vinvall(vpe);
+ its_vpe_invall(vpe);
return 0;
default:
@@ -2701,23 +2817,51 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq
return err;
}
-static void its_vpe_irq_domain_activate(struct irq_domain *domain,
- struct irq_data *d)
+static int its_vpe_irq_domain_activate(struct irq_domain *domain,
+ struct irq_data *d, bool early)
{
struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+ struct its_node *its;
+
+ /* If we use the list map, we issue VMAPP on demand... */
+ if (its_list_map)
+ return 0;
/* Map the VPE to the first possible CPU */
vpe->col_idx = cpumask_first(cpu_online_mask);
- its_send_vmapp(vpe, true);
- its_send_vinvall(vpe);
+
+ list_for_each_entry(its, &its_nodes, entry) {
+ if (!its->is_v4)
+ continue;
+
+ its_send_vmapp(its, vpe, true);
+ its_send_vinvall(its, vpe);
+ }
+
+ irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
+
+ return 0;
}
static void its_vpe_irq_domain_deactivate(struct irq_domain *domain,
struct irq_data *d)
{
struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
+ struct its_node *its;
+
+ /*
+ * If we use the list map, we unmap the VPE once no VLPIs are
+ * associated with the VM.
+ */
+ if (its_list_map)
+ return;
- its_send_vmapp(vpe, false);
+ list_for_each_entry(its, &its_nodes, entry) {
+ if (!its->is_v4)
+ continue;
+
+ its_send_vmapp(its, vpe, false);
+ }
}
static const struct irq_domain_ops its_vpe_domain_ops = {
@@ -2760,26 +2904,85 @@ static int its_force_quiescent(void __iomem *base)
}
}
-static void __maybe_unused its_enable_quirk_cavium_22375(void *data)
+static bool __maybe_unused its_enable_quirk_cavium_22375(void *data)
{
struct its_node *its = data;
+ /* erratum 22375: only alloc 8MB table size */
+ its->device_ids = 0x14; /* 20 bits, 8MB */
its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375;
+
+ return true;
}
-static void __maybe_unused its_enable_quirk_cavium_23144(void *data)
+static bool __maybe_unused its_enable_quirk_cavium_23144(void *data)
{
struct its_node *its = data;
its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144;
+
+ return true;
}
-static void __maybe_unused its_enable_quirk_qdf2400_e0065(void *data)
+static bool __maybe_unused its_enable_quirk_qdf2400_e0065(void *data)
{
struct its_node *its = data;
/* On QDF2400, the size of the ITE is 16Bytes */
its->ite_size = 16;
+
+ return true;
+}
+
+static u64 its_irq_get_msi_base_pre_its(struct its_device *its_dev)
+{
+ struct its_node *its = its_dev->its;
+
+ /*
+ * The Socionext Synquacer SoC has a so-called 'pre-ITS',
+ * which maps 32-bit writes targeted at a separate window of
+ * size '4 << device_id_bits' onto writes to GITS_TRANSLATER
+ * with device ID taken from bits [device_id_bits + 1:2] of
+ * the window offset.
+ */
+ return its->pre_its_base + (its_dev->device_id << 2);
+}
+
+static bool __maybe_unused its_enable_quirk_socionext_synquacer(void *data)
+{
+ struct its_node *its = data;
+ u32 pre_its_window[2];
+ u32 ids;
+
+ if (!fwnode_property_read_u32_array(its->fwnode_handle,
+ "socionext,synquacer-pre-its",
+ pre_its_window,
+ ARRAY_SIZE(pre_its_window))) {
+
+ its->pre_its_base = pre_its_window[0];
+ its->get_msi_base = its_irq_get_msi_base_pre_its;
+
+ ids = ilog2(pre_its_window[1]) - 2;
+ if (its->device_ids > ids)
+ its->device_ids = ids;
+
+ /* the pre-ITS breaks isolation, so disable MSI remapping */
+ its->msi_domain_flags &= ~IRQ_DOMAIN_FLAG_MSI_REMAP;
+ return true;
+ }
+ return false;
+}
+
+static bool __maybe_unused its_enable_quirk_hip07_161600802(void *data)
+{
+ struct its_node *its = data;
+
+ /*
+ * Hip07 insists on using the wrong address for the VLPI
+ * page. Trick it into doing the right thing...
+ */
+ its->vlpi_redist_offset = SZ_128K;
+ return true;
}
static const struct gic_quirk its_quirks[] = {
@@ -2807,6 +3010,27 @@ static const struct gic_quirk its_quirks[] = {
.init = its_enable_quirk_qdf2400_e0065,
},
#endif
+#ifdef CONFIG_SOCIONEXT_SYNQUACER_PREITS
+ {
+ /*
+ * The Socionext Synquacer SoC incorporates ARM's own GIC-500
+ * implementation, but with a 'pre-ITS' added that requires
+ * special handling in software.
+ */
+ .desc = "ITS: Socionext Synquacer pre-ITS",
+ .iidr = 0x0001143b,
+ .mask = 0xffffffff,
+ .init = its_enable_quirk_socionext_synquacer,
+ },
+#endif
+#ifdef CONFIG_HISILICON_ERRATUM_161600802
+ {
+ .desc = "ITS: Hip07 erratum 161600802",
+ .iidr = 0x00000004,
+ .mask = 0xffffffff,
+ .init = its_enable_quirk_hip07_161600802,
+ },
+#endif
{
}
};
@@ -2835,7 +3059,7 @@ static int its_init_domain(struct fwnode_handle *handle, struct its_node *its)
inner_domain->parent = its_parent;
irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS);
- inner_domain->flags |= IRQ_DOMAIN_FLAG_MSI_REMAP;
+ inner_domain->flags |= its->msi_domain_flags;
info->ops = &its_msi_domain_ops;
info->data = its;
inner_domain->host_data = info;
@@ -2896,8 +3120,8 @@ static int __init its_compute_its_list_map(struct resource *res,
* locking. Should this change, we should address
* this.
*/
- its_number = find_first_zero_bit(&its_list_map, ITS_LIST_MAX);
- if (its_number >= ITS_LIST_MAX) {
+ its_number = find_first_zero_bit(&its_list_map, GICv4_ITS_LIST_MAX);
+ if (its_number >= GICv4_ITS_LIST_MAX) {
pr_err("ITS@%pa: No ITSList entry available!\n",
&res->start);
return -EINVAL;
@@ -2965,6 +3189,7 @@ static int __init its_probe_one(struct resource *res,
its->base = its_base;
its->phys_base = res->start;
its->ite_size = GITS_TYPER_ITT_ENTRY_SIZE(typer);
+ its->device_ids = GITS_TYPER_DEVBITS(typer);
its->is_v4 = !!(typer & GITS_TYPER_VLPIS);
if (its->is_v4) {
if (!(typer & GITS_TYPER_VMOVP)) {
@@ -2972,6 +3197,8 @@ static int __init its_probe_one(struct resource *res,
if (err < 0)
goto out_free_its;
+ its->list_nr = err;
+
pr_info("ITS@%pa: Using ITS number %d\n",
&res->start, err);
} else {
@@ -2988,6 +3215,9 @@ static int __init its_probe_one(struct resource *res,
goto out_free_its;
}
its->cmd_write = its->cmd_base;
+ its->fwnode_handle = handle;
+ its->get_msi_base = its_irq_get_msi_base;
+ its->msi_domain_flags = IRQ_DOMAIN_FLAG_MSI_REMAP;
its_enable_quirks(its);
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index b5df99c6f680..b54b55597ffb 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -55,6 +55,7 @@ struct gic_chip_data {
struct irq_domain *domain;
u64 redist_stride;
u32 nr_redist_regions;
+ bool has_rss;
unsigned int irq_nr;
struct partition_desc *ppi_descs[16];
};
@@ -63,7 +64,9 @@ static struct gic_chip_data gic_data __read_mostly;
static struct static_key supports_deactivate = STATIC_KEY_INIT_TRUE;
static struct gic_kvm_info gic_v3_kvm_info;
+static DEFINE_PER_CPU(bool, has_rss);
+#define MPIDR_RS(mpidr) (((mpidr) & 0xF0UL) >> 4)
#define gic_data_rdist() (this_cpu_ptr(gic_data.rdists.rdist))
#define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base)
#define gic_data_rdist_sgi_base() (gic_data_rdist_rd_base() + SZ_64K)
@@ -526,6 +529,10 @@ static void gic_update_vlpi_properties(void)
static void gic_cpu_sys_reg_init(void)
{
+ int i, cpu = smp_processor_id();
+ u64 mpidr = cpu_logical_map(cpu);
+ u64 need_rss = MPIDR_RS(mpidr);
+
/*
* Need to check that the SRE bit has actually been set. If
* not, it means that SRE is disabled at EL2. We're going to
@@ -557,6 +564,30 @@ static void gic_cpu_sys_reg_init(void)
/* ... and let's hit the road... */
gic_write_grpen1(1);
+
+ /* Keep the RSS capability status in per_cpu variable */
+ per_cpu(has_rss, cpu) = !!(gic_read_ctlr() & ICC_CTLR_EL1_RSS);
+
+ /* Check all the CPUs have capable of sending SGIs to other CPUs */
+ for_each_online_cpu(i) {
+ bool have_rss = per_cpu(has_rss, i) && per_cpu(has_rss, cpu);
+
+ need_rss |= MPIDR_RS(cpu_logical_map(i));
+ if (need_rss && (!have_rss))
+ pr_crit("CPU%d (%lx) can't SGI CPU%d (%lx), no RSS\n",
+ cpu, (unsigned long)mpidr,
+ i, (unsigned long)cpu_logical_map(i));
+ }
+
+ /**
+ * GIC spec says, when ICC_CTLR_EL1.RSS==1 and GICD_TYPER.RSS==0,
+ * writing ICC_ASGI1R_EL1 register with RS != 0 is a CONSTRAINED
+ * UNPREDICTABLE choice of :
+ * - The write is ignored.
+ * - The RS field is treated as 0.
+ */
+ if (need_rss && (!gic_data.has_rss))
+ pr_crit_once("RSS is required but GICD doesn't support it\n");
}
static int gic_dist_supports_lpis(void)
@@ -591,6 +622,9 @@ static void gic_cpu_init(void)
#ifdef CONFIG_SMP
+#define MPIDR_TO_SGI_RS(mpidr) (MPIDR_RS(mpidr) << ICC_SGI1R_RS_SHIFT)
+#define MPIDR_TO_SGI_CLUSTER_ID(mpidr) ((mpidr) & ~0xFUL)
+
static int gic_starting_cpu(unsigned int cpu)
{
gic_cpu_init();
@@ -605,13 +639,6 @@ static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
u16 tlist = 0;
while (cpu < nr_cpu_ids) {
- /*
- * If we ever get a cluster of more than 16 CPUs, just
- * scream and skip that CPU.
- */
- if (WARN_ON((mpidr & 0xff) >= 16))
- goto out;
-
tlist |= 1 << (mpidr & 0xf);
next_cpu = cpumask_next(cpu, mask);
@@ -621,7 +648,7 @@ static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
mpidr = cpu_logical_map(cpu);
- if (cluster_id != (mpidr & ~0xffUL)) {
+ if (cluster_id != MPIDR_TO_SGI_CLUSTER_ID(mpidr)) {
cpu--;
goto out;
}
@@ -643,6 +670,7 @@ static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq)
MPIDR_TO_SGI_AFFINITY(cluster_id, 2) |
irq << ICC_SGI1R_SGI_ID_SHIFT |
MPIDR_TO_SGI_AFFINITY(cluster_id, 1) |
+ MPIDR_TO_SGI_RS(cluster_id) |
tlist << ICC_SGI1R_TARGET_LIST_SHIFT);
pr_debug("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val);
@@ -663,7 +691,7 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
smp_wmb();
for_each_cpu(cpu, mask) {
- unsigned long cluster_id = cpu_logical_map(cpu) & ~0xffUL;
+ u64 cluster_id = MPIDR_TO_SGI_CLUSTER_ID(cpu_logical_map(cpu));
u16 tlist;
tlist = gic_compute_target_list(&cpu, mask, cluster_id);
@@ -1007,6 +1035,10 @@ static int __init gic_init_bases(void __iomem *dist_base,
goto out_free;
}
+ gic_data.has_rss = !!(typer & GICD_TYPER_RSS);
+ pr_info("Distributor has %sRange Selector support\n",
+ gic_data.has_rss ? "" : "no ");
+
set_handle_irq(gic_handle_irq);
gic_update_vlpi_properties();
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 651d726e8b12..f641e8e2c78d 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -1256,6 +1256,19 @@ static void gic_teardown(struct gic_chip_data *gic)
#ifdef CONFIG_OF
static int gic_cnt __initdata;
+static bool gicv2_force_probe;
+
+static int __init gicv2_force_probe_cfg(char *buf)
+{
+ return strtobool(buf, &gicv2_force_probe);
+}
+early_param("irqchip.gicv2_force_probe", gicv2_force_probe_cfg);
+
+static bool gic_check_gicv2(void __iomem *base)
+{
+ u32 val = readl_relaxed(base + GIC_CPU_IDENT);
+ return (val & 0xff0fff) == 0x02043B;
+}
static bool gic_check_eoimode(struct device_node *node, void __iomem **base)
{
@@ -1265,20 +1278,60 @@ static bool gic_check_eoimode(struct device_node *node, void __iomem **base)
if (!is_hyp_mode_available())
return false;
- if (resource_size(&cpuif_res) < SZ_8K)
- return false;
- if (resource_size(&cpuif_res) == SZ_128K) {
- u32 val_low, val_high;
+ if (resource_size(&cpuif_res) < SZ_8K) {
+ void __iomem *alt;
+ /*
+ * Check for a stupid firmware that only exposes the
+ * first page of a GICv2.
+ */
+ if (!gic_check_gicv2(*base))
+ return false;
+ if (!gicv2_force_probe) {
+ pr_warn("GIC: GICv2 detected, but range too small and irqchip.gicv2_force_probe not set\n");
+ return false;
+ }
+
+ alt = ioremap(cpuif_res.start, SZ_8K);
+ if (!alt)
+ return false;
+ if (!gic_check_gicv2(alt + SZ_4K)) {
+ /*
+ * The first page was that of a GICv2, and
+ * the second was *something*. Let's trust it
+ * to be a GICv2, and update the mapping.
+ */
+ pr_warn("GIC: GICv2 at %pa, but range is too small (broken DT?), assuming 8kB\n",
+ &cpuif_res.start);
+ iounmap(*base);
+ *base = alt;
+ return true;
+ }
+
+ /*
+ * We detected *two* initial GICv2 pages in a
+ * row. Could be a GICv2 aliased over two 64kB
+ * pages. Update the resource, map the iospace, and
+ * pray.
+ */
+ iounmap(alt);
+ alt = ioremap(cpuif_res.start, SZ_128K);
+ if (!alt)
+ return false;
+ pr_warn("GIC: Aliased GICv2 at %pa, trying to find the canonical range over 128kB\n",
+ &cpuif_res.start);
+ cpuif_res.end = cpuif_res.start + SZ_128K -1;
+ iounmap(*base);
+ *base = alt;
+ }
+ if (resource_size(&cpuif_res) == SZ_128K) {
/*
- * Verify that we have the first 4kB of a GIC400
+ * Verify that we have the first 4kB of a GICv2
* aliased over the first 64kB by checking the
* GICC_IIDR register on both ends.
*/
- val_low = readl_relaxed(*base + GIC_CPU_IDENT);
- val_high = readl_relaxed(*base + GIC_CPU_IDENT + 0xf000);
- if ((val_low & 0xffff0fff) != 0x0202043B ||
- val_low != val_high)
+ if (!gic_check_gicv2(*base) ||
+ !gic_check_gicv2(*base + 0xf000))
return false;
/*
diff --git a/drivers/irqchip/irq-meson-gpio.c b/drivers/irqchip/irq-meson-gpio.c
new file mode 100644
index 000000000000..a59bdbc0b9bb
--- /dev/null
+++ b/drivers/irqchip/irq-meson-gpio.c
@@ -0,0 +1,419 @@
+/*
+ * Copyright (c) 2015 Endless Mobile, Inc.
+ * Author: Carlo Caione <carlo@endlessm.com>
+ * Copyright (c) 2016 BayLibre, SAS.
+ * Author: Jerome Brunet <jbrunet@baylibre.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/irqchip.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+
+#define NUM_CHANNEL 8
+#define MAX_INPUT_MUX 256
+
+#define REG_EDGE_POL 0x00
+#define REG_PIN_03_SEL 0x04
+#define REG_PIN_47_SEL 0x08
+#define REG_FILTER_SEL 0x0c
+
+#define REG_EDGE_POL_MASK(x) (BIT(x) | BIT(16 + (x)))
+#define REG_EDGE_POL_EDGE(x) BIT(x)
+#define REG_EDGE_POL_LOW(x) BIT(16 + (x))
+#define REG_PIN_SEL_SHIFT(x) (((x) % 4) * 8)
+#define REG_FILTER_SEL_SHIFT(x) ((x) * 4)
+
+struct meson_gpio_irq_params {
+ unsigned int nr_hwirq;
+};
+
+static const struct meson_gpio_irq_params meson8_params = {
+ .nr_hwirq = 134,
+};
+
+static const struct meson_gpio_irq_params meson8b_params = {
+ .nr_hwirq = 119,
+};
+
+static const struct meson_gpio_irq_params gxbb_params = {
+ .nr_hwirq = 133,
+};
+
+static const struct meson_gpio_irq_params gxl_params = {
+ .nr_hwirq = 110,
+};
+
+static const struct of_device_id meson_irq_gpio_matches[] = {
+ { .compatible = "amlogic,meson8-gpio-intc", .data = &meson8_params },
+ { .compatible = "amlogic,meson8b-gpio-intc", .data = &meson8b_params },
+ { .compatible = "amlogic,meson-gxbb-gpio-intc", .data = &gxbb_params },
+ { .compatible = "amlogic,meson-gxl-gpio-intc", .data = &gxl_params },
+ { }
+};
+
+struct meson_gpio_irq_controller {
+ unsigned int nr_hwirq;
+ void __iomem *base;
+ u32 channel_irqs[NUM_CHANNEL];
+ DECLARE_BITMAP(channel_map, NUM_CHANNEL);
+ spinlock_t lock;
+};
+
+static void meson_gpio_irq_update_bits(struct meson_gpio_irq_controller *ctl,
+ unsigned int reg, u32 mask, u32 val)
+{
+ u32 tmp;
+
+ tmp = readl_relaxed(ctl->base + reg);
+ tmp &= ~mask;
+ tmp |= val;
+ writel_relaxed(tmp, ctl->base + reg);
+}
+
+static unsigned int meson_gpio_irq_channel_to_reg(unsigned int channel)
+{
+ return (channel < 4) ? REG_PIN_03_SEL : REG_PIN_47_SEL;
+}
+
+static int
+meson_gpio_irq_request_channel(struct meson_gpio_irq_controller *ctl,
+ unsigned long hwirq,
+ u32 **channel_hwirq)
+{
+ unsigned int reg, idx;
+
+ spin_lock(&ctl->lock);
+
+ /* Find a free channel */
+ idx = find_first_zero_bit(ctl->channel_map, NUM_CHANNEL);
+ if (idx >= NUM_CHANNEL) {
+ spin_unlock(&ctl->lock);
+ pr_err("No channel available\n");
+ return -ENOSPC;
+ }
+
+ /* Mark the channel as used */
+ set_bit(idx, ctl->channel_map);
+
+ /*
+ * Setup the mux of the channel to route the signal of the pad
+ * to the appropriate input of the GIC
+ */
+ reg = meson_gpio_irq_channel_to_reg(idx);
+ meson_gpio_irq_update_bits(ctl, reg,
+ 0xff << REG_PIN_SEL_SHIFT(idx),
+ hwirq << REG_PIN_SEL_SHIFT(idx));
+
+ /*
+ * Get the hwirq number assigned to this channel through
+ * a pointer to the channel_irq table. The added benefit of this
+ * method is that we can also retrieve the channel index with
+ * it, using the table base.
+ */
+ *channel_hwirq = &(ctl->channel_irqs[idx]);
+
+ spin_unlock(&ctl->lock);
+
+ pr_debug("hwirq %lu assigned to channel %d - irq %u\n",
+ hwirq, idx, **channel_hwirq);
+
+ return 0;
+}
+
+static unsigned int
+meson_gpio_irq_get_channel_idx(struct meson_gpio_irq_controller *ctl,
+ u32 *channel_hwirq)
+{
+ return channel_hwirq - ctl->channel_irqs;
+}
+
+static void
+meson_gpio_irq_release_channel(struct meson_gpio_irq_controller *ctl,
+ u32 *channel_hwirq)
+{
+ unsigned int idx;
+
+ idx = meson_gpio_irq_get_channel_idx(ctl, channel_hwirq);
+ clear_bit(idx, ctl->channel_map);
+}
+
+static int meson_gpio_irq_type_setup(struct meson_gpio_irq_controller *ctl,
+ unsigned int type,
+ u32 *channel_hwirq)
+{
+ u32 val = 0;
+ unsigned int idx;
+
+ idx = meson_gpio_irq_get_channel_idx(ctl, channel_hwirq);
+
+ /*
+ * The controller has a filter block to operate in either LEVEL or
+ * EDGE mode, then signal is sent to the GIC. To enable LEVEL_LOW and
+ * EDGE_FALLING support (which the GIC does not support), the filter
+ * block is also able to invert the input signal it gets before
+ * providing it to the GIC.
+ */
+ type &= IRQ_TYPE_SENSE_MASK;
+
+ if (type == IRQ_TYPE_EDGE_BOTH)
+ return -EINVAL;
+
+ if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
+ val |= REG_EDGE_POL_EDGE(idx);
+
+ if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_EDGE_FALLING))
+ val |= REG_EDGE_POL_LOW(idx);
+
+ spin_lock(&ctl->lock);
+
+ meson_gpio_irq_update_bits(ctl, REG_EDGE_POL,
+ REG_EDGE_POL_MASK(idx), val);
+
+ spin_unlock(&ctl->lock);
+
+ return 0;
+}
+
+static unsigned int meson_gpio_irq_type_output(unsigned int type)
+{
+ unsigned int sense = type & IRQ_TYPE_SENSE_MASK;
+
+ type &= ~IRQ_TYPE_SENSE_MASK;
+
+ /*
+ * The polarity of the signal provided to the GIC should always
+ * be high.
+ */
+ if (sense & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW))
+ type |= IRQ_TYPE_LEVEL_HIGH;
+ else if (sense & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
+ type |= IRQ_TYPE_EDGE_RISING;
+
+ return type;
+}
+
+static int meson_gpio_irq_set_type(struct irq_data *data, unsigned int type)
+{
+ struct meson_gpio_irq_controller *ctl = data->domain->host_data;
+ u32 *channel_hwirq = irq_data_get_irq_chip_data(data);
+ int ret;
+
+ ret = meson_gpio_irq_type_setup(ctl, type, channel_hwirq);
+ if (ret)
+ return ret;
+
+ return irq_chip_set_type_parent(data,
+ meson_gpio_irq_type_output(type));
+}
+
+static struct irq_chip meson_gpio_irq_chip = {
+ .name = "meson-gpio-irqchip",
+ .irq_mask = irq_chip_mask_parent,
+ .irq_unmask = irq_chip_unmask_parent,
+ .irq_eoi = irq_chip_eoi_parent,
+ .irq_set_type = meson_gpio_irq_set_type,
+ .irq_retrigger = irq_chip_retrigger_hierarchy,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+#endif
+ .flags = IRQCHIP_SET_TYPE_MASKED,
+};
+
+static int meson_gpio_irq_domain_translate(struct irq_domain *domain,
+ struct irq_fwspec *fwspec,
+ unsigned long *hwirq,
+ unsigned int *type)
+{
+ if (is_of_node(fwspec->fwnode) && fwspec->param_count == 2) {
+ *hwirq = fwspec->param[0];
+ *type = fwspec->param[1];
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int meson_gpio_irq_allocate_gic_irq(struct irq_domain *domain,
+ unsigned int virq,
+ u32 hwirq,
+ unsigned int type)
+{
+ struct irq_fwspec fwspec;
+
+ fwspec.fwnode = domain->parent->fwnode;
+ fwspec.param_count = 3;
+ fwspec.param[0] = 0; /* SPI */
+ fwspec.param[1] = hwirq;
+ fwspec.param[2] = meson_gpio_irq_type_output(type);
+
+ return irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
+}
+
+static int meson_gpio_irq_domain_alloc(struct irq_domain *domain,
+ unsigned int virq,
+ unsigned int nr_irqs,
+ void *data)
+{
+ struct irq_fwspec *fwspec = data;
+ struct meson_gpio_irq_controller *ctl = domain->host_data;
+ unsigned long hwirq;
+ u32 *channel_hwirq;
+ unsigned int type;
+ int ret;
+
+ if (WARN_ON(nr_irqs != 1))
+ return -EINVAL;
+
+ ret = meson_gpio_irq_domain_translate(domain, fwspec, &hwirq, &type);
+ if (ret)
+ return ret;
+
+ ret = meson_gpio_irq_request_channel(ctl, hwirq, &channel_hwirq);
+ if (ret)
+ return ret;
+
+ ret = meson_gpio_irq_allocate_gic_irq(domain, virq,
+ *channel_hwirq, type);
+ if (ret < 0) {
+ pr_err("failed to allocate gic irq %u\n", *channel_hwirq);
+ meson_gpio_irq_release_channel(ctl, channel_hwirq);
+ return ret;
+ }
+
+ irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
+ &meson_gpio_irq_chip, channel_hwirq);
+
+ return 0;
+}
+
+static void meson_gpio_irq_domain_free(struct irq_domain *domain,
+ unsigned int virq,
+ unsigned int nr_irqs)
+{
+ struct meson_gpio_irq_controller *ctl = domain->host_data;
+ struct irq_data *irq_data;
+ u32 *channel_hwirq;
+
+ if (WARN_ON(nr_irqs != 1))
+ return;
+
+ irq_domain_free_irqs_parent(domain, virq, 1);
+
+ irq_data = irq_domain_get_irq_data(domain, virq);
+ channel_hwirq = irq_data_get_irq_chip_data(irq_data);
+
+ meson_gpio_irq_release_channel(ctl, channel_hwirq);
+}
+
+static const struct irq_domain_ops meson_gpio_irq_domain_ops = {
+ .alloc = meson_gpio_irq_domain_alloc,
+ .free = meson_gpio_irq_domain_free,
+ .translate = meson_gpio_irq_domain_translate,
+};
+
+static int __init meson_gpio_irq_parse_dt(struct device_node *node,
+ struct meson_gpio_irq_controller *ctl)
+{
+ const struct of_device_id *match;
+ const struct meson_gpio_irq_params *params;
+ int ret;
+
+ match = of_match_node(meson_irq_gpio_matches, node);
+ if (!match)
+ return -ENODEV;
+
+ params = match->data;
+ ctl->nr_hwirq = params->nr_hwirq;
+
+ ret = of_property_read_variable_u32_array(node,
+ "amlogic,channel-interrupts",
+ ctl->channel_irqs,
+ NUM_CHANNEL,
+ NUM_CHANNEL);
+ if (ret < 0) {
+ pr_err("can't get %d channel interrupts\n", NUM_CHANNEL);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int __init meson_gpio_irq_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ struct irq_domain *domain, *parent_domain;
+ struct meson_gpio_irq_controller *ctl;
+ int ret;
+
+ if (!parent) {
+ pr_err("missing parent interrupt node\n");
+ return -ENODEV;
+ }
+
+ parent_domain = irq_find_host(parent);
+ if (!parent_domain) {
+ pr_err("unable to obtain parent domain\n");
+ return -ENXIO;
+ }
+
+ ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
+ if (!ctl)
+ return -ENOMEM;
+
+ spin_lock_init(&ctl->lock);
+
+ ctl->base = of_iomap(node, 0);
+ if (!ctl->base) {
+ ret = -ENOMEM;
+ goto free_ctl;
+ }
+
+ ret = meson_gpio_irq_parse_dt(node, ctl);
+ if (ret)
+ goto free_channel_irqs;
+
+ domain = irq_domain_create_hierarchy(parent_domain, 0, ctl->nr_hwirq,
+ of_node_to_fwnode(node),
+ &meson_gpio_irq_domain_ops,
+ ctl);
+ if (!domain) {
+ pr_err("failed to add domain\n");
+ ret = -ENODEV;
+ goto free_channel_irqs;
+ }
+
+ pr_info("%d to %d gpio interrupt mux initialized\n",
+ ctl->nr_hwirq, NUM_CHANNEL);
+
+ return 0;
+
+free_channel_irqs:
+ iounmap(ctl->base);
+free_ctl:
+ kfree(ctl);
+
+ return ret;
+}
+
+IRQCHIP_DECLARE(meson_gpio_intc, "amlogic,meson-gpio-intc",
+ meson_gpio_irq_of_init);
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index c90976d7e53c..ef92a4d2038e 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -6,8 +6,12 @@
* Copyright (C) 2008 Ralf Baechle (ralf@linux-mips.org)
* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
*/
+
+#define pr_fmt(fmt) "irq-mips-gic: " fmt
+
#include <linux/bitmap.h>
#include <linux/clocksource.h>
+#include <linux/cpuhotplug.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
@@ -48,12 +52,16 @@ static DEFINE_SPINLOCK(gic_lock);
static struct irq_domain *gic_irq_domain;
static struct irq_domain *gic_ipi_domain;
static int gic_shared_intrs;
-static int gic_vpes;
static unsigned int gic_cpu_pin;
static unsigned int timer_cpu_pin;
static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller;
-DECLARE_BITMAP(ipi_resrv, GIC_MAX_INTRS);
-DECLARE_BITMAP(ipi_available, GIC_MAX_INTRS);
+static DECLARE_BITMAP(ipi_resrv, GIC_MAX_INTRS);
+static DECLARE_BITMAP(ipi_available, GIC_MAX_INTRS);
+
+static struct gic_all_vpes_chip_data {
+ u32 map;
+ bool mask;
+} gic_all_vpes_chip_data[GIC_NUM_LOCAL_INTRS];
static void gic_clear_pcpu_masks(unsigned int intr)
{
@@ -194,46 +202,46 @@ static void gic_ack_irq(struct irq_data *d)
static int gic_set_type(struct irq_data *d, unsigned int type)
{
- unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
+ unsigned int irq, pol, trig, dual;
unsigned long flags;
- bool is_edge;
+
+ irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
spin_lock_irqsave(&gic_lock, flags);
switch (type & IRQ_TYPE_SENSE_MASK) {
case IRQ_TYPE_EDGE_FALLING:
- change_gic_pol(irq, GIC_POL_FALLING_EDGE);
- change_gic_trig(irq, GIC_TRIG_EDGE);
- change_gic_dual(irq, GIC_DUAL_SINGLE);
- is_edge = true;
+ pol = GIC_POL_FALLING_EDGE;
+ trig = GIC_TRIG_EDGE;
+ dual = GIC_DUAL_SINGLE;
break;
case IRQ_TYPE_EDGE_RISING:
- change_gic_pol(irq, GIC_POL_RISING_EDGE);
- change_gic_trig(irq, GIC_TRIG_EDGE);
- change_gic_dual(irq, GIC_DUAL_SINGLE);
- is_edge = true;
+ pol = GIC_POL_RISING_EDGE;
+ trig = GIC_TRIG_EDGE;
+ dual = GIC_DUAL_SINGLE;
break;
case IRQ_TYPE_EDGE_BOTH:
- /* polarity is irrelevant in this case */
- change_gic_trig(irq, GIC_TRIG_EDGE);
- change_gic_dual(irq, GIC_DUAL_DUAL);
- is_edge = true;
+ pol = 0; /* Doesn't matter */
+ trig = GIC_TRIG_EDGE;
+ dual = GIC_DUAL_DUAL;
break;
case IRQ_TYPE_LEVEL_LOW:
- change_gic_pol(irq, GIC_POL_ACTIVE_LOW);
- change_gic_trig(irq, GIC_TRIG_LEVEL);
- change_gic_dual(irq, GIC_DUAL_SINGLE);
- is_edge = false;
+ pol = GIC_POL_ACTIVE_LOW;
+ trig = GIC_TRIG_LEVEL;
+ dual = GIC_DUAL_SINGLE;
break;
case IRQ_TYPE_LEVEL_HIGH:
default:
- change_gic_pol(irq, GIC_POL_ACTIVE_HIGH);
- change_gic_trig(irq, GIC_TRIG_LEVEL);
- change_gic_dual(irq, GIC_DUAL_SINGLE);
- is_edge = false;
+ pol = GIC_POL_ACTIVE_HIGH;
+ trig = GIC_TRIG_LEVEL;
+ dual = GIC_DUAL_SINGLE;
break;
}
- if (is_edge)
+ change_gic_pol(irq, pol);
+ change_gic_trig(irq, trig);
+ change_gic_dual(irq, dual);
+
+ if (trig == GIC_TRIG_EDGE)
irq_set_chip_handler_name_locked(d, &gic_edge_irq_controller,
handle_edge_irq, NULL);
else
@@ -338,13 +346,17 @@ static struct irq_chip gic_local_irq_controller = {
static void gic_mask_local_irq_all_vpes(struct irq_data *d)
{
- int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
- int i;
+ struct gic_all_vpes_chip_data *cd;
unsigned long flags;
+ int intr, cpu;
+
+ intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
+ cd = irq_data_get_irq_chip_data(d);
+ cd->mask = false;
spin_lock_irqsave(&gic_lock, flags);
- for (i = 0; i < gic_vpes; i++) {
- write_gic_vl_other(mips_cm_vp_id(i));
+ for_each_online_cpu(cpu) {
+ write_gic_vl_other(mips_cm_vp_id(cpu));
write_gic_vo_rmask(BIT(intr));
}
spin_unlock_irqrestore(&gic_lock, flags);
@@ -352,22 +364,40 @@ static void gic_mask_local_irq_all_vpes(struct irq_data *d)
static void gic_unmask_local_irq_all_vpes(struct irq_data *d)
{
- int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
- int i;
+ struct gic_all_vpes_chip_data *cd;
unsigned long flags;
+ int intr, cpu;
+
+ intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
+ cd = irq_data_get_irq_chip_data(d);
+ cd->mask = true;
spin_lock_irqsave(&gic_lock, flags);
- for (i = 0; i < gic_vpes; i++) {
- write_gic_vl_other(mips_cm_vp_id(i));
+ for_each_online_cpu(cpu) {
+ write_gic_vl_other(mips_cm_vp_id(cpu));
write_gic_vo_smask(BIT(intr));
}
spin_unlock_irqrestore(&gic_lock, flags);
}
+static void gic_all_vpes_irq_cpu_online(struct irq_data *d)
+{
+ struct gic_all_vpes_chip_data *cd;
+ unsigned int intr;
+
+ intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
+ cd = irq_data_get_irq_chip_data(d);
+
+ write_gic_vl_map(intr, cd->map);
+ if (cd->mask)
+ write_gic_vl_smask(BIT(intr));
+}
+
static struct irq_chip gic_all_vpes_local_irq_controller = {
- .name = "MIPS GIC Local",
- .irq_mask = gic_mask_local_irq_all_vpes,
- .irq_unmask = gic_unmask_local_irq_all_vpes,
+ .name = "MIPS GIC Local",
+ .irq_mask = gic_mask_local_irq_all_vpes,
+ .irq_unmask = gic_unmask_local_irq_all_vpes,
+ .irq_cpu_online = gic_all_vpes_irq_cpu_online,
};
static void __gic_irq_dispatch(void)
@@ -382,39 +412,6 @@ static void gic_irq_dispatch(struct irq_desc *desc)
gic_handle_shared_int(true);
}
-static int gic_local_irq_domain_map(struct irq_domain *d, unsigned int virq,
- irq_hw_number_t hw)
-{
- int intr = GIC_HWIRQ_TO_LOCAL(hw);
- int i;
- unsigned long flags;
- u32 val;
-
- if (!gic_local_irq_is_routable(intr))
- return -EPERM;
-
- if (intr > GIC_LOCAL_INT_FDC) {
- pr_err("Invalid local IRQ %d\n", intr);
- return -EINVAL;
- }
-
- if (intr == GIC_LOCAL_INT_TIMER) {
- /* CONFIG_MIPS_CMP workaround (see __gic_init) */
- val = GIC_MAP_PIN_MAP_TO_PIN | timer_cpu_pin;
- } else {
- val = GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin;
- }
-
- spin_lock_irqsave(&gic_lock, flags);
- for (i = 0; i < gic_vpes; i++) {
- write_gic_vl_other(mips_cm_vp_id(i));
- write_gic_vo_map(intr, val);
- }
- spin_unlock_irqrestore(&gic_lock, flags);
-
- return 0;
-}
-
static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
irq_hw_number_t hw, unsigned int cpu)
{
@@ -457,7 +454,11 @@ static int gic_irq_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
irq_hw_number_t hwirq)
{
- int err;
+ struct gic_all_vpes_chip_data *cd;
+ unsigned long flags;
+ unsigned int intr;
+ int err, cpu;
+ u32 map;
if (hwirq >= GIC_SHARED_HWIRQ_BASE) {
/* verify that shared irqs don't conflict with an IPI irq */
@@ -474,8 +475,14 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
return gic_shared_irq_domain_map(d, virq, hwirq, 0);
}
- switch (GIC_HWIRQ_TO_LOCAL(hwirq)) {
+ intr = GIC_HWIRQ_TO_LOCAL(hwirq);
+ map = GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin;
+
+ switch (intr) {
case GIC_LOCAL_INT_TIMER:
+ /* CONFIG_MIPS_CMP workaround (see __gic_init) */
+ map = GIC_MAP_PIN_MAP_TO_PIN | timer_cpu_pin;
+ /* fall-through */
case GIC_LOCAL_INT_PERFCTR:
case GIC_LOCAL_INT_FDC:
/*
@@ -483,9 +490,11 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
* the rest of the MIPS kernel code does not use the
* percpu IRQ API for them.
*/
+ cd = &gic_all_vpes_chip_data[intr];
+ cd->map = map;
err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
&gic_all_vpes_local_irq_controller,
- NULL);
+ cd);
if (err)
return err;
@@ -504,7 +513,17 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
break;
}
- return gic_local_irq_domain_map(d, virq, hwirq);
+ if (!gic_local_irq_is_routable(intr))
+ return -EPERM;
+
+ spin_lock_irqsave(&gic_lock, flags);
+ for_each_online_cpu(cpu) {
+ write_gic_vl_other(mips_cm_vp_id(cpu));
+ write_gic_vo_map(intr, map);
+ }
+ spin_unlock_irqrestore(&gic_lock, flags);
+
+ return 0;
}
static int gic_irq_domain_alloc(struct irq_domain *d, unsigned int virq,
@@ -636,11 +655,25 @@ static const struct irq_domain_ops gic_ipi_domain_ops = {
.match = gic_ipi_domain_match,
};
+static int gic_cpu_startup(unsigned int cpu)
+{
+ /* Enable or disable EIC */
+ change_gic_vl_ctl(GIC_VX_CTL_EIC,
+ cpu_has_veic ? GIC_VX_CTL_EIC : 0);
+
+ /* Clear all local IRQ masks (ie. disable all local interrupts) */
+ write_gic_vl_rmask(~0);
+
+ /* Invoke irq_cpu_online callbacks to enable desired interrupts */
+ irq_cpu_online();
+
+ return 0;
+}
static int __init gic_of_init(struct device_node *node,
struct device_node *parent)
{
- unsigned int cpu_vec, i, j, gicconfig, cpu, v[2];
+ unsigned int cpu_vec, i, gicconfig, v[2], num_ipis;
unsigned long reserved;
phys_addr_t gic_base;
struct resource res;
@@ -655,7 +688,7 @@ static int __init gic_of_init(struct device_node *node,
cpu_vec = find_first_zero_bit(&reserved, hweight_long(ST0_IM));
if (cpu_vec == hweight_long(ST0_IM)) {
- pr_err("No CPU vectors available for GIC\n");
+ pr_err("No CPU vectors available\n");
return -ENODEV;
}
@@ -668,8 +701,10 @@ static int __init gic_of_init(struct device_node *node,
gic_base = read_gcr_gic_base() &
~CM_GCR_GIC_BASE_GICEN;
gic_len = 0x20000;
+ pr_warn("Using inherited base address %pa\n",
+ &gic_base);
} else {
- pr_err("Failed to get GIC memory range\n");
+ pr_err("Failed to get memory range\n");
return -ENODEV;
}
} else {
@@ -690,17 +725,7 @@ static int __init gic_of_init(struct device_node *node,
gic_shared_intrs >>= __ffs(GIC_CONFIG_NUMINTERRUPTS);
gic_shared_intrs = (gic_shared_intrs + 1) * 8;
- gic_vpes = gicconfig & GIC_CONFIG_PVPS;
- gic_vpes >>= __ffs(GIC_CONFIG_PVPS);
- gic_vpes = gic_vpes + 1;
-
if (cpu_has_veic) {
- /* Set EIC mode for all VPEs */
- for_each_present_cpu(cpu) {
- write_gic_vl_other(mips_cm_vp_id(cpu));
- write_gic_vo_ctl(GIC_VX_CTL_EIC);
- }
-
/* Always use vector 1 in EIC mode */
gic_cpu_pin = 0;
timer_cpu_pin = gic_cpu_pin;
@@ -737,7 +762,7 @@ static int __init gic_of_init(struct device_node *node,
gic_shared_intrs, 0,
&gic_irq_domain_ops, NULL);
if (!gic_irq_domain) {
- pr_err("Failed to add GIC IRQ domain");
+ pr_err("Failed to add IRQ domain");
return -ENXIO;
}
@@ -746,7 +771,7 @@ static int __init gic_of_init(struct device_node *node,
GIC_NUM_LOCAL_INTRS + gic_shared_intrs,
node, &gic_ipi_domain_ops, NULL);
if (!gic_ipi_domain) {
- pr_err("Failed to add GIC IPI domain");
+ pr_err("Failed to add IPI domain");
return -ENXIO;
}
@@ -756,10 +781,12 @@ static int __init gic_of_init(struct device_node *node,
!of_property_read_u32_array(node, "mti,reserved-ipi-vectors", v, 2)) {
bitmap_set(ipi_resrv, v[0], v[1]);
} else {
- /* Make the last 2 * gic_vpes available for IPIs */
- bitmap_set(ipi_resrv,
- gic_shared_intrs - 2 * gic_vpes,
- 2 * gic_vpes);
+ /*
+ * Reserve 2 interrupts per possible CPU/VP for use as IPIs,
+ * meeting the requirements of arch/mips SMP.
+ */
+ num_ipis = 2 * num_possible_cpus();
+ bitmap_set(ipi_resrv, gic_shared_intrs - num_ipis, num_ipis);
}
bitmap_copy(ipi_available, ipi_resrv, GIC_MAX_INTRS);
@@ -773,15 +800,8 @@ static int __init gic_of_init(struct device_node *node,
write_gic_rmask(i);
}
- for (i = 0; i < gic_vpes; i++) {
- write_gic_vl_other(mips_cm_vp_id(i));
- for (j = 0; j < GIC_NUM_LOCAL_INTRS; j++) {
- if (!gic_local_irq_is_routable(j))
- continue;
- write_gic_vo_rmask(BIT(j));
- }
- }
-
- return 0;
+ return cpuhp_setup_state(CPUHP_AP_IRQ_MIPS_GIC_STARTING,
+ "irqchip/mips/gic:starting",
+ gic_cpu_startup, NULL);
}
IRQCHIP_DECLARE(mips_gic, "mti,gic", gic_of_init);
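The hunks above replace the boot-time loop over gic_vpes with a CPU hotplug callback: gic_cpu_startup() runs on each CPU as it comes online, enables or disables EIC, clears the local masks, and lets the irq_cpu_online chip hooks replay whatever gic_all_vpes_chip_data recorded. A minimal sketch of the same registration pattern, using the generic dynamic hotplug state rather than the GIC-specific CPUHP_AP_IRQ_MIPS_GIC_STARTING one (all names below are hypothetical):

/* Sketch only: hypothetical irqchip wiring per-CPU setup via CPU hotplug. */
#include <linux/cpuhotplug.h>
#include <linux/irq.h>

static int my_chip_cpu_starting(unsigned int cpu)
{
	/* Runs on the CPU that is coming online. */
	/* ... program this CPU's local routing and mask registers ... */
	irq_cpu_online();	/* run the irq_cpu_online() chip callbacks */
	return 0;
}

static int __init my_chip_init(void)
{
	int ret;

	/*
	 * cpuhp_setup_state() also invokes the callback on CPUs that are
	 * already online; with CPUHP_AP_ONLINE_DYN it returns the allocated
	 * state number (>= 0) on success.
	 */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
				"irqchip/example:starting",
				my_chip_cpu_starting, NULL);
	return ret < 0 ? ret : 0;
}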
diff --git a/drivers/irqchip/irq-omap-intc.c b/drivers/irqchip/irq-omap-intc.c
index b04a8ac6e744..d360a6eddd6d 100644
--- a/drivers/irqchip/irq-omap-intc.c
+++ b/drivers/irqchip/irq-omap-intc.c
@@ -25,10 +25,6 @@
#include <linux/irqchip/irq-omap-intc.h>
-/* Define these here for now until we drop all board-files */
-#define OMAP24XX_IC_BASE 0x480fe000
-#define OMAP34XX_IC_BASE 0x48200000
-
/* selected INTC register offsets */
#define INTC_REVISION 0x0000
@@ -70,8 +66,8 @@ static struct omap_intc_regs intc_context;
static struct irq_domain *domain;
static void __iomem *omap_irq_base;
-static int omap_nr_pending = 3;
-static int omap_nr_irqs = 96;
+static int omap_nr_pending;
+static int omap_nr_irqs;
static void intc_writel(u32 reg, u32 val)
{
@@ -364,14 +360,6 @@ omap_intc_handle_irq(struct pt_regs *regs)
handle_domain_irq(domain, irqnr, regs);
}
-void __init omap3_init_irq(void)
-{
- omap_nr_irqs = 96;
- omap_nr_pending = 3;
- omap_init_irq(OMAP34XX_IC_BASE, NULL);
- set_handle_irq(omap_intc_handle_irq);
-}
-
static int __init intc_of_init(struct device_node *node,
struct device_node *parent)
{
diff --git a/drivers/irqchip/irq-ompic.c b/drivers/irqchip/irq-ompic.c
new file mode 100644
index 000000000000..cf6d0c455518
--- /dev/null
+++ b/drivers/irqchip/irq-ompic.c
@@ -0,0 +1,202 @@
+/*
+ * Open Multi-Processor Interrupt Controller driver
+ *
+ * Copyright (C) 2014 Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>
+ * Copyright (C) 2017 Stafford Horne <shorne@gmail.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ *
+ * The ompic device handles IPI communication between cores in multi-core
+ * OpenRISC systems.
+ *
+ * Registers
+ *
+ * For each CPU the ompic has 2 registers. The control register for sending
+ * and acking IPIs and the status register for receiving IPIs. The register
+ * layouts are as follows:
+ *
+ * Control register
+ * +---------+---------+----------+---------+
+ * | 31 | 30 | 29 .. 16 | 15 .. 0 |
+ * ----------+---------+----------+----------
+ * | IRQ ACK | IRQ GEN | DST CORE | DATA |
+ * +---------+---------+----------+---------+
+ *
+ * Status register
+ * +----------+-------------+----------+---------+
+ * | 31 | 30 | 29 .. 16 | 15 .. 0 |
+ * -----------+-------------+----------+---------+
+ * | Reserved | IRQ Pending | SRC CORE | DATA |
+ * +----------+-------------+----------+---------+
+ *
+ * Architecture
+ *
+ * - The ompic generates a level interrupt to the CPU PIC when a message is
+ * ready. Messages are delivered via the memory bus.
+ * - The ompic does not have any interrupt input lines.
+ * - The ompic is wired to the same irq line on each core.
+ * - Devices are wired to the same irq line on each core.
+ *
+ * +---------+ +---------+
+ * | CPU | | CPU |
+ * | Core 0 |<==\ (memory access) /==>| Core 1 |
+ * | [ PIC ]| | | | [ PIC ]|
+ * +----^-^--+ | | +----^-^--+
+ * | | v v | |
+ * <====|=|=================================|=|==> (memory bus)
+ * | | ^ ^ | |
+ * (ipi | +------|---------+--------|-------|-+ (device irq)
+ * irq | | | | |
+ * core0)| +------|---------|--------|-------+ (ipi irq core1)
+ * | | | | |
+ * +----o-o-+ | +--------+ |
+ * | ompic |<===/ | Device |<===/
+ * | IPI | +--------+
+ * +--------+
+ *
+ */
+
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/interrupt.h>
+#include <linux/smp.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+
+#include <linux/irqchip.h>
+
+#define OMPIC_CPUBYTES 8
+#define OMPIC_CTRL(cpu) (0x0 + (cpu * OMPIC_CPUBYTES))
+#define OMPIC_STAT(cpu) (0x4 + (cpu * OMPIC_CPUBYTES))
+
+#define OMPIC_CTRL_IRQ_ACK (1 << 31)
+#define OMPIC_CTRL_IRQ_GEN (1 << 30)
+#define OMPIC_CTRL_DST(cpu) (((cpu) & 0x3fff) << 16)
+
+#define OMPIC_STAT_IRQ_PENDING (1 << 30)
+
+#define OMPIC_DATA(x) ((x) & 0xffff)
+
+DEFINE_PER_CPU(unsigned long, ops);
+
+static void __iomem *ompic_base;
+
+static inline u32 ompic_readreg(void __iomem *base, loff_t offset)
+{
+ return ioread32be(base + offset);
+}
+
+static void ompic_writereg(void __iomem *base, loff_t offset, u32 data)
+{
+ iowrite32be(data, base + offset);
+}
+
+static void ompic_raise_softirq(const struct cpumask *mask,
+ unsigned int ipi_msg)
+{
+ unsigned int dst_cpu;
+ unsigned int src_cpu = smp_processor_id();
+
+ for_each_cpu(dst_cpu, mask) {
+ set_bit(ipi_msg, &per_cpu(ops, dst_cpu));
+
+ /*
+ * On OpenRISC the atomic set_bit() call implies a memory
+ * barrier. Otherwise we would need: smp_wmb(); paired
+ * with the read in ompic_ipi_handler.
+ */
+
+ ompic_writereg(ompic_base, OMPIC_CTRL(src_cpu),
+ OMPIC_CTRL_IRQ_GEN |
+ OMPIC_CTRL_DST(dst_cpu) |
+ OMPIC_DATA(1));
+ }
+}
+
+static irqreturn_t ompic_ipi_handler(int irq, void *dev_id)
+{
+ unsigned int cpu = smp_processor_id();
+ unsigned long *pending_ops = &per_cpu(ops, cpu);
+ unsigned long ops;
+
+ ompic_writereg(ompic_base, OMPIC_CTRL(cpu), OMPIC_CTRL_IRQ_ACK);
+ while ((ops = xchg(pending_ops, 0)) != 0) {
+
+ /*
+ * On OpenRISC the atomic xchg() call implies a memory
+ * barrier. Otherwise we may need an smp_rmb(); paired
+ * with the write in ompic_raise_softirq.
+ */
+
+ do {
+ unsigned long ipi_msg;
+
+ ipi_msg = __ffs(ops);
+ ops &= ~(1UL << ipi_msg);
+
+ handle_IPI(ipi_msg);
+ } while (ops);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int __init ompic_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ struct resource res;
+ int irq;
+ int ret;
+
+ /* Validate the DT */
+ if (ompic_base) {
+ pr_err("ompic: duplicate ompic's are not supported");
+ return -EEXIST;
+ }
+
+ if (of_address_to_resource(node, 0, &res)) {
+ pr_err("ompic: reg property requires an address and size");
+ return -EINVAL;
+ }
+
+ if (resource_size(&res) < (num_possible_cpus() * OMPIC_CPUBYTES)) {
+ pr_err("ompic: reg size, currently %d must be at least %d",
+ resource_size(&res),
+ (num_possible_cpus() * OMPIC_CPUBYTES));
+ return -EINVAL;
+ }
+
+ /* Setup the device */
+ ompic_base = ioremap(res.start, resource_size(&res));
+ if (!ompic_base) {
+ pr_err("ompic: unable to map registers");
+ return -ENOMEM;
+ }
+
+ irq = irq_of_parse_and_map(node, 0);
+ if (irq <= 0) {
+ pr_err("ompic: unable to parse device irq");
+ ret = -EINVAL;
+ goto out_unmap;
+ }
+
+ ret = request_irq(irq, ompic_ipi_handler, IRQF_PERCPU,
+ "ompic_ipi", NULL);
+ if (ret)
+ goto out_irq_disp;
+
+ set_smp_cross_call(ompic_raise_softirq);
+
+ return 0;
+
+out_irq_disp:
+ irq_dispose_mapping(irq);
+out_unmap:
+ iounmap(ompic_base);
+ ompic_base = NULL;
+ return ret;
+}
+IRQCHIP_DECLARE(ompic, "openrisc,ompic", ompic_of_init);
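Tying the control-register layout documented in the header comment to the macros above: a single 32-bit write sends a message, and the worked values below (illustrative only, for dst_cpu == 2 and the fixed DATA of 1 used by ompic_raise_softirq()) show how the fields combine.

/*
 * Control word written by ompic_raise_softirq() to OMPIC_CTRL(src_cpu):
 *
 *   OMPIC_CTRL_IRQ_GEN  = 1 << 30              = 0x40000000
 *   OMPIC_CTRL_DST(2)   = (2 & 0x3fff) << 16   = 0x00020000
 *   OMPIC_DATA(1)       = 1 & 0xffff           = 0x00000001
 *                                  OR'd together: 0x40020001
 *
 * The destination core then sees IRQ Pending | SRC CORE | DATA in its
 * status register and acks with a write of OMPIC_CTRL_IRQ_ACK.
 */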
diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c
index 713177d97c7a..06f29cf5018a 100644
--- a/drivers/irqchip/irq-renesas-intc-irqpin.c
+++ b/drivers/irqchip/irq-renesas-intc-irqpin.c
@@ -389,9 +389,8 @@ MODULE_DEVICE_TABLE(of, intc_irqpin_dt_ids);
static int intc_irqpin_probe(struct platform_device *pdev)
{
- const struct intc_irqpin_config *config = NULL;
+ const struct intc_irqpin_config *config;
struct device *dev = &pdev->dev;
- const struct of_device_id *of_id;
struct intc_irqpin_priv *p;
struct intc_irqpin_iomem *i;
struct resource *io[INTC_IRQPIN_REG_NR];
@@ -422,11 +421,9 @@ static int intc_irqpin_probe(struct platform_device *pdev)
p->pdev = pdev;
platform_set_drvdata(pdev, p);
- of_id = of_match_device(intc_irqpin_dt_ids, dev);
- if (of_id && of_id->data) {
- config = of_id->data;
+ config = of_device_get_match_data(dev);
+ if (config)
p->needs_clk = config->needs_clk;
- }
p->clk = devm_clk_get(dev, NULL);
if (IS_ERR(p->clk)) {
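The hunk above replaces the open-coded of_match_device() lookup with of_device_get_match_data(), which hands back the matching entry's .data pointer directly. A hedged sketch of the general pattern, with hypothetical struct and table names:

/* Sketch only: hypothetical driver using of_device_get_match_data(). */
#include <linux/of_device.h>
#include <linux/platform_device.h>

struct my_cfg {
	bool needs_clk;
};

static const struct my_cfg my_cfg_variant_a = { .needs_clk = true };

static const struct of_device_id my_dt_ids[] = {
	{ .compatible = "vendor,my-intc-variant-a", .data = &my_cfg_variant_a },
	{ /* sentinel */ },
};

static int my_probe(struct platform_device *pdev)
{
	const struct my_cfg *cfg = of_device_get_match_data(&pdev->dev);

	/* cfg is NULL when the matched entry carries no .data */
	if (cfg && cfg->needs_clk) {
		/* ... acquire the clock ... */
	}
	return 0;
}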
diff --git a/drivers/irqchip/irq-sni-exiu.c b/drivers/irqchip/irq-sni-exiu.c
new file mode 100644
index 000000000000..1b6e2f7c59af
--- /dev/null
+++ b/drivers/irqchip/irq-sni-exiu.c
@@ -0,0 +1,227 @@
+/*
+ * Driver for Socionext External Interrupt Unit (EXIU)
+ *
+ * Copyright (c) 2017 Linaro, Ltd. <ard.biesheuvel@linaro.org>
+ *
+ * Based on irq-tegra.c:
+ * Copyright (C) 2011 Google, Inc.
+ * Copyright (C) 2010,2013, NVIDIA Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqdomain.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#include <dt-bindings/interrupt-controller/arm-gic.h>
+
+#define NUM_IRQS 32
+
+#define EIMASK 0x00
+#define EISRCSEL 0x04
+#define EIREQSTA 0x08
+#define EIRAWREQSTA 0x0C
+#define EIREQCLR 0x10
+#define EILVL 0x14
+#define EIEDG 0x18
+#define EISIR 0x1C
+
+struct exiu_irq_data {
+ void __iomem *base;
+ u32 spi_base;
+};
+
+static void exiu_irq_eoi(struct irq_data *d)
+{
+ struct exiu_irq_data *data = irq_data_get_irq_chip_data(d);
+
+ writel(BIT(d->hwirq), data->base + EIREQCLR);
+ irq_chip_eoi_parent(d);
+}
+
+static void exiu_irq_mask(struct irq_data *d)
+{
+ struct exiu_irq_data *data = irq_data_get_irq_chip_data(d);
+ u32 val;
+
+ val = readl_relaxed(data->base + EIMASK) | BIT(d->hwirq);
+ writel_relaxed(val, data->base + EIMASK);
+ irq_chip_mask_parent(d);
+}
+
+static void exiu_irq_unmask(struct irq_data *d)
+{
+ struct exiu_irq_data *data = irq_data_get_irq_chip_data(d);
+ u32 val;
+
+ val = readl_relaxed(data->base + EIMASK) & ~BIT(d->hwirq);
+ writel_relaxed(val, data->base + EIMASK);
+ irq_chip_unmask_parent(d);
+}
+
+static void exiu_irq_enable(struct irq_data *d)
+{
+ struct exiu_irq_data *data = irq_data_get_irq_chip_data(d);
+ u32 val;
+
+ /* clear interrupts that were latched while disabled */
+ writel_relaxed(BIT(d->hwirq), data->base + EIREQCLR);
+
+ val = readl_relaxed(data->base + EIMASK) & ~BIT(d->hwirq);
+ writel_relaxed(val, data->base + EIMASK);
+ irq_chip_enable_parent(d);
+}
+
+static int exiu_irq_set_type(struct irq_data *d, unsigned int type)
+{
+ struct exiu_irq_data *data = irq_data_get_irq_chip_data(d);
+ u32 val;
+
+ val = readl_relaxed(data->base + EILVL);
+ if (type == IRQ_TYPE_EDGE_RISING || type == IRQ_TYPE_LEVEL_HIGH)
+ val |= BIT(d->hwirq);
+ else
+ val &= ~BIT(d->hwirq);
+ writel_relaxed(val, data->base + EILVL);
+
+ val = readl_relaxed(data->base + EIEDG);
+ if (type == IRQ_TYPE_LEVEL_LOW || type == IRQ_TYPE_LEVEL_HIGH)
+ val &= ~BIT(d->hwirq);
+ else
+ val |= BIT(d->hwirq);
+ writel_relaxed(val, data->base + EIEDG);
+
+ writel_relaxed(BIT(d->hwirq), data->base + EIREQCLR);
+
+ return irq_chip_set_type_parent(d, IRQ_TYPE_LEVEL_HIGH);
+}
+
+static struct irq_chip exiu_irq_chip = {
+ .name = "EXIU",
+ .irq_eoi = exiu_irq_eoi,
+ .irq_enable = exiu_irq_enable,
+ .irq_mask = exiu_irq_mask,
+ .irq_unmask = exiu_irq_unmask,
+ .irq_set_type = exiu_irq_set_type,
+ .irq_set_affinity = irq_chip_set_affinity_parent,
+ .flags = IRQCHIP_SET_TYPE_MASKED |
+ IRQCHIP_SKIP_SET_WAKE |
+ IRQCHIP_EOI_THREADED |
+ IRQCHIP_MASK_ON_SUSPEND,
+};
+
+static int exiu_domain_translate(struct irq_domain *domain,
+ struct irq_fwspec *fwspec,
+ unsigned long *hwirq,
+ unsigned int *type)
+{
+ struct exiu_irq_data *info = domain->host_data;
+
+ if (is_of_node(fwspec->fwnode)) {
+ if (fwspec->param_count != 3)
+ return -EINVAL;
+
+ if (fwspec->param[0] != GIC_SPI)
+ return -EINVAL; /* No PPI should point to this domain */
+
+ *hwirq = fwspec->param[1] - info->spi_base;
+ *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
+ return 0;
+ }
+ return -EINVAL;
+}
+
+static int exiu_domain_alloc(struct irq_domain *dom, unsigned int virq,
+ unsigned int nr_irqs, void *data)
+{
+ struct irq_fwspec *fwspec = data;
+ struct irq_fwspec parent_fwspec;
+ struct exiu_irq_data *info = dom->host_data;
+ irq_hw_number_t hwirq;
+
+ if (fwspec->param_count != 3)
+ return -EINVAL; /* Not GIC compliant */
+ if (fwspec->param[0] != GIC_SPI)
+ return -EINVAL; /* No PPI should point to this domain */
+
+ WARN_ON(nr_irqs != 1);
+ hwirq = fwspec->param[1] - info->spi_base;
+ irq_domain_set_hwirq_and_chip(dom, virq, hwirq, &exiu_irq_chip, info);
+
+ parent_fwspec = *fwspec;
+ parent_fwspec.fwnode = dom->parent->fwnode;
+ return irq_domain_alloc_irqs_parent(dom, virq, nr_irqs, &parent_fwspec);
+}
+
+static const struct irq_domain_ops exiu_domain_ops = {
+ .translate = exiu_domain_translate,
+ .alloc = exiu_domain_alloc,
+ .free = irq_domain_free_irqs_common,
+};
+
+static int __init exiu_init(struct device_node *node,
+ struct device_node *parent)
+{
+ struct irq_domain *parent_domain, *domain;
+ struct exiu_irq_data *data;
+ int err;
+
+ if (!parent) {
+ pr_err("%pOF: no parent, giving up\n", node);
+ return -ENODEV;
+ }
+
+ parent_domain = irq_find_host(parent);
+ if (!parent_domain) {
+ pr_err("%pOF: unable to obtain parent domain\n", node);
+ return -ENXIO;
+ }
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ if (of_property_read_u32(node, "socionext,spi-base", &data->spi_base)) {
+ pr_err("%pOF: failed to parse 'spi-base' property\n", node);
+ err = -ENODEV;
+ goto out_free;
+ }
+
+ data->base = of_iomap(node, 0);
+ if (!data->base) {
+ err = -ENOMEM;
+ goto out_free;
+ }
+
+ /* clear and mask all interrupts */
+ writel_relaxed(0xFFFFFFFF, data->base + EIREQCLR);
+ writel_relaxed(0xFFFFFFFF, data->base + EIMASK);
+
+ domain = irq_domain_add_hierarchy(parent_domain, 0, NUM_IRQS, node,
+ &exiu_domain_ops, data);
+ if (!domain) {
+ pr_err("%pOF: failed to allocate domain\n", node);
+ err = -ENOMEM;
+ goto out_unmap;
+ }
+
+ pr_info("%pOF: %d interrupts forwarded to %pOF\n", node, NUM_IRQS,
+ parent);
+
+ return 0;
+
+out_unmap:
+ iounmap(data->base);
+out_free:
+ kfree(data);
+ return err;
+}
+IRQCHIP_DECLARE(exiu, "socionext,synquacer-exiu", exiu_init);
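The translation done by exiu_domain_translate() and exiu_domain_alloc() is a plain offset: the EXIU hwirq is the GIC SPI number minus the 'socionext,spi-base' value, while the parent fwspec is forwarded unchanged. A worked example, with an assumed spi-base of 112 (not taken from this patch):

/*
 * Illustrative only, assuming socionext,spi-base = 112:
 *
 *   consumer:  interrupts = <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>;
 *   hwirq    = 115 - 112 = 3  ->  EXIU pin 3, i.e. BIT(3) in EIMASK/EIREQCLR
 *
 * The GIC still sees SPI 115, always programmed as level-high by
 * exiu_irq_set_type(), while edge/polarity handling stays in the EXIU.
 */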
diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c
index 45363ff8d06f..31ab0dee2ce7 100644
--- a/drivers/irqchip/irq-stm32-exti.c
+++ b/drivers/irqchip/irq-stm32-exti.c
@@ -14,27 +14,99 @@
#include <linux/of_address.h>
#include <linux/of_irq.h>
-#define EXTI_IMR 0x0
-#define EXTI_EMR 0x4
-#define EXTI_RTSR 0x8
-#define EXTI_FTSR 0xc
-#define EXTI_SWIER 0x10
-#define EXTI_PR 0x14
+#define IRQS_PER_BANK 32
+
+struct stm32_exti_bank {
+ u32 imr_ofst;
+ u32 emr_ofst;
+ u32 rtsr_ofst;
+ u32 ftsr_ofst;
+ u32 swier_ofst;
+ u32 pr_ofst;
+};
+
+static const struct stm32_exti_bank stm32f4xx_exti_b1 = {
+ .imr_ofst = 0x00,
+ .emr_ofst = 0x04,
+ .rtsr_ofst = 0x08,
+ .ftsr_ofst = 0x0C,
+ .swier_ofst = 0x10,
+ .pr_ofst = 0x14,
+};
+
+static const struct stm32_exti_bank *stm32f4xx_exti_banks[] = {
+ &stm32f4xx_exti_b1,
+};
+
+static const struct stm32_exti_bank stm32h7xx_exti_b1 = {
+ .imr_ofst = 0x80,
+ .emr_ofst = 0x84,
+ .rtsr_ofst = 0x00,
+ .ftsr_ofst = 0x04,
+ .swier_ofst = 0x08,
+ .pr_ofst = 0x88,
+};
+
+static const struct stm32_exti_bank stm32h7xx_exti_b2 = {
+ .imr_ofst = 0x90,
+ .emr_ofst = 0x94,
+ .rtsr_ofst = 0x20,
+ .ftsr_ofst = 0x24,
+ .swier_ofst = 0x28,
+ .pr_ofst = 0x98,
+};
+
+static const struct stm32_exti_bank stm32h7xx_exti_b3 = {
+ .imr_ofst = 0xA0,
+ .emr_ofst = 0xA4,
+ .rtsr_ofst = 0x40,
+ .ftsr_ofst = 0x44,
+ .swier_ofst = 0x48,
+ .pr_ofst = 0xA8,
+};
+
+static const struct stm32_exti_bank *stm32h7xx_exti_banks[] = {
+ &stm32h7xx_exti_b1,
+ &stm32h7xx_exti_b2,
+ &stm32h7xx_exti_b3,
+};
+
+static unsigned long stm32_exti_pending(struct irq_chip_generic *gc)
+{
+ const struct stm32_exti_bank *stm32_bank = gc->private;
+
+ return irq_reg_readl(gc, stm32_bank->pr_ofst);
+}
+
+static void stm32_exti_irq_ack(struct irq_chip_generic *gc, u32 mask)
+{
+ const struct stm32_exti_bank *stm32_bank = gc->private;
+
+ irq_reg_writel(gc, mask, stm32_bank->pr_ofst);
+}
static void stm32_irq_handler(struct irq_desc *desc)
{
struct irq_domain *domain = irq_desc_get_handler_data(desc);
- struct irq_chip_generic *gc = domain->gc->gc[0];
struct irq_chip *chip = irq_desc_get_chip(desc);
+ unsigned int virq, nbanks = domain->gc->num_chips;
+ struct irq_chip_generic *gc;
+ const struct stm32_exti_bank *stm32_bank;
unsigned long pending;
- int n;
+ int n, i, irq_base = 0;
chained_irq_enter(chip, desc);
- while ((pending = irq_reg_readl(gc, EXTI_PR))) {
- for_each_set_bit(n, &pending, BITS_PER_LONG) {
- generic_handle_irq(irq_find_mapping(domain, n));
- irq_reg_writel(gc, BIT(n), EXTI_PR);
+ for (i = 0; i < nbanks; i++, irq_base += IRQS_PER_BANK) {
+ gc = irq_get_domain_generic_chip(domain, irq_base);
+ stm32_bank = gc->private;
+
+ while ((pending = stm32_exti_pending(gc))) {
+ for_each_set_bit(n, &pending, IRQS_PER_BANK) {
+ virq = irq_find_mapping(domain, irq_base + n);
+ generic_handle_irq(virq);
+ stm32_exti_irq_ack(gc, BIT(n));
+ }
}
}
@@ -44,13 +116,14 @@ static void stm32_irq_handler(struct irq_desc *desc)
static int stm32_irq_set_type(struct irq_data *data, unsigned int type)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
- int pin = data->hwirq;
+ const struct stm32_exti_bank *stm32_bank = gc->private;
+ int pin = data->hwirq % IRQS_PER_BANK;
u32 rtsr, ftsr;
irq_gc_lock(gc);
- rtsr = irq_reg_readl(gc, EXTI_RTSR);
- ftsr = irq_reg_readl(gc, EXTI_FTSR);
+ rtsr = irq_reg_readl(gc, stm32_bank->rtsr_ofst);
+ ftsr = irq_reg_readl(gc, stm32_bank->ftsr_ofst);
switch (type) {
case IRQ_TYPE_EDGE_RISING:
@@ -70,8 +143,8 @@ static int stm32_irq_set_type(struct irq_data *data, unsigned int type)
return -EINVAL;
}
- irq_reg_writel(gc, rtsr, EXTI_RTSR);
- irq_reg_writel(gc, ftsr, EXTI_FTSR);
+ irq_reg_writel(gc, rtsr, stm32_bank->rtsr_ofst);
+ irq_reg_writel(gc, ftsr, stm32_bank->ftsr_ofst);
irq_gc_unlock(gc);
@@ -81,17 +154,18 @@ static int stm32_irq_set_type(struct irq_data *data, unsigned int type)
static int stm32_irq_set_wake(struct irq_data *data, unsigned int on)
{
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
- int pin = data->hwirq;
- u32 emr;
+ const struct stm32_exti_bank *stm32_bank = gc->private;
+ int pin = data->hwirq % IRQS_PER_BANK;
+ u32 imr;
irq_gc_lock(gc);
- emr = irq_reg_readl(gc, EXTI_EMR);
+ imr = irq_reg_readl(gc, stm32_bank->imr_ofst);
if (on)
- emr |= BIT(pin);
+ imr |= BIT(pin);
else
- emr &= ~BIT(pin);
- irq_reg_writel(gc, emr, EXTI_EMR);
+ imr &= ~BIT(pin);
+ irq_reg_writel(gc, imr, stm32_bank->imr_ofst);
irq_gc_unlock(gc);
@@ -101,11 +175,12 @@ static int stm32_irq_set_wake(struct irq_data *data, unsigned int on)
static int stm32_exti_alloc(struct irq_domain *d, unsigned int virq,
unsigned int nr_irqs, void *data)
{
- struct irq_chip_generic *gc = d->gc->gc[0];
+ struct irq_chip_generic *gc;
struct irq_fwspec *fwspec = data;
irq_hw_number_t hwirq;
hwirq = fwspec->param[0];
+ gc = irq_get_domain_generic_chip(d, hwirq);
irq_map_generic_chip(d, virq, hwirq);
irq_domain_set_info(d, virq, hwirq, &gc->chip_types->chip, gc,
@@ -129,8 +204,9 @@ struct irq_domain_ops irq_exti_domain_ops = {
.free = stm32_exti_free,
};
-static int __init stm32_exti_init(struct device_node *node,
- struct device_node *parent)
+static int
+__init stm32_exti_init(const struct stm32_exti_bank **stm32_exti_banks,
+ int bank_nr, struct device_node *node)
{
unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
int nr_irqs, nr_exti, ret, i;
@@ -144,23 +220,16 @@ static int __init stm32_exti_init(struct device_node *node,
return -ENOMEM;
}
- /* Determine number of irqs supported */
- writel_relaxed(~0UL, base + EXTI_RTSR);
- nr_exti = fls(readl_relaxed(base + EXTI_RTSR));
- writel_relaxed(0, base + EXTI_RTSR);
-
- pr_info("%pOF: %d External IRQs detected\n", node, nr_exti);
-
- domain = irq_domain_add_linear(node, nr_exti,
+ domain = irq_domain_add_linear(node, bank_nr * IRQS_PER_BANK,
&irq_exti_domain_ops, NULL);
if (!domain) {
pr_err("%s: Could not register interrupt domain.\n",
- node->name);
+ node->name);
ret = -ENOMEM;
goto out_unmap;
}
- ret = irq_alloc_domain_generic_chips(domain, nr_exti, 1, "exti",
+ ret = irq_alloc_domain_generic_chips(domain, IRQS_PER_BANK, 1, "exti",
handle_edge_irq, clr, 0, 0);
if (ret) {
pr_err("%pOF: Could not allocate generic interrupt chip.\n",
@@ -168,18 +237,41 @@ static int __init stm32_exti_init(struct device_node *node,
goto out_free_domain;
}
- gc = domain->gc->gc[0];
- gc->reg_base = base;
- gc->chip_types->type = IRQ_TYPE_EDGE_BOTH;
- gc->chip_types->chip.name = gc->chip_types[0].chip.name;
- gc->chip_types->chip.irq_ack = irq_gc_ack_set_bit;
- gc->chip_types->chip.irq_mask = irq_gc_mask_clr_bit;
- gc->chip_types->chip.irq_unmask = irq_gc_mask_set_bit;
- gc->chip_types->chip.irq_set_type = stm32_irq_set_type;
- gc->chip_types->chip.irq_set_wake = stm32_irq_set_wake;
- gc->chip_types->regs.ack = EXTI_PR;
- gc->chip_types->regs.mask = EXTI_IMR;
- gc->chip_types->handler = handle_edge_irq;
+ for (i = 0; i < bank_nr; i++) {
+ const struct stm32_exti_bank *stm32_bank = stm32_exti_banks[i];
+ u32 irqs_mask;
+
+ gc = irq_get_domain_generic_chip(domain, i * IRQS_PER_BANK);
+
+ gc->reg_base = base;
+ gc->chip_types->type = IRQ_TYPE_EDGE_BOTH;
+ gc->chip_types->chip.irq_ack = irq_gc_ack_set_bit;
+ gc->chip_types->chip.irq_mask = irq_gc_mask_clr_bit;
+ gc->chip_types->chip.irq_unmask = irq_gc_mask_set_bit;
+ gc->chip_types->chip.irq_set_type = stm32_irq_set_type;
+ gc->chip_types->chip.irq_set_wake = stm32_irq_set_wake;
+ gc->chip_types->regs.ack = stm32_bank->pr_ofst;
+ gc->chip_types->regs.mask = stm32_bank->imr_ofst;
+ gc->private = (void *)stm32_bank;
+
+ /* Determine number of irqs supported */
+ writel_relaxed(~0UL, base + stm32_bank->rtsr_ofst);
+ irqs_mask = readl_relaxed(base + stm32_bank->rtsr_ofst);
+ nr_exti = fls(readl_relaxed(base + stm32_bank->rtsr_ofst));
+
+ /*
+ * This IP has no reset, so after hot reboot we should
+ * clear registers to avoid residue
+ */
+ writel_relaxed(0, base + stm32_bank->imr_ofst);
+ writel_relaxed(0, base + stm32_bank->emr_ofst);
+ writel_relaxed(0, base + stm32_bank->rtsr_ofst);
+ writel_relaxed(0, base + stm32_bank->ftsr_ofst);
+ writel_relaxed(~0UL, base + stm32_bank->pr_ofst);
+
+ pr_info("%s: bank%d, External IRQs available:%#x\n",
+ node->full_name, i, irqs_mask);
+ }
nr_irqs = of_irq_count(node);
for (i = 0; i < nr_irqs; i++) {
@@ -198,4 +290,20 @@ out_unmap:
return ret;
}
-IRQCHIP_DECLARE(stm32_exti, "st,stm32-exti", stm32_exti_init);
+static int __init stm32f4_exti_of_init(struct device_node *np,
+ struct device_node *parent)
+{
+ return stm32_exti_init(stm32f4xx_exti_banks,
+ ARRAY_SIZE(stm32f4xx_exti_banks), np);
+}
+
+IRQCHIP_DECLARE(stm32f4_exti, "st,stm32-exti", stm32f4_exti_of_init);
+
+static int __init stm32h7_exti_of_init(struct device_node *np,
+ struct device_node *parent)
+{
+ return stm32_exti_init(stm32h7xx_exti_banks,
+ ARRAY_SIZE(stm32h7xx_exti_banks), np);
+}
+
+IRQCHIP_DECLARE(stm32h7_exti, "st,stm32h7-exti", stm32h7_exti_of_init);
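With the EXTI domain now spanning bank_nr * IRQS_PER_BANK lines, a hwirq splits into a bank index and a pin within that bank, and the per-bank generic chip (carrying its stm32_exti_bank offsets in gc->private) is looked up from the hwirq, as stm32_irq_set_type() and stm32_irq_set_wake() do with the modulo above. A small sketch of that decomposition, independent of this patch:

/* Sketch only: how a hwirq maps onto the per-bank generic chips above. */
#include <linux/irqdomain.h>

#define IRQS_PER_BANK	32	/* as defined in the patch above */

static inline unsigned int exti_bank_of(irq_hw_number_t hwirq)
{
	return hwirq / IRQS_PER_BANK;	/* which stm32_exti_bank */
}

static inline unsigned int exti_pin_of(irq_hw_number_t hwirq)
{
	return hwirq % IRQS_PER_BANK;	/* bit inside IMR/RTSR/FTSR/PR */
}

/*
 * e.g. on stm32h7, hwirq 70 -> bank 2 (stm32h7xx_exti_b3), pin 6,
 * so its mask bit is bit 6 of the register at imr_ofst 0xA0.
 */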