Diffstat (limited to 'kernel/irq')
-rw-r--r--   kernel/irq/cpuhotplug.c   21
-rw-r--r--   kernel/irq/irqdesc.c       1
-rw-r--r--   kernel/irq/irqdomain.c    18
-rw-r--r--   kernel/irq/manage.c       45
-rw-r--r--   kernel/irq/spurious.c      1
5 files changed, 81 insertions, 5 deletions
diff --git a/kernel/irq/cpuhotplug.c b/kernel/irq/cpuhotplug.c
index 6c7ca2e983a5..02236b13b359 100644
--- a/kernel/irq/cpuhotplug.c
+++ b/kernel/irq/cpuhotplug.c
@@ -12,6 +12,7 @@
#include <linux/interrupt.h>
#include <linux/ratelimit.h>
#include <linux/irq.h>
+#include <linux/sched/isolation.h>
#include "internals.h"
@@ -171,6 +172,20 @@ void irq_migrate_all_off_this_cpu(void)
}
}
+static bool hk_should_isolate(struct irq_data *data, unsigned int cpu)
+{
+ const struct cpumask *hk_mask;
+
+ if (!housekeeping_enabled(HK_FLAG_MANAGED_IRQ))
+ return false;
+
+ hk_mask = housekeeping_cpumask(HK_FLAG_MANAGED_IRQ);
+ if (cpumask_subset(irq_data_get_effective_affinity_mask(data), hk_mask))
+ return false;
+
+ return cpumask_test_cpu(cpu, hk_mask);
+}
+
static void irq_restore_affinity_of_irq(struct irq_desc *desc, unsigned int cpu)
{
struct irq_data *data = irq_desc_get_irq_data(desc);
@@ -188,9 +203,11 @@ static void irq_restore_affinity_of_irq(struct irq_desc *desc, unsigned int cpu)
/*
* If the interrupt can only be directed to a single target
* CPU then it is already assigned to a CPU in the affinity
- * mask. No point in trying to move it around.
+ * mask. No point in trying to move it around unless the
+ * isolation mechanism requests to move it to an upcoming
+ * housekeeping CPU.
*/
- if (!irqd_is_single_target(data))
+ if (!irqd_is_single_target(data) || hk_should_isolate(data, cpu))
irq_set_affinity_locked(data, affinity, false);
}
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 5b8fdd659e54..98a5f10d1900 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -891,6 +891,7 @@ __irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus,
}
void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus)
+ __releases(&desc->lock)
{
raw_spin_unlock_irqrestore(&desc->lock, flags);
if (bus)
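
The __releases(&desc->lock) annotation added here, like the __must_hold(&desc->lock) added to kernel/irq/spurious.c further down, is a sparse lock-context annotation: it documents that the function releases (or must be entered holding) desc->lock and compiles to nothing in a normal build. Roughly, and as a from-memory sketch of include/linux/compiler_types.h rather than part of this patch, the annotations expand as:

    /* Sketch of the sparse annotations (only active when __CHECKER__ is set). */
    #ifdef __CHECKER__
    # define __must_hold(x)  __attribute__((context(x,1,1)))
    # define __acquires(x)   __attribute__((context(x,0,1)))
    # define __releases(x)   __attribute__((context(x,1,0)))
    #else
    # define __must_hold(x)
    # define __acquires(x)
    # define __releases(x)
    #endif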
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index dd822fd8a7d5..7527e5ef6fe5 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -987,6 +987,23 @@ const struct irq_domain_ops irq_domain_simple_ops = {
EXPORT_SYMBOL_GPL(irq_domain_simple_ops);
/**
+ * irq_domain_translate_onecell() - Generic translate for direct one cell
+ * bindings
+ */
+int irq_domain_translate_onecell(struct irq_domain *d,
+ struct irq_fwspec *fwspec,
+ unsigned long *out_hwirq,
+ unsigned int *out_type)
+{
+ if (WARN_ON(fwspec->param_count < 1))
+ return -EINVAL;
+ *out_hwirq = fwspec->param[0];
+ *out_type = IRQ_TYPE_NONE;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(irq_domain_translate_onecell);
+
+/**
* irq_domain_translate_twocell() - Generic translate for direct two cell
* bindings
*
@@ -1459,6 +1476,7 @@ int irq_domain_push_irq(struct irq_domain *domain, int virq, void *arg)
if (rv) {
/* Restore the original irq_data. */
*root_irq_data = *child_irq_data;
+ kfree(child_irq_data);
goto error;
}
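
The irq_domain_translate_onecell() helper added above gives irqchip drivers a ready-made .translate callback for one-cell firmware bindings, mirroring the existing irq_domain_translate_twocell(). A hedged sketch of how a driver might wire it up follows; the my_intc_* names are hypothetical and the .alloc body is reduced to the bare minimum:

    /* Hypothetical driver sketch using the new helper as the .translate op. */
    #include <linux/irq.h>
    #include <linux/irqdomain.h>

    static struct irq_chip my_intc_chip = {
        .name = "my-intc",
    };

    static int my_intc_domain_alloc(struct irq_domain *domain, unsigned int virq,
                                    unsigned int nr_irqs, void *arg)
    {
        struct irq_fwspec *fwspec = arg;
        unsigned long hwirq;
        unsigned int type;
        int ret;

        /* One cell: param[0] is the hardware interrupt number. */
        ret = irq_domain_translate_onecell(domain, fwspec, &hwirq, &type);
        if (ret)
            return ret;

        irq_domain_set_info(domain, virq, hwirq, &my_intc_chip, NULL,
                            handle_level_irq, NULL, NULL);
        return 0;
    }

    static const struct irq_domain_ops my_intc_domain_ops = {
        .translate = irq_domain_translate_onecell,
        .alloc     = my_intc_domain_alloc,
        .free      = irq_domain_free_irqs_common,
    };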
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 1753486b440c..818b2802d3e7 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -18,6 +18,7 @@
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/sched/task.h>
+#include <linux/sched/isolation.h>
#include <uapi/linux/sched/types.h>
#include <linux/task_work.h>
@@ -217,7 +218,45 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
if (!chip || !chip->irq_set_affinity)
return -EINVAL;
- ret = chip->irq_set_affinity(data, mask, force);
+ /*
+ * If this is a managed interrupt and housekeeping is enabled on
+ * it, check whether the requested affinity mask intersects with
+ * a housekeeping CPU. If so, then remove the isolated CPUs from
+ * the mask and just keep the housekeeping CPU(s). This prevents
+ * the affinity setter from routing the interrupt to an isolated
+ * CPU, so that I/O submitted from a housekeeping CPU does not
+ * cause interrupts on an isolated one.
+ *
+ * If the masks do not intersect or include online CPU(s) then
+ * keep the requested mask. The isolated target CPUs only
+ * receive interrupts when the I/O operation was submitted
+ * directly from them.
+ *
+ * If all housekeeping CPUs in the affinity mask are offline, the
+ * interrupt will be migrated by the CPU hotplug code once a
+ * housekeeping CPU which belongs to the affinity mask comes
+ * online.
+ */
+ if (irqd_affinity_is_managed(data) &&
+ housekeeping_enabled(HK_FLAG_MANAGED_IRQ)) {
+ const struct cpumask *hk_mask, *prog_mask;
+
+ static DEFINE_RAW_SPINLOCK(tmp_mask_lock);
+ static struct cpumask tmp_mask;
+
+ hk_mask = housekeeping_cpumask(HK_FLAG_MANAGED_IRQ);
+
+ raw_spin_lock(&tmp_mask_lock);
+ cpumask_and(&tmp_mask, mask, hk_mask);
+ if (!cpumask_intersects(&tmp_mask, cpu_online_mask))
+ prog_mask = mask;
+ else
+ prog_mask = &tmp_mask;
+ ret = chip->irq_set_affinity(data, prog_mask, force);
+ raw_spin_unlock(&tmp_mask_lock);
+ } else {
+ ret = chip->irq_set_affinity(data, mask, force);
+ }
switch (ret) {
case IRQ_SET_MASK_OK:
case IRQ_SET_MASK_OK_DONE:
@@ -1500,8 +1539,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
* has. The type flags are unreliable as the
* underlying chip implementation can override them.
*/
- pr_err("Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n",
- irq);
+ pr_err("Threaded irq requested with handler=NULL and !ONESHOT for %s (irq %d)\n",
+ new->name, irq);
ret = -EINVAL;
goto out_unlock;
}
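
The comment block in irq_do_set_affinity() above describes the mask clipping in words; the hedged userspace sketch below repeats the same decision with plain bitmasks, so it can be compiled and experimented with outside the kernel. Masks and CPU numbers are invented for the example:

    /* Userspace sketch of the managed-IRQ mask clipping, not kernel code. */
    #include <stdio.h>

    static unsigned long clip_to_housekeeping(unsigned long requested,
                                              unsigned long housekeeping,
                                              unsigned long online)
    {
        unsigned long clipped = requested & housekeeping;

        /*
         * No online housekeeping CPU left after clipping: keep the
         * requested mask, as the kernel code above does.
         */
        return (clipped & online) ? clipped : requested;
    }

    int main(void)
    {
        /* CPUs 0-1 are housekeeping, CPUs 0-7 are online. */
        printf("%#lx\n", clip_to_housekeeping(0x6, 0x3, 0xff)); /* 0x2: isolated CPU 2 dropped */
        printf("%#lx\n", clip_to_housekeeping(0xc, 0x3, 0xff)); /* 0xc: no overlap, keep request */
        return 0;
    }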
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index 2ed97a7c9b2a..f865e5f4d382 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -34,6 +34,7 @@ static atomic_t irq_poll_active;
* true and let the handler run.
*/
bool irq_wait_for_poll(struct irq_desc *desc)
+ __must_hold(&desc->lock)
{
if (WARN_ONCE(irq_poll_cpu == smp_processor_id(),
"irq poll in progress on cpu %d for irq %d\n",