Diffstat (limited to 'kernel/irq/manage.c')
-rw-r--r--  kernel/irq/manage.c | 5 +++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 9dbdccab3b6a..84b54a17b95d 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -393,6 +393,9 @@ int irq_setup_affinity(struct irq_desc *desc)
}
cpumask_and(&mask, cpu_online_mask, set);
+ if (cpumask_empty(&mask))
+ cpumask_copy(&mask, cpu_online_mask);
+
if (node != NUMA_NO_NODE) {
const struct cpumask *nodemask = cpumask_of_node(node);
@@ -915,7 +918,7 @@ irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
#endif
/*
- * Interrupts which are not explicitely requested as threaded
+ * Interrupts which are not explicitly requested as threaded
* interrupts rely on the implicit bh/preempt disable of the hard irq
* context. So we need to disable bh here to avoid deadlocks and other
* side effects.
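
The first hunk above guards irq_setup_affinity() against an affinity set that no longer intersects the online CPUs: after cpumask_and(), an empty result is replaced with cpu_online_mask so the interrupt still has a valid target. Below is a minimal userspace sketch of that fallback pattern, not kernel code: a plain uint64_t stands in for struct cpumask, and the helper names and example mask values (mask_and, mask_empty, the 0x0F/0x30 masks) are illustrative assumptions, not kernel API.

#include <stdint.h>
#include <stdio.h>

/* Toy stand-ins for the cpumask helpers, one 64-bit word per mask. */
static uint64_t mask_and(uint64_t a, uint64_t b) { return a & b; }
static int mask_empty(uint64_t m) { return m == 0; }

int main(void)
{
	uint64_t online = 0x0F;     /* CPUs 0-3 online */
	uint64_t requested = 0x30;  /* requested CPUs 4-5, currently offline */

	/* Mirror of the patched logic: intersect with the online mask,
	 * then fall back to all online CPUs if nothing is left. */
	uint64_t mask = mask_and(online, requested);
	if (mask_empty(mask))
		mask = online;      /* analogous to cpumask_copy(&mask, cpu_online_mask) */

	printf("effective affinity mask: 0x%llx\n", (unsigned long long)mask);
	return 0;
}

The point of the fallback is simply that an interrupt should never end up with an empty effective affinity; routing it to any online CPU is preferable to leaving it untargetable.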