author		Thomas Gleixner <tglx@linutronix.de>	2017-06-20 01:37:29 +0200
committer	Thomas Gleixner <tglx@linutronix.de>	2017-06-22 18:21:17 +0200
commit		f0383c24b4855f6a4b5a358c7b2d2c16e0437e9b (patch)
tree		50d47ed9185fd2625db74530f7388e2e360e6e9e /kernel/irq/cpuhotplug.c
parent		genirq/cpuhotplug: Do not migrated shutdown irqs (diff)
genirq/cpuhotplug: Add support for cleaning up move in progress
In order to move x86 to the generic hotplug migration code, add support for
cleaning up move-in-progress bits. On architectures which do not have this
x86-specific (mis)feature enabled, the cleanup code is optimized out by the
compiler.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Marc Zyngier <marc.zyngier@arm.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Keith Busch <keith.busch@intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Christoph Hellwig <hch@lst.de>
Link: http://lkml.kernel.org/r/20170619235445.525817311@linutronix.de
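The "optimized out by the compiler" claim rests on the usual kernel stub pattern:
the cleanup helpers are real functions only when the pending-move feature is
configured, and constant no-ops otherwise. A minimal sketch, assuming the
declarations sit behind CONFIG_GENERIC_PENDING_IRQ in kernel/irq/internals.h
(the exact guard and location come from the rest of this series, not from the
hunk below):

/*
 * Illustrative only: when CONFIG_GENERIC_PENDING_IRQ is not set, the helper
 * degenerates to a compile-time constant, so the calls added to
 * migrate_one_irq() and the branches guarded by them can be dropped entirely.
 */
#ifdef CONFIG_GENERIC_PENDING_IRQ
bool irq_fixup_move_pending(struct irq_desc *desc, bool force_clear);
#else
static inline bool irq_fixup_move_pending(struct irq_desc *desc, bool fclear)
{
	return false;
}
#endif

With the stub in place, if (irq_fixup_move_pending(desc, true)) folds to
if (false) on architectures without the feature, so the pending-mask handling
costs nothing at runtime there.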
Diffstat (limited to 'kernel/irq/cpuhotplug.c')
-rw-r--r--	kernel/irq/cpuhotplug.c	28
1 file changed, 26 insertions(+), 2 deletions(-)
diff --git a/kernel/irq/cpuhotplug.c b/kernel/irq/cpuhotplug.c
index 09b20e127aee..4be4bd669d81 100644
--- a/kernel/irq/cpuhotplug.c
+++ b/kernel/irq/cpuhotplug.c
@@ -18,7 +18,7 @@ static bool migrate_one_irq(struct irq_desc *desc)
 {
 	struct irq_data *d = irq_desc_get_irq_data(desc);
 	struct irq_chip *chip = irq_data_get_irq_chip(d);
-	const struct cpumask *affinity = d->common->affinity;
+	const struct cpumask *affinity;
 	bool brokeaff = false;
 	int err;
 
@@ -41,9 +41,33 @@ static bool migrate_one_irq(struct irq_desc *desc)
 	 * Note: Do not check desc->action as this might be a chained
 	 * interrupt.
 	 */
+	affinity = irq_data_get_affinity_mask(d);
 	if (irqd_is_per_cpu(d) || !irqd_is_started(d) ||
-	    !cpumask_test_cpu(smp_processor_id(), affinity))
+	    !cpumask_test_cpu(smp_processor_id(), affinity)) {
+		/*
+		 * If an irq move is pending, abort it if the dying CPU is
+		 * the sole target.
+		 */
+		irq_fixup_move_pending(desc, false);
 		return false;
+	}
+
+	/*
+	 * Complete an eventually pending irq move cleanup. If this
+	 * interrupt was moved in hard irq context, then the vectors need
+	 * to be cleaned up. It can't wait until this interrupt actually
+	 * happens and this CPU was involved.
+	 */
+	irq_force_complete_move(desc);
+
+	/*
+	 * If there is a setaffinity pending, then try to reuse the pending
+	 * mask, so the last change of the affinity does not get lost. If
+	 * there is no move pending or the pending mask does not contain
+	 * any online CPU, use the current affinity mask.
+	 */
+	if (irq_fixup_move_pending(desc, true))
+		affinity = irq_desc_get_pending_mask(desc);
 
 	if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
 		affinity = cpu_online_mask;
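For readers following the hunk above, the two irq_fixup_move_pending() calls rely
on semantics roughly like the sketch below: with force_clear=false it only aborts
a pending move whose last online target is the outgoing CPU, while force_clear=true
additionally claims the pending mask so the caller can reuse it as the migration
target. This is an approximation of the helper introduced elsewhere in this series
(in kernel/irq/migration.c), not a verbatim copy:

static bool irq_fixup_move_pending(struct irq_desc *desc, bool force_clear)
{
	struct irq_data *data = irq_desc_get_irq_data(desc);

	if (!irqd_is_setaffinity_pending(data))
		return false;

	/*
	 * The outgoing CPU might be the last online target of a pending
	 * move. In that case the move can never be performed; clear the
	 * pending bit and let the caller fall back to the affinity mask.
	 */
	if (cpumask_any_and(desc->pending_mask, cpu_online_mask) >= nr_cpu_ids) {
		irqd_clr_move_pending(data);
		return false;
	}
	if (force_clear)
		irqd_clr_move_pending(data);
	return true;
}

Under these assumed semantics, migrate_one_irq() first aborts a doomed move for
interrupts that need no migration (per-CPU, not started, or not targeting the
dying CPU), then prefers the pending mask over the current affinity so an
affinity change requested from hard irq context is not lost during the hotplug
migration.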