From 07d36c9e84002678d75b97d2361eb6302a718359 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 13 Jul 2016 17:16:03 +0000 Subject: x86/vdso: Convert to hotplug state machine Install the callbacks via the state machine and let the core invoke the callbacks on the already online CPUs. Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: Anna-Maria Gleixner Acked-by: Andy Lutomirski Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: rt@linutronix.de Link: http://lkml.kernel.org/r/20160713153332.987560239@linutronix.de Signed-off-by: Ingo Molnar --- include/linux/cpuhotplug.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/linux') diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 386374d19987..fe056408335e 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -14,6 +14,7 @@ enum cpuhp_state { CPUHP_TEARDOWN_CPU, CPUHP_AP_ONLINE_IDLE, CPUHP_AP_SMPBOOT_THREADS, + CPUHP_AP_X86_VDSO_VMA_ONLINE, CPUHP_AP_NOTIFY_ONLINE, CPUHP_AP_ONLINE_DYN, CPUHP_AP_ONLINE_DYN_END = CPUHP_AP_ONLINE_DYN + 30, -- cgit v1.2.3-59-g8ed1b From 93131f7a9b5c261f9995b4a6cbd7dbbec1351d2f Mon Sep 17 00:00:00 2001 From: Richard Cochran Date: Wed, 13 Jul 2016 17:16:04 +0000 Subject: irqchip/gic: Convert to hotplug state machine More or less straightforward, although this driver sports some very interesting SMP setup code. Regarding the callback ordering, this deleted comment is interesting: ... the GIC needs to be up before the ARM generic timers. That comment is half baked, as the same requirement is true for perf. Signed-off-by: Richard Cochran Signed-off-by: Anna-Maria Gleixner Reviewed-by: Sebastian Andrzej Siewior Acked-by: Jason Cooper Cc: Linus Torvalds Cc: Marc Zyngier Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: rt@linutronix.de Link: http://lkml.kernel.org/r/20160713153333.069777215@linutronix.de Signed-off-by: Ingo Molnar --- drivers/irqchip/irq-gic.c | 23 ++++++----------------- include/linux/cpuhotplug.h | 1 + 2 files changed, 7 insertions(+), 17 deletions(-) (limited to 'include/linux') diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c index fbc4ae2afd29..606f114166a1 100644 --- a/drivers/irqchip/irq-gic.c +++ b/drivers/irqchip/irq-gic.c @@ -984,25 +984,12 @@ static int gic_irq_domain_translate(struct irq_domain *d, return -EINVAL; } -#ifdef CONFIG_SMP -static int gic_secondary_init(struct notifier_block *nfb, unsigned long action, - void *hcpu) +static int gic_starting_cpu(unsigned int cpu) { - if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) - gic_cpu_init(&gic_data[0]); - return NOTIFY_OK; + gic_cpu_init(&gic_data[0]); + return 0; } -/* - * Notifier for enabling the GIC CPU interface. Set an arbitrarily high - * priority because the GIC needs to be up before the ARM generic timers.
- */ -static struct notifier_block gic_cpu_notifier = { - .notifier_call = gic_secondary_init, - .priority = 100, -}; -#endif - static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs, void *arg) { @@ -1148,8 +1135,10 @@ static int __init __gic_init_bases(struct gic_chip_data *gic, int irq_start, gic_cpu_map[i] = 0xff; #ifdef CONFIG_SMP set_smp_cross_call(gic_raise_softirq); - register_cpu_notifier(&gic_cpu_notifier); #endif + cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GIC_STARTING, + "AP_IRQ_GIC_STARTING", + gic_starting_cpu, NULL); set_handle_irq(gic_handle_irq); if (static_key_true(&supports_deactivate)) pr_info("GIC: Using split EOI/Deactivate mode\n"); diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index fe056408335e..edc8adf26410 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -9,6 +9,7 @@ enum cpuhp_state { CPUHP_AP_IDLE_DEAD, CPUHP_AP_OFFLINE, CPUHP_AP_SCHED_STARTING, + CPUHP_AP_IRQ_GIC_STARTING, CPUHP_AP_NOTIFY_STARTING, CPUHP_AP_ONLINE, CPUHP_TEARDOWN_CPU, -- cgit v1.2.3-59-g8ed1b From 6670a6d864653859229d2e921ea193a4a677dfea Mon Sep 17 00:00:00 2001 From: Richard Cochran Date: Wed, 13 Jul 2016 17:16:05 +0000 Subject: irqchip/gicv3: Convert to hotplug state machine Install the callbacks via the state machine. Signed-off-by: Richard Cochran Signed-off-by: Anna-Maria Gleixner Reviewed-by: Sebastian Andrzej Siewior Acked-by: Jason Cooper Cc: Linus Torvalds Cc: Marc Zyngier Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: rt@linutronix.de Link: http://lkml.kernel.org/r/20160713153333.163186301@linutronix.de Signed-off-by: Ingo Molnar --- drivers/irqchip/irq-gic-v3.c | 22 +++++++--------------- include/linux/cpuhotplug.h | 1 + 2 files changed, 8 insertions(+), 15 deletions(-) (limited to 'include/linux') diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 2c5ba0e704bf..6fc56c3466b0 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -538,23 +538,13 @@ static void gic_cpu_init(void) } #ifdef CONFIG_SMP -static int gic_secondary_init(struct notifier_block *nfb, - unsigned long action, void *hcpu) + +static int gic_starting_cpu(unsigned int cpu) { - if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) - gic_cpu_init(); - return NOTIFY_OK; + gic_cpu_init(); + return 0; } -/* - * Notifier for enabling the GIC CPU interface. Set an arbitrarily high - * priority because the GIC needs to be up before the ARM generic timers. 
- */ -static struct notifier_block gic_cpu_notifier = { - .notifier_call = gic_secondary_init, - .priority = 100, -}; - static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask, unsigned long cluster_id) { @@ -634,7 +624,9 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq) static void gic_smp_init(void) { set_smp_cross_call(gic_raise_softirq); - register_cpu_notifier(&gic_cpu_notifier); + cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GICV3_STARTING, + "AP_IRQ_GICV3_STARTING", gic_starting_cpu, + NULL); } static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index edc8adf26410..d6e4a06d5020 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -10,6 +10,7 @@ enum cpuhp_state { CPUHP_AP_OFFLINE, CPUHP_AP_SCHED_STARTING, CPUHP_AP_IRQ_GIC_STARTING, + CPUHP_AP_IRQ_GICV3_STARTING, CPUHP_AP_NOTIFY_STARTING, CPUHP_AP_ONLINE, CPUHP_TEARDOWN_CPU, -- cgit v1.2.3-59-g8ed1b From 6c034d1736384c23ea97509e36e84f35f4a9b23c Mon Sep 17 00:00:00 2001 From: Richard Cochran Date: Wed, 13 Jul 2016 17:16:06 +0000 Subject: irqchip/hip04: Convert to hotplug state machine Install the callbacks via the state machine and let the core invoke the callbacks on the already online CPUs. Signed-off-by: Richard Cochran Signed-off-by: Anna-Maria Gleixner Reviewed-by: Sebastian Andrzej Siewior Acked-by: Jason Cooper Cc: Linus Torvalds Cc: Marc Zyngier Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: rt@linutronix.de Link: http://lkml.kernel.org/r/20160713153333.244546182@linutronix.de Signed-off-by: Ingo Molnar --- drivers/irqchip/irq-hip04.c | 25 +++++-------------------- include/linux/cpuhotplug.h | 1 + 2 files changed, 6 insertions(+), 20 deletions(-) (limited to 'include/linux') diff --git a/drivers/irqchip/irq-hip04.c b/drivers/irqchip/irq-hip04.c index 9e25d8ce08e5..021b0e0833c1 100644 --- a/drivers/irqchip/irq-hip04.c +++ b/drivers/irqchip/irq-hip04.c @@ -342,26 +342,12 @@ static int hip04_irq_domain_xlate(struct irq_domain *d, return ret; } -#ifdef CONFIG_SMP -static int hip04_irq_secondary_init(struct notifier_block *nfb, - unsigned long action, - void *hcpu) +static int hip04_irq_starting_cpu(unsigned int cpu) { - if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) - hip04_irq_cpu_init(&hip04_data); - return NOTIFY_OK; + hip04_irq_cpu_init(&hip04_data); + return 0; } -/* - * Notifier for enabling the INTC CPU interface. Set an arbitrarily high - * priority because the GIC needs to be up before the ARM generic timers. 
- */ -static struct notifier_block hip04_irq_cpu_notifier = { - .notifier_call = hip04_irq_secondary_init, - .priority = 100, -}; -#endif - static const struct irq_domain_ops hip04_irq_domain_ops = { .map = hip04_irq_domain_map, .xlate = hip04_irq_domain_xlate, @@ -417,13 +403,12 @@ hip04_of_init(struct device_node *node, struct device_node *parent) #ifdef CONFIG_SMP set_smp_cross_call(hip04_raise_softirq); - register_cpu_notifier(&hip04_irq_cpu_notifier); #endif set_handle_irq(hip04_handle_irq); hip04_irq_dist_init(&hip04_data); - hip04_irq_cpu_init(&hip04_data); - + cpuhp_setup_state(CPUHP_AP_IRQ_HIP04_STARTING, "AP_IRQ_HIP04_STARTING", + hip04_irq_starting_cpu, NULL); return 0; } IRQCHIP_DECLARE(hip04_intc, "hisilicon,hip04-intc", hip04_of_init); diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index d6e4a06d5020..62ab4ff98521 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -11,6 +11,7 @@ enum cpuhp_state { CPUHP_AP_SCHED_STARTING, CPUHP_AP_IRQ_GIC_STARTING, CPUHP_AP_IRQ_GICV3_STARTING, + CPUHP_AP_IRQ_HIP04_STARTING, CPUHP_AP_NOTIFY_STARTING, CPUHP_AP_ONLINE, CPUHP_TEARDOWN_CPU, -- cgit v1.2.3-59-g8ed1b From cb5ff2d245c1023f1004eca73038b2cae87bcc6a Mon Sep 17 00:00:00 2001 From: Richard Cochran Date: Wed, 13 Jul 2016 17:16:07 +0000 Subject: irqchip/armada-370-xp: Convert to hotplug state machine Install the callbacks via the state machine. Signed-off-by: Richard Cochran Signed-off-by: Anna-Maria Gleixner Acked-by: Jason Cooper Cc: Linus Torvalds Cc: Marc Zyngier Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: rt@linutronix.de Link: http://lkml.kernel.org/r/20160713153333.330661455@linutronix.de Signed-off-by: Ingo Molnar --- drivers/irqchip/irq-armada-370-xp.c | 44 +++++++++++++------------------------ include/linux/cpuhotplug.h | 2 ++ 2 files changed, 17 insertions(+), 29 deletions(-) (limited to 'include/linux') diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c index e7dc6cbda2a1..e2b8feedc5d8 100644 --- a/drivers/irqchip/irq-armada-370-xp.c +++ b/drivers/irqchip/irq-armada-370-xp.c @@ -344,40 +344,22 @@ static void armada_mpic_send_doorbell(const struct cpumask *mask, writel((map << 8) | irq, main_int_base + ARMADA_370_XP_SW_TRIG_INT_OFFS); } +#endif -static int armada_xp_mpic_secondary_init(struct notifier_block *nfb, - unsigned long action, void *hcpu) +static int armada_xp_mpic_starting_cpu(unsigned int cpu) { - if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) { - armada_xp_mpic_perf_init(); - armada_xp_mpic_smp_cpu_init(); - } - - return NOTIFY_OK; + armada_xp_mpic_perf_init(); + armada_xp_mpic_smp_cpu_init(); + return 0; } -static struct notifier_block armada_370_xp_mpic_cpu_notifier = { - .notifier_call = armada_xp_mpic_secondary_init, - .priority = 100, -}; - -static int mpic_cascaded_secondary_init(struct notifier_block *nfb, - unsigned long action, void *hcpu) +static int mpic_cascaded_starting_cpu(unsigned int cpu) { - if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) { - armada_xp_mpic_perf_init(); - enable_percpu_irq(parent_irq, IRQ_TYPE_NONE); - } - - return NOTIFY_OK; + armada_xp_mpic_perf_init(); + enable_percpu_irq(parent_irq, IRQ_TYPE_NONE); + return 0; } -static struct notifier_block mpic_cascaded_cpu_notifier = { - .notifier_call = mpic_cascaded_secondary_init, - .priority = 100, -}; -#endif /* CONFIG_SMP */ - static const struct irq_domain_ops armada_370_xp_mpic_irq_ops = { .map = armada_370_xp_mpic_irq_map, .xlate = irq_domain_xlate_onecell, @@ -595,11 +577,15 
@@ static int __init armada_370_xp_mpic_of_init(struct device_node *node, set_handle_irq(armada_370_xp_handle_irq); #ifdef CONFIG_SMP set_smp_cross_call(armada_mpic_send_doorbell); - register_cpu_notifier(&armada_370_xp_mpic_cpu_notifier); + cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_ARMADA_XP_STARTING, + "AP_IRQ_ARMADA_XP_STARTING", + armada_xp_mpic_starting_cpu, NULL); #endif } else { #ifdef CONFIG_SMP - register_cpu_notifier(&mpic_cascaded_cpu_notifier); + cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_ARMADA_CASC_STARTING, + "AP_IRQ_ARMADA_CASC_STARTING", + mpic_cascaded_starting_cpu, NULL); #endif irq_set_chained_handler(parent_irq, armada_370_xp_mpic_handle_cascade_irq); diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 62ab4ff98521..3a08ab962346 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -12,6 +12,8 @@ enum cpuhp_state { CPUHP_AP_IRQ_GIC_STARTING, CPUHP_AP_IRQ_GICV3_STARTING, CPUHP_AP_IRQ_HIP04_STARTING, + CPUHP_AP_IRQ_ARMADA_XP_STARTING, + CPUHP_AP_IRQ_ARMADA_CASC_STARTING, CPUHP_AP_NOTIFY_STARTING, CPUHP_AP_ONLINE, CPUHP_TEARDOWN_CPU, -- cgit v1.2.3-59-g8ed1b From 7ca04bc277431a271061dd85f98b6da4171ad2e4 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 13 Jul 2016 17:16:07 +0000 Subject: irqchip/bcm2836: Convert to hotplug state machine Install the callbacks via the state machine. Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: Anna-Maria Gleixner Acked-by: Jason Cooper Cc: Linus Torvalds Cc: Marc Zyngier Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: rt@linutronix.de Link: http://lkml.kernel.org/r/20160713153333.416260485@linutronix.de Signed-off-by: Ingo Molnar --- drivers/irqchip/irq-bcm2836.c | 34 +++++++++++++--------------------- include/linux/cpuhotplug.h | 1 + 2 files changed, 14 insertions(+), 21 deletions(-) (limited to 'include/linux') diff --git a/drivers/irqchip/irq-bcm2836.c b/drivers/irqchip/irq-bcm2836.c index 72ff1d5c5de6..f2575cb2b013 100644 --- a/drivers/irqchip/irq-bcm2836.c +++ b/drivers/irqchip/irq-bcm2836.c @@ -202,26 +202,19 @@ static void bcm2836_arm_irqchip_send_ipi(const struct cpumask *mask, } } -/* Unmasks the IPI on the CPU when it's online. */ -static int bcm2836_arm_irqchip_cpu_notify(struct notifier_block *nfb, - unsigned long action, void *hcpu) +static int bcm2836_cpu_starting(unsigned int cpu) { - unsigned int cpu = (unsigned long)hcpu; - unsigned int int_reg = LOCAL_MAILBOX_INT_CONTROL0; - unsigned int mailbox = 0; - - if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) - bcm2836_arm_irqchip_unmask_per_cpu_irq(int_reg, mailbox, cpu); - else if (action == CPU_DYING) - bcm2836_arm_irqchip_mask_per_cpu_irq(int_reg, mailbox, cpu); - - return NOTIFY_OK; + bcm2836_arm_irqchip_unmask_per_cpu_irq(LOCAL_MAILBOX_INT_CONTROL0, 0, + cpu); + return 0; } -static struct notifier_block bcm2836_arm_irqchip_cpu_notifier = { - .notifier_call = bcm2836_arm_irqchip_cpu_notify, - .priority = 100, -}; +static int bcm2836_cpu_dying(unsigned int cpu) +{ + bcm2836_arm_irqchip_mask_per_cpu_irq(LOCAL_MAILBOX_INT_CONTROL0, 0, + cpu); + return 0; +} #ifdef CONFIG_ARM int __init bcm2836_smp_boot_secondary(unsigned int cpu, @@ -251,10 +244,9 @@ bcm2836_arm_irqchip_smp_init(void) { #ifdef CONFIG_SMP /* Unmask IPIs to the boot CPU. 
*/ - bcm2836_arm_irqchip_cpu_notify(&bcm2836_arm_irqchip_cpu_notifier, - CPU_STARTING, - (void *)(uintptr_t)smp_processor_id()); - register_cpu_notifier(&bcm2836_arm_irqchip_cpu_notifier); + cpuhp_setup_state(CPUHP_AP_IRQ_BCM2836_STARTING, + "AP_IRQ_BCM2836_STARTING", bcm2836_cpu_starting, + bcm2836_cpu_dying); set_smp_cross_call(bcm2836_arm_irqchip_send_ipi); diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 3a08ab962346..420cfcf78908 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -14,6 +14,7 @@ enum cpuhp_state { CPUHP_AP_IRQ_HIP04_STARTING, CPUHP_AP_IRQ_ARMADA_XP_STARTING, CPUHP_AP_IRQ_ARMADA_CASC_STARTING, + CPUHP_AP_IRQ_BCM2836_STARTING, CPUHP_AP_NOTIFY_STARTING, CPUHP_AP_ONLINE, CPUHP_TEARDOWN_CPU, -- cgit v1.2.3-59-g8ed1b From 7fbbaebf8cd18cca58e84837fd80c07ea8d8bd56 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 13 Jul 2016 17:16:08 +0000 Subject: ARM/mvebu: Convert to hotplug state machine Install the callbacks via the state machine. Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: Anna-Maria Gleixner Cc: Andrew Lunn Cc: Gregory Clement Cc: Jason Cooper Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Russell King Cc: Sebastian Hesselbarth Cc: Thomas Gleixner Cc: linux-arm-kernel@lists.infradead.org Cc: rt@linutronix.de Link: http://lkml.kernel.org/r/20160713153333.503198935@linutronix.de Signed-off-by: Ingo Molnar --- arch/arm/mach-mvebu/coherency.c | 19 ++++++------------- include/linux/cpuhotplug.h | 1 + 2 files changed, 7 insertions(+), 13 deletions(-) (limited to 'include/linux') diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c index 7e989d61159c..77aaa5243a20 100644 --- a/arch/arm/mach-mvebu/coherency.c +++ b/arch/arm/mach-mvebu/coherency.c @@ -111,20 +111,12 @@ static struct notifier_block mvebu_hwcc_pci_nb __maybe_unused = { .notifier_call = mvebu_hwcc_notifier, }; -static int armada_xp_clear_shared_l2_notifier_func(struct notifier_block *nfb, - unsigned long action, void *hcpu) +static int armada_xp_clear_l2_starting(unsigned int cpu) { - if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) - armada_xp_clear_shared_l2(); - - return NOTIFY_OK; + armada_xp_clear_shared_l2(); + return 0; } -static struct notifier_block armada_xp_clear_shared_l2_notifier = { - .notifier_call = armada_xp_clear_shared_l2_notifier_func, - .priority = 100, -}; - static void __init armada_370_coherency_init(struct device_node *np) { struct resource res; @@ -155,8 +147,9 @@ static void __init armada_370_coherency_init(struct device_node *np) of_node_put(cpu_config_np); - register_cpu_notifier(&armada_xp_clear_shared_l2_notifier); - + cpuhp_setup_state_nocalls(CPUHP_AP_ARM_MVEBU_COHERENCY, + "AP_ARM_MVEBU_COHERENCY", + armada_xp_clear_l2_starting, NULL); exit: set_cpu_coherent(); } diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 420cfcf78908..d769ec941fd3 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -15,6 +15,7 @@ enum cpuhp_state { CPUHP_AP_IRQ_ARMADA_XP_STARTING, CPUHP_AP_IRQ_ARMADA_CASC_STARTING, CPUHP_AP_IRQ_BCM2836_STARTING, + CPUHP_AP_ARM_MVEBU_COHERENCY, CPUHP_AP_NOTIFY_STARTING, CPUHP_AP_ONLINE, CPUHP_TEARDOWN_CPU, -- cgit v1.2.3-59-g8ed1b From 00e16c3d68fce504e880f59c9bdf23b2a4759d6d Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 13 Jul 2016 17:16:09 +0000 Subject: perf/core: Convert to hotplug state machine Actually a nice symmetric startup/teardown pair which fits properly into the state machine concept. 
In the long run we should be able to invoke the startup callback for the boot CPU via the state machine and get rid of the init function which invokes it on the boot CPU. Note: This comes actually before the perf hardware callbacks. In the notifier model the hardware callbacks have a higher priority than the core callback. But that's solely for CPU offline so that hardware migration of events happens before the core is notified about the outgoing CPU. With the symetric state array model we have the following ordering: UP: core -> hardware DOWN: hardware -> core Signed-off-by: Thomas Gleixner Signed-off-by: Anna-Maria Gleixner Reviewed-by: Sebastian Siewior Cc: Alexander Shishkin Cc: Arnaldo Carvalho de Melo Cc: Arnaldo Carvalho de Melo Cc: Jiri Olsa Cc: Linus Torvalds Cc: Oleg Nesterov Cc: Peter Zijlstra Cc: Rasmus Villemoes Cc: rt@linutronix.de Link: http://lkml.kernel.org/r/20160713153333.587514098@linutronix.de Signed-off-by: Ingo Molnar --- include/linux/cpuhotplug.h | 2 ++ include/linux/perf_event.h | 9 ++++++++ kernel/cpu.c | 11 +++++++++ kernel/events/core.c | 56 +++++++++------------------------------------- 4 files changed, 32 insertions(+), 46 deletions(-) (limited to 'include/linux') diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index d769ec941fd3..067082e3fd41 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -4,6 +4,7 @@ enum cpuhp_state { CPUHP_OFFLINE, CPUHP_CREATE_THREADS, + CPUHP_PERF_PREPARE, CPUHP_NOTIFY_PREPARE, CPUHP_BRINGUP_CPU, CPUHP_AP_IDLE_DEAD, @@ -22,6 +23,7 @@ enum cpuhp_state { CPUHP_AP_ONLINE_IDLE, CPUHP_AP_SMPBOOT_THREADS, CPUHP_AP_X86_VDSO_VMA_ONLINE, + CPUHP_AP_PERF_ONLINE, CPUHP_AP_NOTIFY_ONLINE, CPUHP_AP_ONLINE_DYN, CPUHP_AP_ONLINE_DYN_END = CPUHP_AP_ONLINE_DYN + 30, diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 1a827cecd62f..9abeb6948e70 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -1354,4 +1354,13 @@ _name##_show(struct device *dev, \ \ static struct device_attribute format_attr_##_name = __ATTR_RO(_name) +/* Performance counter hotplug functions */ +#ifdef CONFIG_PERF_EVENTS +int perf_event_init_cpu(unsigned int cpu); +int perf_event_exit_cpu(unsigned int cpu); +#else +#define perf_event_init_cpu NULL +#define perf_event_exit_cpu NULL +#endif + #endif /* _LINUX_PERF_EVENT_H */ diff --git a/kernel/cpu.c b/kernel/cpu.c index fe71ce4e60f1..3705d9043c08 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -1180,6 +1180,11 @@ static struct cpuhp_step cpuhp_bp_states[] = { .teardown = NULL, .cant_stop = true, }, + [CPUHP_PERF_PREPARE] = { + .name = "perf prepare", + .startup = perf_event_init_cpu, + .teardown = perf_event_exit_cpu, + }, /* * Preparatory and dead notifiers. Will be replaced once the notifiers * are converted to states. @@ -1257,6 +1262,12 @@ static struct cpuhp_step cpuhp_ap_states[] = { .startup = smpboot_unpark_threads, .teardown = NULL, }, + [CPUHP_AP_PERF_ONLINE] = { + .name = "perf online", + .startup = perf_event_init_cpu, + .teardown = perf_event_exit_cpu, + }, + /* * Online/down_prepare notifiers. Will be removed once the notifiers * are converted to states. 
diff --git a/kernel/events/core.c b/kernel/events/core.c index 43d43a2d5811..f3ef1c29a7c9 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -10255,7 +10255,7 @@ static void __init perf_event_init_all_cpus(void) } } -static void perf_event_init_cpu(int cpu) +int perf_event_init_cpu(unsigned int cpu) { struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu); @@ -10268,6 +10268,7 @@ static void perf_event_init_cpu(int cpu) rcu_assign_pointer(swhash->swevent_hlist, hlist); } mutex_unlock(&swhash->hlist_mutex); + return 0; } #if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC_CORE @@ -10299,14 +10300,17 @@ static void perf_event_exit_cpu_context(int cpu) } srcu_read_unlock(&pmus_srcu, idx); } +#else + +static void perf_event_exit_cpu_context(int cpu) { } + +#endif -static void perf_event_exit_cpu(int cpu) +int perf_event_exit_cpu(unsigned int cpu) { perf_event_exit_cpu_context(cpu); + return 0; } -#else -static inline void perf_event_exit_cpu(int cpu) { } -#endif static int perf_reboot(struct notifier_block *notifier, unsigned long val, void *v) @@ -10328,46 +10332,6 @@ static struct notifier_block perf_reboot_notifier = { .priority = INT_MIN, }; -static int -perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) -{ - unsigned int cpu = (long)hcpu; - - switch (action & ~CPU_TASKS_FROZEN) { - - case CPU_UP_PREPARE: - /* - * This must be done before the CPU comes alive, because the - * moment we can run tasks we can encounter (software) events. - * - * Specifically, someone can have inherited events on kthreadd - * or a pre-existing worker thread that gets re-bound. - */ - perf_event_init_cpu(cpu); - break; - - case CPU_DOWN_PREPARE: - /* - * This must be done before the CPU dies because after that an - * active event might want to IPI the CPU and that'll not work - * so great for dead CPUs. - * - * XXX smp_call_function_single() return -ENXIO without a warn - * so we could possibly deal with this. - * - * This is safe against new events arriving because - * sys_perf_event_open() serializes against hotplug using - * get_online_cpus(). - */ - perf_event_exit_cpu(cpu); - break; - default: - break; - } - - return NOTIFY_OK; -} - void __init perf_event_init(void) { int ret; @@ -10380,7 +10344,7 @@ void __init perf_event_init(void) perf_pmu_register(&perf_cpu_clock, NULL, -1); perf_pmu_register(&perf_task_clock, NULL, -1); perf_tp_register(); - perf_cpu_notifier(perf_cpu_notify); + perf_event_init_cpu(smp_processor_id()); register_reboot_notifier(&perf_reboot_notifier); ret = init_hw_breakpoint(); -- cgit v1.2.3-59-g8ed1b From 95ca792c7582fde2c435f84cbe872616e3c951f6 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 13 Jul 2016 17:16:10 +0000 Subject: perf/x86: Convert the core to the hotplug state machine Replace the perf_notifier() install mechanism, which invokes magically the callback on the current CPU. Convert the hardware specific callbacks which are invoked from the x86 perf core to return proper error codes instead of totally pointless NOTIFY_BAD return values. 
Signed-off-by: Thomas Gleixner Signed-off-by: Anna-Maria Gleixner Reviewed-by: Sebastian Andrzej Siewior Cc: Adam Borowski Cc: Alexander Shishkin Cc: Arnaldo Carvalho de Melo Cc: Arnaldo Carvalho de Melo Cc: Borislav Petkov Cc: Jiri Olsa Cc: Kan Liang Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Stephane Eranian Cc: Vince Weaver Cc: rt@linutronix.de Link: http://lkml.kernel.org/r/20160713153333.670720553@linutronix.de Signed-off-by: Ingo Molnar --- arch/x86/events/amd/core.c | 6 +-- arch/x86/events/core.c | 103 +++++++++++++++++++++++++++---------------- arch/x86/events/intel/core.c | 4 +- include/linux/cpuhotplug.h | 3 ++ 4 files changed, 74 insertions(+), 42 deletions(-) (limited to 'include/linux') diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c index bd3e8421b57c..e07a22bb9308 100644 --- a/arch/x86/events/amd/core.c +++ b/arch/x86/events/amd/core.c @@ -370,13 +370,13 @@ static int amd_pmu_cpu_prepare(int cpu) WARN_ON_ONCE(cpuc->amd_nb); if (!x86_pmu.amd_nb_constraints) - return NOTIFY_OK; + return 0; cpuc->amd_nb = amd_alloc_nb(cpu); if (!cpuc->amd_nb) - return NOTIFY_BAD; + return -ENOMEM; - return NOTIFY_OK; + return 0; } static void amd_pmu_cpu_starting(int cpu) diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index 26ced536005a..4ce3745f26f5 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -1477,49 +1477,49 @@ NOKPROBE_SYMBOL(perf_event_nmi_handler); struct event_constraint emptyconstraint; struct event_constraint unconstrained; -static int -x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu) +static int x86_pmu_prepare_cpu(unsigned int cpu) { - unsigned int cpu = (long)hcpu; struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); - int i, ret = NOTIFY_OK; - - switch (action & ~CPU_TASKS_FROZEN) { - case CPU_UP_PREPARE: - for (i = 0 ; i < X86_PERF_KFREE_MAX; i++) - cpuc->kfree_on_online[i] = NULL; - if (x86_pmu.cpu_prepare) - ret = x86_pmu.cpu_prepare(cpu); - break; - - case CPU_STARTING: - if (x86_pmu.cpu_starting) - x86_pmu.cpu_starting(cpu); - break; + int i; - case CPU_ONLINE: - for (i = 0 ; i < X86_PERF_KFREE_MAX; i++) { - kfree(cpuc->kfree_on_online[i]); - cpuc->kfree_on_online[i] = NULL; - } - break; + for (i = 0 ; i < X86_PERF_KFREE_MAX; i++) + cpuc->kfree_on_online[i] = NULL; + if (x86_pmu.cpu_prepare) + return x86_pmu.cpu_prepare(cpu); + return 0; +} - case CPU_DYING: - if (x86_pmu.cpu_dying) - x86_pmu.cpu_dying(cpu); - break; +static int x86_pmu_dead_cpu(unsigned int cpu) +{ + if (x86_pmu.cpu_dead) + x86_pmu.cpu_dead(cpu); + return 0; +} - case CPU_UP_CANCELED: - case CPU_DEAD: - if (x86_pmu.cpu_dead) - x86_pmu.cpu_dead(cpu); - break; +static int x86_pmu_online_cpu(unsigned int cpu) +{ + struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); + int i; - default: - break; + for (i = 0 ; i < X86_PERF_KFREE_MAX; i++) { + kfree(cpuc->kfree_on_online[i]); + cpuc->kfree_on_online[i] = NULL; } + return 0; +} - return ret; +static int x86_pmu_starting_cpu(unsigned int cpu) +{ + if (x86_pmu.cpu_starting) + x86_pmu.cpu_starting(cpu); + return 0; +} + +static int x86_pmu_dying_cpu(unsigned int cpu) +{ + if (x86_pmu.cpu_dying) + x86_pmu.cpu_dying(cpu); + return 0; } static void __init pmu_check_apic(void) @@ -1764,10 +1764,39 @@ static int __init init_hw_perf_events(void) pr_info("... fixed-purpose events: %d\n", x86_pmu.num_counters_fixed); pr_info("... 
event mask: %016Lx\n", x86_pmu.intel_ctrl); - perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW); - perf_cpu_notifier(x86_pmu_notifier); + /* + * Install callbacks. Core will call them for each online + * cpu. + */ + err = cpuhp_setup_state(CPUHP_PERF_X86_PREPARE, "PERF_X86_PREPARE", + x86_pmu_prepare_cpu, x86_pmu_dead_cpu); + if (err) + return err; + + err = cpuhp_setup_state(CPUHP_AP_PERF_X86_STARTING, + "AP_PERF_X86_STARTING", x86_pmu_starting_cpu, + x86_pmu_dying_cpu); + if (err) + goto out; + + err = cpuhp_setup_state(CPUHP_AP_PERF_X86_ONLINE, "AP_PERF_X86_ONLINE", + x86_pmu_online_cpu, NULL); + if (err) + goto out1; + + err = perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW); + if (err) + goto out2; return 0; + +out2: + cpuhp_remove_state(CPUHP_AP_PERF_X86_ONLINE); +out1: + cpuhp_remove_state(CPUHP_AP_PERF_X86_STARTING); +out: + cpuhp_remove_state(CPUHP_PERF_X86_PREPARE); + return err; } early_initcall(init_hw_perf_events); diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index 9b4f9d3ce465..6a1441be485b 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -3050,7 +3050,7 @@ static int intel_pmu_cpu_prepare(int cpu) cpuc->excl_thread_id = 0; } - return NOTIFY_OK; + return 0; err_constraint_list: kfree(cpuc->constraint_list); @@ -3061,7 +3061,7 @@ err_shared_regs: cpuc->shared_regs = NULL; err: - return NOTIFY_BAD; + return -ENOMEM; } static void intel_pmu_cpu_starting(int cpu) diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 067082e3fd41..858915f40fb3 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -5,6 +5,7 @@ enum cpuhp_state { CPUHP_OFFLINE, CPUHP_CREATE_THREADS, CPUHP_PERF_PREPARE, + CPUHP_PERF_X86_PREPARE, CPUHP_NOTIFY_PREPARE, CPUHP_BRINGUP_CPU, CPUHP_AP_IDLE_DEAD, @@ -17,6 +18,7 @@ enum cpuhp_state { CPUHP_AP_IRQ_ARMADA_CASC_STARTING, CPUHP_AP_IRQ_BCM2836_STARTING, CPUHP_AP_ARM_MVEBU_COHERENCY, + CPUHP_AP_PERF_X86_STARTING, CPUHP_AP_NOTIFY_STARTING, CPUHP_AP_ONLINE, CPUHP_TEARDOWN_CPU, @@ -24,6 +26,7 @@ enum cpuhp_state { CPUHP_AP_SMPBOOT_THREADS, CPUHP_AP_X86_VDSO_VMA_ONLINE, CPUHP_AP_PERF_ONLINE, + CPUHP_AP_PERF_X86_ONLINE, CPUHP_AP_NOTIFY_ONLINE, CPUHP_AP_ONLINE_DYN, CPUHP_AP_ONLINE_DYN_END = CPUHP_AP_ONLINE_DYN + 30, -- cgit v1.2.3-59-g8ed1b From 1a246b9f58c6149b5a5bec081418b8ed890e0dfe Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 13 Jul 2016 17:16:12 +0000 Subject: perf/x86/intel/uncore: Convert to hotplug state machine Convert the notifiers to state machine states and let the core code do the setup for the already online CPUs. This notifier has a completely undocumented ordering requirement versus perf hardcoded in the notifier priority. This odering is only required for CPU down, so that hardware migration happens before the core is notified about the outgoing CPU. 
Signed-off-by: Thomas Gleixner Signed-off-by: Anna-Maria Gleixner Reviewed-by: Sebastian Andrzej Siewior Cc: Alexander Shishkin Cc: Arnaldo Carvalho de Melo Cc: Borislav Petkov Cc: Jiri Olsa Cc: Kan Liang Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Stephane Eranian Cc: Vince Weaver Cc: rt@linutronix.de Link: http://lkml.kernel.org/r/20160713153333.752695801@linutronix.de Signed-off-by: Ingo Molnar --- arch/x86/events/intel/uncore.c | 133 ++++++++++++++--------------------------- include/linux/cpuhotplug.h | 3 + 2 files changed, 48 insertions(+), 88 deletions(-) (limited to 'include/linux') diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c index fce74062d981..8e280a7cd0cc 100644 --- a/arch/x86/events/intel/uncore.c +++ b/arch/x86/events/intel/uncore.c @@ -1034,7 +1034,7 @@ static void uncore_pci_exit(void) } } -static void uncore_cpu_dying(int cpu) +static int uncore_cpu_dying(unsigned int cpu) { struct intel_uncore_type *type, **types = uncore_msr_uncores; struct intel_uncore_pmu *pmu; @@ -1051,16 +1051,19 @@ static void uncore_cpu_dying(int cpu) uncore_box_exit(box); } } + return 0; } -static void uncore_cpu_starting(int cpu, bool init) +static int first_init; + +static int uncore_cpu_starting(unsigned int cpu) { struct intel_uncore_type *type, **types = uncore_msr_uncores; struct intel_uncore_pmu *pmu; struct intel_uncore_box *box; int i, pkg, ncpus = 1; - if (init) { + if (first_init) { /* * On init we get the number of online cpus in the package * and set refcount for all of them. @@ -1081,9 +1084,11 @@ static void uncore_cpu_starting(int cpu, bool init) uncore_box_init(box); } } + + return 0; } -static int uncore_cpu_prepare(int cpu) +static int uncore_cpu_prepare(unsigned int cpu) { struct intel_uncore_type *type, **types = uncore_msr_uncores; struct intel_uncore_pmu *pmu; @@ -1146,13 +1151,13 @@ static void uncore_change_context(struct intel_uncore_type **uncores, uncore_change_type_ctx(*uncores, old_cpu, new_cpu); } -static void uncore_event_exit_cpu(int cpu) +static int uncore_event_cpu_offline(unsigned int cpu) { int target; /* Check if exiting cpu is used for collecting uncore events */ if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask)) - return; + return 0; /* Find a new cpu to collect uncore events */ target = cpumask_any_but(topology_core_cpumask(cpu), cpu); @@ -1165,9 +1170,10 @@ static void uncore_event_exit_cpu(int cpu) uncore_change_context(uncore_msr_uncores, cpu, target); uncore_change_context(uncore_pci_uncores, cpu, target); + return 0; } -static void uncore_event_init_cpu(int cpu) +static int uncore_event_cpu_online(unsigned int cpu) { int target; @@ -1177,50 +1183,15 @@ static void uncore_event_init_cpu(int cpu) */ target = cpumask_any_and(&uncore_cpu_mask, topology_core_cpumask(cpu)); if (target < nr_cpu_ids) - return; + return 0; cpumask_set_cpu(cpu, &uncore_cpu_mask); uncore_change_context(uncore_msr_uncores, -1, cpu); uncore_change_context(uncore_pci_uncores, -1, cpu); + return 0; } -static int uncore_cpu_notifier(struct notifier_block *self, - unsigned long action, void *hcpu) -{ - unsigned int cpu = (long)hcpu; - - switch (action & ~CPU_TASKS_FROZEN) { - case CPU_UP_PREPARE: - return notifier_from_errno(uncore_cpu_prepare(cpu)); - - case CPU_STARTING: - uncore_cpu_starting(cpu, false); - case CPU_DOWN_FAILED: - uncore_event_init_cpu(cpu); - break; - - case CPU_UP_CANCELED: - case CPU_DYING: - uncore_cpu_dying(cpu); - break; - - case CPU_DOWN_PREPARE: - uncore_event_exit_cpu(cpu); - break; - } - return NOTIFY_OK; -} - -static 
struct notifier_block uncore_cpu_nb = { - .notifier_call = uncore_cpu_notifier, - /* - * to migrate uncore events, our notifier should be executed - * before perf core's notifier. - */ - .priority = CPU_PRI_PERF + 1, -}; - static int __init type_pmu_register(struct intel_uncore_type *type) { int i, ret; @@ -1264,41 +1235,6 @@ err: return ret; } -static void __init uncore_cpu_setup(void *dummy) -{ - uncore_cpu_starting(smp_processor_id(), true); -} - -/* Lazy to avoid allocation of a few bytes for the normal case */ -static __initdata DECLARE_BITMAP(packages, MAX_LOCAL_APIC); - -static int __init uncore_cpumask_init(bool msr) -{ - unsigned int cpu; - - for_each_online_cpu(cpu) { - unsigned int pkg = topology_logical_package_id(cpu); - int ret; - - if (test_and_set_bit(pkg, packages)) - continue; - /* - * The first online cpu of each package allocates and takes - * the refcounts for all other online cpus in that package. - * If msrs are not enabled no allocation is required. - */ - if (msr) { - ret = uncore_cpu_prepare(cpu); - if (ret) - return ret; - } - uncore_event_init_cpu(cpu); - smp_call_function_single(cpu, uncore_cpu_setup, NULL, 1); - } - __register_cpu_notifier(&uncore_cpu_nb); - return 0; -} - #define X86_UNCORE_MODEL_MATCH(model, init) \ { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&init } @@ -1420,11 +1356,33 @@ static int __init intel_uncore_init(void) if (cret && pret) return -ENODEV; - cpu_notifier_register_begin(); - ret = uncore_cpumask_init(!cret); - if (ret) - goto err; - cpu_notifier_register_done(); + /* + * Install callbacks. Core will call them for each online cpu. + * + * The first online cpu of each package allocates and takes + * the refcounts for all other online cpus in that package. + * If msrs are not enabled no allocation is required and + * uncore_cpu_prepare() is not called for each online cpu. 
+ */ + if (!cret) { + ret = cpuhp_setup_state(CPUHP_PERF_X86_UNCORE_PREP, + "PERF_X86_UNCORE_PREP", + uncore_cpu_prepare, NULL); + if (ret) + goto err; + } else { + cpuhp_setup_state_nocalls(CPUHP_PERF_X86_UNCORE_PREP, + "PERF_X86_UNCORE_PREP", + uncore_cpu_prepare, NULL); + } + first_init = 1; + cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_STARTING, + "AP_PERF_X86_UNCORE_STARTING", + uncore_cpu_starting, uncore_cpu_dying); + first_init = 0; + cpuhp_setup_state(CPUHP_AP_PERF_X86_UNCORE_ONLINE, + "AP_PERF_X86_UNCORE_ONLINE", + uncore_event_cpu_online, uncore_event_cpu_offline); return 0; err: @@ -1432,17 +1390,16 @@ err: on_each_cpu_mask(&uncore_cpu_mask, uncore_exit_boxes, NULL, 1); uncore_types_exit(uncore_msr_uncores); uncore_pci_exit(); - cpu_notifier_register_done(); return ret; } module_init(intel_uncore_init); static void __exit intel_uncore_exit(void) { - cpu_notifier_register_begin(); - __unregister_cpu_notifier(&uncore_cpu_nb); + cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_UNCORE_ONLINE); + cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_UNCORE_STARTING); + cpuhp_remove_state_nocalls(CPUHP_PERF_X86_UNCORE_PREP); uncore_types_exit(uncore_msr_uncores); uncore_pci_exit(); - cpu_notifier_register_done(); } module_exit(intel_uncore_exit); diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 858915f40fb3..6f91e94c9cc9 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -6,6 +6,7 @@ enum cpuhp_state { CPUHP_CREATE_THREADS, CPUHP_PERF_PREPARE, CPUHP_PERF_X86_PREPARE, + CPUHP_PERF_X86_UNCORE_PREP, CPUHP_NOTIFY_PREPARE, CPUHP_BRINGUP_CPU, CPUHP_AP_IDLE_DEAD, @@ -18,6 +19,7 @@ enum cpuhp_state { CPUHP_AP_IRQ_ARMADA_CASC_STARTING, CPUHP_AP_IRQ_BCM2836_STARTING, CPUHP_AP_ARM_MVEBU_COHERENCY, + CPUHP_AP_PERF_X86_UNCORE_STARTING, CPUHP_AP_PERF_X86_STARTING, CPUHP_AP_NOTIFY_STARTING, CPUHP_AP_ONLINE, @@ -27,6 +29,7 @@ enum cpuhp_state { CPUHP_AP_X86_VDSO_VMA_ONLINE, CPUHP_AP_PERF_ONLINE, CPUHP_AP_PERF_X86_ONLINE, + CPUHP_AP_PERF_X86_UNCORE_ONLINE, CPUHP_AP_NOTIFY_ONLINE, CPUHP_AP_ONLINE_DYN, CPUHP_AP_ONLINE_DYN_END = CPUHP_AP_ONLINE_DYN + 30, -- cgit v1.2.3-59-g8ed1b From 96b2bd3866a0b045330e420a2f1829ff2a3399bc Mon Sep 17 00:00:00 2001 From: Richard Cochran Date: Wed, 13 Jul 2016 17:16:13 +0000 Subject: perf/x86/amd/uncore: Convert to hotplug state machine Install the callbacks via the state machine and let the core invoke the callbacks on the already online CPUs. 
Signed-off-by: Richard Cochran Signed-off-by: Anna-Maria Gleixner Reviewed-by: Sebastian Andrzej Siewior Cc: Alexander Shishkin Cc: Arnaldo Carvalho de Melo Cc: Borislav Petkov Cc: Chen Yucong Cc: Jiri Olsa Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Stephane Eranian Cc: Thomas Gleixner Cc: Vince Weaver Cc: rt@linutronix.de Link: http://lkml.kernel.org/r/20160713153333.839150380@linutronix.de Signed-off-by: Ingo Molnar --- arch/x86/events/amd/uncore.c | 122 ++++++++++++------------------------------- include/linux/cpuhotplug.h | 3 ++ 2 files changed, 35 insertions(+), 90 deletions(-) (limited to 'include/linux') diff --git a/arch/x86/events/amd/uncore.c b/arch/x86/events/amd/uncore.c index 98ac57381bf9..e6131d4454e6 100644 --- a/arch/x86/events/amd/uncore.c +++ b/arch/x86/events/amd/uncore.c @@ -358,7 +358,7 @@ amd_uncore_find_online_sibling(struct amd_uncore *this, return this; } -static void amd_uncore_cpu_starting(unsigned int cpu) +static int amd_uncore_cpu_starting(unsigned int cpu) { unsigned int eax, ebx, ecx, edx; struct amd_uncore *uncore; @@ -384,6 +384,8 @@ static void amd_uncore_cpu_starting(unsigned int cpu) uncore = amd_uncore_find_online_sibling(uncore, amd_uncore_l2); *per_cpu_ptr(amd_uncore_l2, cpu) = uncore; } + + return 0; } static void uncore_online(unsigned int cpu, @@ -398,13 +400,15 @@ static void uncore_online(unsigned int cpu, cpumask_set_cpu(cpu, uncore->active_mask); } -static void amd_uncore_cpu_online(unsigned int cpu) +static int amd_uncore_cpu_online(unsigned int cpu) { if (amd_uncore_nb) uncore_online(cpu, amd_uncore_nb); if (amd_uncore_l2) uncore_online(cpu, amd_uncore_l2); + + return 0; } static void uncore_down_prepare(unsigned int cpu, @@ -433,13 +437,15 @@ static void uncore_down_prepare(unsigned int cpu, } } -static void amd_uncore_cpu_down_prepare(unsigned int cpu) +static int amd_uncore_cpu_down_prepare(unsigned int cpu) { if (amd_uncore_nb) uncore_down_prepare(cpu, amd_uncore_nb); if (amd_uncore_l2) uncore_down_prepare(cpu, amd_uncore_l2); + + return 0; } static void uncore_dead(unsigned int cpu, struct amd_uncore * __percpu *uncores) @@ -454,74 +460,19 @@ static void uncore_dead(unsigned int cpu, struct amd_uncore * __percpu *uncores) *per_cpu_ptr(uncores, cpu) = NULL; } -static void amd_uncore_cpu_dead(unsigned int cpu) +static int amd_uncore_cpu_dead(unsigned int cpu) { if (amd_uncore_nb) uncore_dead(cpu, amd_uncore_nb); if (amd_uncore_l2) uncore_dead(cpu, amd_uncore_l2); -} - -static int -amd_uncore_cpu_notifier(struct notifier_block *self, unsigned long action, - void *hcpu) -{ - unsigned int cpu = (long)hcpu; - - switch (action & ~CPU_TASKS_FROZEN) { - case CPU_UP_PREPARE: - if (amd_uncore_cpu_up_prepare(cpu)) - return notifier_from_errno(-ENOMEM); - break; - - case CPU_STARTING: - amd_uncore_cpu_starting(cpu); - break; - - case CPU_ONLINE: - amd_uncore_cpu_online(cpu); - break; - - case CPU_DOWN_PREPARE: - amd_uncore_cpu_down_prepare(cpu); - break; - - case CPU_UP_CANCELED: - case CPU_DEAD: - amd_uncore_cpu_dead(cpu); - break; - - default: - break; - } - - return NOTIFY_OK; -} - -static struct notifier_block amd_uncore_cpu_notifier_block = { - .notifier_call = amd_uncore_cpu_notifier, - .priority = CPU_PRI_PERF + 1, -}; - -static void __init init_cpu_already_online(void *dummy) -{ - unsigned int cpu = smp_processor_id(); - - amd_uncore_cpu_starting(cpu); - amd_uncore_cpu_online(cpu); -} -static void cleanup_cpu_online(void *dummy) -{ - unsigned int cpu = smp_processor_id(); - - amd_uncore_cpu_dead(cpu); + return 0; } static int __init 
amd_uncore_init(void) { - unsigned int cpu, cpu2; int ret = -ENODEV; if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) @@ -558,38 +509,29 @@ static int __init amd_uncore_init(void) ret = 0; } - if (ret) - goto fail_nodev; - - cpu_notifier_register_begin(); - - /* init cpus already online before registering for hotplug notifier */ - for_each_online_cpu(cpu) { - ret = amd_uncore_cpu_up_prepare(cpu); - if (ret) - goto fail_online; - smp_call_function_single(cpu, init_cpu_already_online, NULL, 1); - } - - __register_cpu_notifier(&amd_uncore_cpu_notifier_block); - cpu_notifier_register_done(); - + /* + * Install callbacks. Core will call them for each online cpu. + */ + if (cpuhp_setup_state(CPUHP_PERF_X86_AMD_UNCORE_PREP, + "PERF_X86_AMD_UNCORE_PREP", + amd_uncore_cpu_up_prepare, amd_uncore_cpu_dead)) + goto fail_l2; + + if (cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING, + "AP_PERF_X86_AMD_UNCORE_STARTING", + amd_uncore_cpu_starting, NULL)) + goto fail_prep; + if (cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE, + "AP_PERF_X86_AMD_UNCORE_ONLINE", + amd_uncore_cpu_online, + amd_uncore_cpu_down_prepare)) + goto fail_start; return 0; - -fail_online: - for_each_online_cpu(cpu2) { - if (cpu2 == cpu) - break; - smp_call_function_single(cpu, cleanup_cpu_online, NULL, 1); - } - cpu_notifier_register_done(); - - /* amd_uncore_nb/l2 should have been freed by cleanup_cpu_online */ - amd_uncore_nb = amd_uncore_l2 = NULL; - - if (boot_cpu_has(X86_FEATURE_PERFCTR_L2)) - perf_pmu_unregister(&amd_l2_pmu); +fail_start: + cpuhp_remove_state(CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING); +fail_prep: + cpuhp_remove_state(CPUHP_PERF_X86_AMD_UNCORE_PREP); fail_l2: if (boot_cpu_has(X86_FEATURE_PERFCTR_NB)) perf_pmu_unregister(&amd_nb_pmu); diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 6f91e94c9cc9..569d057cc7d5 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -7,6 +7,7 @@ enum cpuhp_state { CPUHP_PERF_PREPARE, CPUHP_PERF_X86_PREPARE, CPUHP_PERF_X86_UNCORE_PREP, + CPUHP_PERF_X86_AMD_UNCORE_PREP, CPUHP_NOTIFY_PREPARE, CPUHP_BRINGUP_CPU, CPUHP_AP_IDLE_DEAD, @@ -20,6 +21,7 @@ enum cpuhp_state { CPUHP_AP_IRQ_BCM2836_STARTING, CPUHP_AP_ARM_MVEBU_COHERENCY, CPUHP_AP_PERF_X86_UNCORE_STARTING, + CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING, CPUHP_AP_PERF_X86_STARTING, CPUHP_AP_NOTIFY_STARTING, CPUHP_AP_ONLINE, @@ -30,6 +32,7 @@ enum cpuhp_state { CPUHP_AP_PERF_ONLINE, CPUHP_AP_PERF_X86_ONLINE, CPUHP_AP_PERF_X86_UNCORE_ONLINE, + CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE, CPUHP_AP_NOTIFY_ONLINE, CPUHP_AP_ONLINE_DYN, CPUHP_AP_ONLINE_DYN_END = CPUHP_AP_ONLINE_DYN + 30, -- cgit v1.2.3-59-g8ed1b From 9744f7b7b37e78d7f18050856e626aea33058787 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 13 Jul 2016 17:16:14 +0000 Subject: perf/x86/amd/ibs: Convert to hotplug state machine Install the callbacks via the state machine and let the core invoke the callbacks on the already online CPUs. 
Signed-off-by: Thomas Gleixner Signed-off-by: Anna-Maria Gleixner Reviewed-by: Sebastian Andrzej Siewior Cc: Alexander Shishkin Cc: Arnaldo Carvalho de Melo Cc: Borislav Petkov Cc: Chen Yucong Cc: Jiri Olsa Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Stephane Eranian Cc: Vince Weaver Cc: rt@linutronix.de Link: http://lkml.kernel.org/r/20160713153333.921401190@linutronix.de Signed-off-by: Ingo Molnar --- arch/x86/events/amd/ibs.c | 64 ++++++++++++++++++++-------------------------- include/linux/cpuhotplug.h | 1 + 2 files changed, 29 insertions(+), 36 deletions(-) (limited to 'include/linux') diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c index feb90f6730e8..1a59a181582b 100644 --- a/arch/x86/events/amd/ibs.c +++ b/arch/x86/events/amd/ibs.c @@ -721,13 +721,10 @@ static __init int perf_ibs_pmu_init(struct perf_ibs *perf_ibs, char *name) return ret; } -static __init int perf_event_ibs_init(void) +static __init void perf_event_ibs_init(void) { struct attribute **attr = ibs_op_format_attrs; - if (!ibs_caps) - return -ENODEV; /* ibs not supported by the cpu */ - perf_ibs_pmu_init(&perf_ibs_fetch, "ibs_fetch"); if (ibs_caps & IBS_CAPS_OPCNT) { @@ -738,13 +735,11 @@ static __init int perf_event_ibs_init(void) register_nmi_handler(NMI_LOCAL, perf_ibs_nmi_handler, 0, "perf_ibs"); pr_info("perf: AMD IBS detected (0x%08x)\n", ibs_caps); - - return 0; } #else /* defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD) */ -static __init int perf_event_ibs_init(void) { return 0; } +static __init void perf_event_ibs_init(void) { } #endif @@ -921,7 +916,7 @@ static inline int get_ibs_lvt_offset(void) return val & IBSCTL_LVT_OFFSET_MASK; } -static void setup_APIC_ibs(void *dummy) +static void setup_APIC_ibs(void) { int offset; @@ -936,7 +931,7 @@ failed: smp_processor_id()); } -static void clear_APIC_ibs(void *dummy) +static void clear_APIC_ibs(void) { int offset; @@ -945,18 +940,24 @@ static void clear_APIC_ibs(void *dummy) setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_FIX, 1); } +static int x86_pmu_amd_ibs_starting_cpu(unsigned int cpu) +{ + setup_APIC_ibs(); + return 0; +} + #ifdef CONFIG_PM static int perf_ibs_suspend(void) { - clear_APIC_ibs(NULL); + clear_APIC_ibs(); return 0; } static void perf_ibs_resume(void) { ibs_eilvt_setup(); - setup_APIC_ibs(NULL); + setup_APIC_ibs(); } static struct syscore_ops perf_ibs_syscore_ops = { @@ -975,27 +976,15 @@ static inline void perf_ibs_pm_init(void) { } #endif -static int -perf_ibs_cpu_notifier(struct notifier_block *self, unsigned long action, void *hcpu) +static int x86_pmu_amd_ibs_dying_cpu(unsigned int cpu) { - switch (action & ~CPU_TASKS_FROZEN) { - case CPU_STARTING: - setup_APIC_ibs(NULL); - break; - case CPU_DYING: - clear_APIC_ibs(NULL); - break; - default: - break; - } - - return NOTIFY_OK; + clear_APIC_ibs(); + return 0; } static __init int amd_ibs_init(void) { u32 caps; - int ret = -EINVAL; caps = __get_ibs_caps(); if (!caps) @@ -1004,22 +993,25 @@ static __init int amd_ibs_init(void) ibs_eilvt_setup(); if (!ibs_eilvt_valid()) - goto out; + return -EINVAL; perf_ibs_pm_init(); - cpu_notifier_register_begin(); + ibs_caps = caps; /* make ibs_caps visible to other cpus: */ smp_mb(); - smp_call_function(setup_APIC_ibs, NULL, 1); - __perf_cpu_notifier(perf_ibs_cpu_notifier); - cpu_notifier_register_done(); + /* + * x86_pmu_amd_ibs_starting_cpu will be called from core on + * all online cpus. 
+ */ + cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_IBS_STARTING, + "AP_PERF_X86_AMD_IBS_STARTING", + x86_pmu_amd_ibs_starting_cpu, + x86_pmu_amd_ibs_dying_cpu); - ret = perf_event_ibs_init(); -out: - if (ret) - pr_err("Failed to setup IBS, %d\n", ret); - return ret; + perf_event_ibs_init(); + + return 0; } /* Since we need the pci subsystem to init ibs we can't do this earlier: */ diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 569d057cc7d5..46720197b307 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -23,6 +23,7 @@ enum cpuhp_state { CPUHP_AP_PERF_X86_UNCORE_STARTING, CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING, CPUHP_AP_PERF_X86_STARTING, + CPUHP_AP_PERF_X86_AMD_IBS_STARTING, CPUHP_AP_NOTIFY_STARTING, CPUHP_AP_ONLINE, CPUHP_TEARDOWN_CPU, -- cgit v1.2.3-59-g8ed1b From 8b5b773d6245138c461b3a3e242339fa50fa9c58 Mon Sep 17 00:00:00 2001 From: Richard Cochran Date: Wed, 13 Jul 2016 17:16:15 +0000 Subject: perf/x86/intel/rapl: Convert to hotplug state machine Install the callbacks via the state machine and let the core invoke the callbacks on the already online CPUs. Signed-off-by: Richard Cochran Signed-off-by: Anna-Maria Gleixner Reviewed-by: Sebastian Andrzej Siewior Cc: Alexander Shishkin Cc: Arnaldo Carvalho de Melo Cc: Borislav Petkov Cc: Dave Hansen Cc: Huang Rui Cc: Jiri Olsa Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Srinivas Pandruvada Cc: Stephane Eranian Cc: Thomas Gleixner Cc: Vince Weaver Cc: rt@linutronix.de Link: http://lkml.kernel.org/r/20160713153334.008808086@linutronix.de Signed-off-by: Ingo Molnar --- arch/x86/events/intel/rapl.c | 84 +++++++++++++------------------------------- include/linux/cpuhotplug.h | 2 ++ 2 files changed, 27 insertions(+), 59 deletions(-) (limited to 'include/linux') diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c index e30eef4f29a6..6255ede56174 100644 --- a/arch/x86/events/intel/rapl.c +++ b/arch/x86/events/intel/rapl.c @@ -555,14 +555,14 @@ const struct attribute_group *rapl_attr_groups[] = { NULL, }; -static void rapl_cpu_exit(int cpu) +static int rapl_cpu_offline(unsigned int cpu) { struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu); int target; /* Check if exiting cpu is used for collecting rapl events */ if (!cpumask_test_and_clear_cpu(cpu, &rapl_cpu_mask)) - return; + return 0; pmu->cpu = -1; /* Find a new cpu to collect rapl events */ @@ -574,9 +574,10 @@ static void rapl_cpu_exit(int cpu) pmu->cpu = target; perf_pmu_migrate_context(pmu->pmu, cpu, target); } + return 0; } -static void rapl_cpu_init(int cpu) +static int rapl_cpu_online(unsigned int cpu) { struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu); int target; @@ -587,13 +588,14 @@ static void rapl_cpu_init(int cpu) */ target = cpumask_any_and(&rapl_cpu_mask, topology_core_cpumask(cpu)); if (target < nr_cpu_ids) - return; + return 0; cpumask_set_cpu(cpu, &rapl_cpu_mask); pmu->cpu = cpu; + return 0; } -static int rapl_cpu_prepare(int cpu) +static int rapl_cpu_prepare(unsigned int cpu) { struct rapl_pmu *pmu = cpu_to_rapl_pmu(cpu); @@ -614,33 +616,6 @@ static int rapl_cpu_prepare(int cpu) return 0; } -static int rapl_cpu_notifier(struct notifier_block *self, - unsigned long action, void *hcpu) -{ - unsigned int cpu = (long)hcpu; - - switch (action & ~CPU_TASKS_FROZEN) { - case CPU_UP_PREPARE: - rapl_cpu_prepare(cpu); - break; - - case CPU_DOWN_FAILED: - case CPU_ONLINE: - rapl_cpu_init(cpu); - break; - - case CPU_DOWN_PREPARE: - rapl_cpu_exit(cpu); - break; - } - return NOTIFY_OK; -} - -static struct notifier_block rapl_cpu_nb = { - 
.notifier_call = rapl_cpu_notifier, - .priority = CPU_PRI_PERF + 1, -}; - static int rapl_check_hw_unit(bool apply_quirk) { u64 msr_rapl_power_unit_bits; @@ -691,24 +666,6 @@ static void __init rapl_advertise(void) } } -static int __init rapl_prepare_cpus(void) -{ - unsigned int cpu, pkg; - int ret; - - for_each_online_cpu(cpu) { - pkg = topology_logical_package_id(cpu); - if (rapl_pmus->pmus[pkg]) - continue; - - ret = rapl_cpu_prepare(cpu); - if (ret) - return ret; - rapl_cpu_init(cpu); - } - return 0; -} - static void cleanup_rapl_pmus(void) { int i; @@ -835,35 +792,44 @@ static int __init rapl_pmu_init(void) if (ret) return ret; - cpu_notifier_register_begin(); + /* + * Install callbacks. Core will call them for each online cpu. + */ - ret = rapl_prepare_cpus(); + ret = cpuhp_setup_state(CPUHP_PERF_X86_RAPL_PREP, "PERF_X86_RAPL_PREP", + rapl_cpu_prepare, NULL); if (ret) goto out; + ret = cpuhp_setup_state(CPUHP_AP_PERF_X86_RAPL_ONLINE, + "AP_PERF_X86_RAPL_ONLINE", + rapl_cpu_online, rapl_cpu_offline); + if (ret) + goto out1; + ret = perf_pmu_register(&rapl_pmus->pmu, "power", -1); if (ret) - goto out; + goto out2; - __register_cpu_notifier(&rapl_cpu_nb); - cpu_notifier_register_done(); rapl_advertise(); return 0; +out2: + cpuhp_remove_state(CPUHP_AP_PERF_X86_RAPL_ONLINE); +out1: + cpuhp_remove_state(CPUHP_PERF_X86_RAPL_PREP); out: pr_warn("Initialization failed (%d), disabled\n", ret); cleanup_rapl_pmus(); - cpu_notifier_register_done(); return ret; } module_init(rapl_pmu_init); static void __exit intel_rapl_exit(void) { - cpu_notifier_register_begin(); - __unregister_cpu_notifier(&rapl_cpu_nb); + cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_RAPL_ONLINE); + cpuhp_remove_state_nocalls(CPUHP_PERF_X86_RAPL_PREP); perf_pmu_unregister(&rapl_pmus->pmu); cleanup_rapl_pmus(); - cpu_notifier_register_done(); } module_exit(intel_rapl_exit); diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 46720197b307..06d77fd0fda3 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -8,6 +8,7 @@ enum cpuhp_state { CPUHP_PERF_X86_PREPARE, CPUHP_PERF_X86_UNCORE_PREP, CPUHP_PERF_X86_AMD_UNCORE_PREP, + CPUHP_PERF_X86_RAPL_PREP, CPUHP_NOTIFY_PREPARE, CPUHP_BRINGUP_CPU, CPUHP_AP_IDLE_DEAD, @@ -34,6 +35,7 @@ enum cpuhp_state { CPUHP_AP_PERF_X86_ONLINE, CPUHP_AP_PERF_X86_UNCORE_ONLINE, CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE, + CPUHP_AP_PERF_X86_RAPL_ONLINE, CPUHP_AP_NOTIFY_ONLINE, CPUHP_AP_ONLINE_DYN, CPUHP_AP_ONLINE_DYN_END = CPUHP_AP_ONLINE_DYN + 30, -- cgit v1.2.3-59-g8ed1b From f07048270423622a2428a9b90929c76e92777caa Mon Sep 17 00:00:00 2001 From: Richard Cochran Date: Wed, 13 Jul 2016 17:16:16 +0000 Subject: perf/x86/intel/cqm: Convert Intel CQM to hotplug state machine Install the callbacks via the state machine and let the core invoke the callbacks on the already online CPUs. 
Signed-off-by: Richard Cochran Signed-off-by: Anna-Maria Gleixner Reviewed-by: Sebastian Andrzej Siewior Cc: Alexander Shishkin Cc: Arnaldo Carvalho de Melo Cc: Borislav Petkov Cc: Jiri Olsa Cc: Linus Torvalds Cc: Matt Fleming Cc: Peter Zijlstra Cc: Stephane Eranian Cc: Thomas Gleixner Cc: Tony Luck Cc: Vikas Shivappa Cc: Vince Weaver Cc: rt@linutronix.de Link: http://lkml.kernel.org/r/20160713153334.096956222@linutronix.de Signed-off-by: Ingo Molnar --- arch/x86/events/intel/cqm.c | 49 +++++++++++++++++---------------------------- include/linux/cpuhotplug.h | 2 ++ 2 files changed, 20 insertions(+), 31 deletions(-) (limited to 'include/linux') diff --git a/arch/x86/events/intel/cqm.c b/arch/x86/events/intel/cqm.c index 7b5fd811ef45..783c49ddef29 100644 --- a/arch/x86/events/intel/cqm.c +++ b/arch/x86/events/intel/cqm.c @@ -1577,7 +1577,7 @@ static inline void cqm_pick_event_reader(int cpu) cpumask_set_cpu(cpu, &cqm_cpumask); } -static void intel_cqm_cpu_starting(unsigned int cpu) +static int intel_cqm_cpu_starting(unsigned int cpu) { struct intel_pqr_state *state = &per_cpu(pqr_state, cpu); struct cpuinfo_x86 *c = &cpu_data(cpu); @@ -1588,39 +1588,26 @@ static void intel_cqm_cpu_starting(unsigned int cpu) WARN_ON(c->x86_cache_max_rmid != cqm_max_rmid); WARN_ON(c->x86_cache_occ_scale != cqm_l3_scale); + + cqm_pick_event_reader(cpu); + return 0; } -static void intel_cqm_cpu_exit(unsigned int cpu) +static int intel_cqm_cpu_exit(unsigned int cpu) { int target; /* Is @cpu the current cqm reader for this package ? */ if (!cpumask_test_and_clear_cpu(cpu, &cqm_cpumask)) - return; + return 0; /* Find another online reader in this package */ target = cpumask_any_but(topology_core_cpumask(cpu), cpu); if (target < nr_cpu_ids) cpumask_set_cpu(target, &cqm_cpumask); -} - -static int intel_cqm_cpu_notifier(struct notifier_block *nb, - unsigned long action, void *hcpu) -{ - unsigned int cpu = (unsigned long)hcpu; - - switch (action & ~CPU_TASKS_FROZEN) { - case CPU_DOWN_PREPARE: - intel_cqm_cpu_exit(cpu); - break; - case CPU_STARTING: - intel_cqm_cpu_starting(cpu); - cqm_pick_event_reader(cpu); - break; - } - return NOTIFY_OK; + return 0; } static const struct x86_cpu_id intel_cqm_match[] = { @@ -1682,7 +1669,7 @@ out: static int __init intel_cqm_init(void) { char *str = NULL, scale[20]; - int i, cpu, ret; + int cpu, ret; if (x86_match_cpu(intel_cqm_match)) cqm_enabled = true; @@ -1705,8 +1692,7 @@ static int __init intel_cqm_init(void) * * Also, check that the scales match on all cpus. */ - cpu_notifier_register_begin(); - + get_online_cpus(); for_each_online_cpu(cpu) { struct cpuinfo_x86 *c = &cpu_data(cpu); @@ -1743,11 +1729,6 @@ static int __init intel_cqm_init(void) if (ret) goto out; - for_each_online_cpu(i) { - intel_cqm_cpu_starting(i); - cqm_pick_event_reader(i); - } - if (mbm_enabled) ret = intel_mbm_init(); if (ret && !cqm_enabled) @@ -1772,12 +1753,18 @@ static int __init intel_cqm_init(void) pr_info("Intel MBM enabled\n"); /* - * Register the hot cpu notifier once we are sure cqm + * Setup the hot cpu notifier once we are sure cqm * is enabled to avoid notifier leak. 
*/ - __perf_cpu_notifier(intel_cqm_cpu_notifier); + cpuhp_setup_state(CPUHP_AP_PERF_X86_CQM_STARTING, + "AP_PERF_X86_CQM_STARTING", + intel_cqm_cpu_starting, NULL); + cpuhp_setup_state(CPUHP_AP_PERF_X86_CQM_ONLINE, "AP_PERF_X86_CQM_ONLINE", + NULL, intel_cqm_cpu_exit); + out: - cpu_notifier_register_done(); + put_online_cpus(); + if (ret) { kfree(str); cqm_cleanup(); diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 06d77fd0fda3..f9399eeba263 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -25,6 +25,7 @@ enum cpuhp_state { CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING, CPUHP_AP_PERF_X86_STARTING, CPUHP_AP_PERF_X86_AMD_IBS_STARTING, + CPUHP_AP_PERF_X86_CQM_STARTING, CPUHP_AP_NOTIFY_STARTING, CPUHP_AP_ONLINE, CPUHP_TEARDOWN_CPU, @@ -36,6 +37,7 @@ enum cpuhp_state { CPUHP_AP_PERF_X86_UNCORE_ONLINE, CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE, CPUHP_AP_PERF_X86_RAPL_ONLINE, + CPUHP_AP_PERF_X86_CQM_ONLINE, CPUHP_AP_NOTIFY_ONLINE, CPUHP_AP_ONLINE_DYN, CPUHP_AP_ONLINE_DYN_END = CPUHP_AP_ONLINE_DYN + 30, -- cgit v1.2.3-59-g8ed1b From 77c34ef1c3194bfac65883af75baf7dec9fa0d77 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 13 Jul 2016 17:16:18 +0000 Subject: perf/x86/intel/cstate: Convert Intel CSTATE to hotplug state machine Install the callbacks via the state machine and let the core invoke the callbacks on the already online CPUs. Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: Anna-Maria Gleixner Cc: Alexander Shishkin Cc: Arnaldo Carvalho de Melo Cc: Borislav Petkov Cc: Dave Hansen Cc: Jiri Olsa Cc: Kan Liang Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Stephane Eranian Cc: Thomas Gleixner Cc: Vince Weaver Cc: kbuild test robot Cc: rt@linutronix.de Link: http://lkml.kernel.org/r/20160713153334.184061086@linutronix.de Signed-off-by: Ingo Molnar --- arch/x86/events/intel/cstate.c | 51 +++++++++++++----------------------------- include/linux/cpuhotplug.h | 2 ++ 2 files changed, 17 insertions(+), 36 deletions(-) (limited to 'include/linux') diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c index 9ba4e4136a15..d6d7be0b3495 100644 --- a/arch/x86/events/intel/cstate.c +++ b/arch/x86/events/intel/cstate.c @@ -365,7 +365,7 @@ static int cstate_pmu_event_add(struct perf_event *event, int mode) * Check if exiting cpu is the designated reader. 
If so migrate the * events when there is a valid target available */ -static void cstate_cpu_exit(int cpu) +static int cstate_cpu_exit(unsigned int cpu) { unsigned int target; @@ -390,9 +390,10 @@ static void cstate_cpu_exit(int cpu) perf_pmu_migrate_context(&cstate_pkg_pmu, cpu, target); } } + return 0; } -static void cstate_cpu_init(int cpu) +static int cstate_cpu_init(unsigned int cpu) { unsigned int target; @@ -414,31 +415,10 @@ static void cstate_cpu_init(int cpu) topology_core_cpumask(cpu)); if (has_cstate_pkg && target >= nr_cpu_ids) cpumask_set_cpu(cpu, &cstate_pkg_cpu_mask); -} -static int cstate_cpu_notifier(struct notifier_block *self, - unsigned long action, void *hcpu) -{ - unsigned int cpu = (long)hcpu; - - switch (action & ~CPU_TASKS_FROZEN) { - case CPU_STARTING: - cstate_cpu_init(cpu); - break; - case CPU_DOWN_PREPARE: - cstate_cpu_exit(cpu); - break; - default: - break; - } - return NOTIFY_OK; + return 0; } -static struct notifier_block cstate_cpu_nb = { - .notifier_call = cstate_cpu_notifier, - .priority = CPU_PRI_PERF + 1, -}; - static struct pmu cstate_core_pmu = { .attr_groups = core_attr_groups, .name = "cstate_core", @@ -599,18 +579,20 @@ static inline void cstate_cleanup(void) static int __init cstate_init(void) { - int cpu, err; + int err; - cpu_notifier_register_begin(); - for_each_online_cpu(cpu) - cstate_cpu_init(cpu); + cpuhp_setup_state(CPUHP_AP_PERF_X86_CSTATE_STARTING, + "AP_PERF_X86_CSTATE_STARTING", cstate_cpu_init, + NULL); + cpuhp_setup_state(CPUHP_AP_PERF_X86_CSTATE_ONLINE, + "AP_PERF_X86_CSTATE_ONLINE", NULL, cstate_cpu_exit); if (has_cstate_core) { err = perf_pmu_register(&cstate_core_pmu, cstate_core_pmu.name, -1); if (err) { has_cstate_core = false; pr_info("Failed to register cstate core pmu\n"); - goto out; + return err; } } @@ -620,12 +602,10 @@ static int __init cstate_init(void) has_cstate_pkg = false; pr_info("Failed to register cstate pkg pmu\n"); cstate_cleanup(); - goto out; + return err; } } - __register_cpu_notifier(&cstate_cpu_nb); -out: - cpu_notifier_register_done(); + return err; } @@ -651,9 +631,8 @@ module_init(cstate_pmu_init); static void __exit cstate_pmu_exit(void) { - cpu_notifier_register_begin(); - __unregister_cpu_notifier(&cstate_cpu_nb); + cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_CSTATE_ONLINE); + cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_CSTATE_STARTING); cstate_cleanup(); - cpu_notifier_register_done(); } module_exit(cstate_pmu_exit); diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index f9399eeba263..68f4495f4988 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -26,6 +26,7 @@ enum cpuhp_state { CPUHP_AP_PERF_X86_STARTING, CPUHP_AP_PERF_X86_AMD_IBS_STARTING, CPUHP_AP_PERF_X86_CQM_STARTING, + CPUHP_AP_PERF_X86_CSTATE_STARTING, CPUHP_AP_NOTIFY_STARTING, CPUHP_AP_ONLINE, CPUHP_TEARDOWN_CPU, @@ -38,6 +39,7 @@ enum cpuhp_state { CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE, CPUHP_AP_PERF_X86_RAPL_ONLINE, CPUHP_AP_PERF_X86_CQM_ONLINE, + CPUHP_AP_PERF_X86_CSTATE_ONLINE, CPUHP_AP_NOTIFY_ONLINE, CPUHP_AP_ONLINE_DYN, CPUHP_AP_ONLINE_DYN_END = CPUHP_AP_ONLINE_DYN + 30, -- cgit v1.2.3-59-g8ed1b From a409f5ee2937cb15b4af485fa98b90946d9bea3a Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 13 Jul 2016 17:16:19 +0000 Subject: blackfin/perf: Convert hotplug notifier to state machine Install the callback via the state machine and let the core invoke the callbacks on the already online CPUs. 
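The blackfin hook only needs the old CPU_UP_PREPARE phase, so it gets a state placed before CPUHP_BRINGUP_CPU. A prepare-stage callback runs on a control CPU while the target CPU is not executing yet, so it may sleep and may touch the target's per-CPU data directly. A minimal sketch of that pattern, with a hypothetical driver and state constant (nothing below is taken from the blackfin code):

#include <linux/cpuhotplug.h>
#include <linux/percpu.h>
#include <linux/string.h>

struct my_cpu_state { u64 counter[8]; };	/* placeholder type */
static DEFINE_PER_CPU(struct my_cpu_state, my_state);

/* PREPARE stage: runs on a control CPU before @cpu starts executing. */
static int my_prepare_cpu(unsigned int cpu)
{
	struct my_cpu_state *st = &per_cpu(my_state, cpu);

	memset(st, 0, sizeof(*st));	/* safe: @cpu is not running yet */
	return 0;
}

	/* NULL teardown: nothing needs undoing when the CPU goes away. */
	cpuhp_setup_state(CPUHP_MY_PREPARE, "MY_PREPARE",
			  my_prepare_cpu, NULL);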
Signed-off-by: Thomas Gleixner Signed-off-by: Anna-Maria Gleixner Reviewed-by: Sebastian Andrzej Siewior Cc: Alexander Shishkin Cc: Arnaldo Carvalho de Melo Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Steven Miao Cc: adi-buildroot-devel@lists.sourceforge.net Cc: rt@linutronix.de Link: http://lkml.kernel.org/r/20160713153334.265797537@linutronix.de Signed-off-by: Ingo Molnar --- arch/blackfin/kernel/perf_event.c | 26 +++++--------------------- include/linux/cpuhotplug.h | 1 + 2 files changed, 6 insertions(+), 21 deletions(-) (limited to 'include/linux') diff --git a/arch/blackfin/kernel/perf_event.c b/arch/blackfin/kernel/perf_event.c index 170d786807c4..6355e97d22b9 100644 --- a/arch/blackfin/kernel/perf_event.c +++ b/arch/blackfin/kernel/perf_event.c @@ -453,29 +453,13 @@ static struct pmu pmu = { .read = bfin_pmu_read, }; -static void bfin_pmu_setup(int cpu) +static int bfin_pmu_prepare_cpu(unsigned int cpu) { struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu); + bfin_write_PFCTL(0); memset(cpuhw, 0, sizeof(struct cpu_hw_events)); -} - -static int -bfin_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu) -{ - unsigned int cpu = (long)hcpu; - - switch (action & ~CPU_TASKS_FROZEN) { - case CPU_UP_PREPARE: - bfin_write_PFCTL(0); - bfin_pmu_setup(cpu); - break; - - default: - break; - } - - return NOTIFY_OK; + return 0; } static int __init bfin_pmu_init(void) @@ -491,8 +475,8 @@ static int __init bfin_pmu_init(void) ret = perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW); if (!ret) - perf_cpu_notifier(bfin_pmu_notifier); - + cpuhp_setup_state(CPUHP_PERF_BFIN, "PERF_BFIN", + bfin_pmu_prepare_cpu, NULL); return ret; } early_initcall(bfin_pmu_init); diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 68f4495f4988..d54973e62366 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -9,6 +9,7 @@ enum cpuhp_state { CPUHP_PERF_X86_UNCORE_PREP, CPUHP_PERF_X86_AMD_UNCORE_PREP, CPUHP_PERF_X86_RAPL_PREP, + CPUHP_PERF_BFIN, CPUHP_NOTIFY_PREPARE, CPUHP_BRINGUP_CPU, CPUHP_AP_IDLE_DEAD, -- cgit v1.2.3-59-g8ed1b From 57ecde42cc7493fb74029918d0a8f97aec854876 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 13 Jul 2016 17:16:20 +0000 Subject: powerpc/perf: Convert book3s notifier to state machine callbacks Install the callbacks via the state machine and let the core invoke the callbacks on the already online CPUs. 
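One property the new callbacks gain over NOTIFY_OK/NOTIFY_BAD is a plain errno convention: a negative return from a prepare callback makes the core abort the bring-up of that CPU and roll back the states it had already reached. The book3s callback never fails, so the point is easiest to show with an invented example; the my_* names and allocation below are placeholders, not anything from this patch:

#include <linux/cpuhotplug.h>
#include <linux/percpu.h>
#include <linux/slab.h>

struct my_buf { unsigned long data[64]; };	/* placeholder type */
static DEFINE_PER_CPU(struct my_buf *, my_bufs);

static int my_prepare_cpu(unsigned int cpu)
{
	struct my_buf *buf = kzalloc(sizeof(*buf), GFP_KERNEL);

	if (!buf)
		return -ENOMEM;	/* bring-up of @cpu is aborted and rolled back */
	per_cpu(my_bufs, cpu) = buf;
	return 0;
}

static int my_dead_cpu(unsigned int cpu)
{
	kfree(per_cpu(my_bufs, cpu));
	per_cpu(my_bufs, cpu) = NULL;
	return 0;
}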
Signed-off-by: Thomas Gleixner Signed-off-by: Anna-Maria Gleixner Reviewed-by: Sebastian Andrzej Siewior Cc: Anshuman Khandual Cc: Benjamin Herrenschmidt Cc: Linus Torvalds Cc: Madhavan Srinivasan Cc: Michael Ellerman Cc: Paul Mackerras Cc: Peter Zijlstra Cc: Sukadev Bhattiprolu Cc: linuxppc-dev@lists.ozlabs.org Cc: rt@linutronix.de Link: http://lkml.kernel.org/r/20160713153334.345786236@linutronix.de Signed-off-by: Ingo Molnar --- arch/powerpc/perf/core-book3s.c | 30 +++++++----------------------- include/linux/cpuhotplug.h | 1 + 2 files changed, 8 insertions(+), 23 deletions(-) (limited to 'include/linux') diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c index 97a1d40d8696..ffd61d55fb25 100644 --- a/arch/powerpc/perf/core-book3s.c +++ b/arch/powerpc/perf/core-book3s.c @@ -2158,31 +2158,15 @@ static void perf_event_interrupt(struct pt_regs *regs) irq_exit(); } -static void power_pmu_setup(int cpu) +int power_pmu_prepare_cpu(unsigned int cpu) { struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu); - if (!ppmu) - return; - memset(cpuhw, 0, sizeof(*cpuhw)); - cpuhw->mmcr[0] = MMCR0_FC; -} - -static int -power_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu) -{ - unsigned int cpu = (long)hcpu; - - switch (action & ~CPU_TASKS_FROZEN) { - case CPU_UP_PREPARE: - power_pmu_setup(cpu); - break; - - default: - break; + if (ppmu) { + memset(cpuhw, 0, sizeof(*cpuhw)); + cpuhw->mmcr[0] = MMCR0_FC; } - - return NOTIFY_OK; + return 0; } int register_power_pmu(struct power_pmu *pmu) @@ -2205,7 +2189,7 @@ int register_power_pmu(struct power_pmu *pmu) #endif /* CONFIG_PPC64 */ perf_pmu_register(&power_pmu, "cpu", PERF_TYPE_RAW); - perf_cpu_notifier(power_pmu_notifier); - + cpuhp_setup_state(CPUHP_PERF_POWER, "PERF_POWER", + power_pmu_prepare_cpu, NULL); return 0; } diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index d54973e62366..b265e992a481 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -10,6 +10,7 @@ enum cpuhp_state { CPUHP_PERF_X86_AMD_UNCORE_PREP, CPUHP_PERF_X86_RAPL_PREP, CPUHP_PERF_BFIN, + CPUHP_PERF_POWER, CPUHP_NOTIFY_PREPARE, CPUHP_BRINGUP_CPU, CPUHP_AP_IDLE_DEAD, -- cgit v1.2.3-59-g8ed1b From 4f0f8217e69a0f6ff727913aa49cb64632ff7b6a Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 13 Jul 2016 17:16:21 +0000 Subject: s390/perf: Convert the hotplug notifier to state machine callbacks (Counter) Install the callbacks via the state machine and let the core invoke the callbacks on the already online CPUs. 
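Worth noting about the hunks below: the old notifier handled CPU_ONLINE and CPU_DOWN_FAILED through the same init path. With the state machine a failed offline attempt is rolled back by re-running the startup callbacks of the states already torn down, so the DOWN_FAILED case needs no explicit handling and a single symmetric pair suffices (this is the registration the patch adds, annotated; the comment is editorial, not part of the patch):

	/*
	 * Old: CPU_ONLINE and CPU_DOWN_FAILED both ran the init path.
	 * New: a failed offline rewinds through the same startup callback,
	 * so one online/offline pair covers both cases.
	 */
	cpuhp_setup_state(CPUHP_AP_PERF_S390_CF_ONLINE,
			  "AP_PERF_S390_CF_ONLINE",
			  s390_pmu_online_cpu, s390_pmu_offline_cpu);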
Signed-off-by: Thomas Gleixner Signed-off-by: Anna-Maria Gleixner Reviewed-by: Sebastian Andrzej Siewior Cc: Adam Buchbinder Cc: Heiko Carstens Cc: Hendrik Brueckner Cc: Linus Torvalds Cc: Martin Schwidefsky Cc: Peter Zijlstra Cc: Sukadev Bhattiprolu Cc: linux-s390@vger.kernel.org Cc: rt@linutronix.de Link: http://lkml.kernel.org/r/20160713153334.436370635@linutronix.de Signed-off-by: Ingo Molnar --- arch/s390/kernel/perf_cpum_cf.c | 46 +++++++++++++++++------------------------ include/linux/cpuhotplug.h | 1 + 2 files changed, 20 insertions(+), 27 deletions(-) (limited to 'include/linux') diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c index 7ec63b1d920d..037c2a253ae4 100644 --- a/arch/s390/kernel/perf_cpum_cf.c +++ b/arch/s390/kernel/perf_cpum_cf.c @@ -664,30 +664,22 @@ static struct pmu cpumf_pmu = { .cancel_txn = cpumf_pmu_cancel_txn, }; -static int cpumf_pmu_notifier(struct notifier_block *self, unsigned long action, - void *hcpu) +static int cpumf_pmf_setup(unsigned int cpu, int flags) { - int flags; - - switch (action & ~CPU_TASKS_FROZEN) { - case CPU_ONLINE: - case CPU_DOWN_FAILED: - flags = PMC_INIT; - local_irq_disable(); - setup_pmc_cpu(&flags); - local_irq_enable(); - break; - case CPU_DOWN_PREPARE: - flags = PMC_RELEASE; - local_irq_disable(); - setup_pmc_cpu(&flags); - local_irq_enable(); - break; - default: - break; - } + local_irq_disable(); + setup_pmc_cpu(&flags); + local_irq_enable(); + return 0; +} + +static int s390_pmu_online_cpu(unsigned int cpu) +{ + return cpumf_pmf_setup(cpu, PMC_INIT); +} - return NOTIFY_OK; +static int s390_pmu_offline_cpu(unsigned int cpu) +{ + return cpumf_pmf_setup(cpu, PMC_RELEASE); } static int __init cpumf_pmu_init(void) @@ -707,7 +699,7 @@ static int __init cpumf_pmu_init(void) if (rc) { pr_err("Registering for CPU-measurement alerts " "failed with rc=%i\n", rc); - goto out; + return rc; } cpumf_pmu.attr_groups = cpumf_cf_event_group(); @@ -716,10 +708,10 @@ static int __init cpumf_pmu_init(void) pr_err("Registering the cpum_cf PMU failed with rc=%i\n", rc); unregister_external_irq(EXT_IRQ_MEASURE_ALERT, cpumf_measurement_alert); - goto out; + return rc; } - perf_cpu_notifier(cpumf_pmu_notifier); -out: - return rc; + return cpuhp_setup_state(CPUHP_AP_PERF_S390_CF_ONLINE, + "AP_PERF_S390_CF_ONLINE", + s390_pmu_online_cpu, s390_pmu_offline_cpu); } early_initcall(cpumf_pmu_init); diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index b265e992a481..d4e09cb0c100 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -42,6 +42,7 @@ enum cpuhp_state { CPUHP_AP_PERF_X86_RAPL_ONLINE, CPUHP_AP_PERF_X86_CQM_ONLINE, CPUHP_AP_PERF_X86_CSTATE_ONLINE, + CPUHP_AP_PERF_S390_CF_ONLINE, CPUHP_AP_NOTIFY_ONLINE, CPUHP_AP_ONLINE_DYN, CPUHP_AP_ONLINE_DYN_END = CPUHP_AP_ONLINE_DYN + 30, -- cgit v1.2.3-59-g8ed1b From e3d617fe6ac7294974fc513dc5e4d8ada8080fd1 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 13 Jul 2016 17:16:22 +0000 Subject: s390/perf: Convert the hotplug notifier to state machine callbacks (Sampling) Install the callbacks via the state machine and let the core invoke the callbacks on the already online CPUs. 
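The sampling conversion below discards the return value of cpuhp_setup_state(). For callers that do need to unwind on failure, the usual shape looks roughly like the following sketch; the out_unregister label and its contents are hypothetical, not part of this patch:

	err = cpuhp_setup_state(CPUHP_AP_PERF_S390_SF_ONLINE,
				"AP_PERF_S390_SF_ONLINE",
				s390_pmu_sf_online_cpu,
				s390_pmu_sf_offline_cpu);
	if (err)
		goto out_unregister;	/* undo the PMU and IRQ registration
					 * performed earlier in this function */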
Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: Anna-Maria Gleixner Cc: Christian Borntraeger Cc: Heiko Carstens Cc: Hendrik Brueckner Cc: Linus Torvalds Cc: Martin Schwidefsky Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-s390@vger.kernel.org Cc: rt@linutronix.de Link: http://lkml.kernel.org/r/20160713153334.518084858@linutronix.de Signed-off-by: Ingo Molnar --- arch/s390/kernel/perf_cpum_sf.c | 45 +++++++++++++++++------------------------ include/linux/cpuhotplug.h | 1 + 2 files changed, 20 insertions(+), 26 deletions(-) (limited to 'include/linux') diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c index a8e832166417..f4a4c118f8b4 100644 --- a/arch/s390/kernel/perf_cpum_sf.c +++ b/arch/s390/kernel/perf_cpum_sf.c @@ -1506,37 +1506,28 @@ static void cpumf_measurement_alert(struct ext_code ext_code, sf_disable(); } } - -static int cpumf_pmu_notifier(struct notifier_block *self, - unsigned long action, void *hcpu) +static int cpusf_pmu_setup(unsigned int cpu, int flags) { - int flags; - /* Ignore the notification if no events are scheduled on the PMU. * This might be racy... */ if (!atomic_read(&num_events)) - return NOTIFY_OK; - - switch (action & ~CPU_TASKS_FROZEN) { - case CPU_ONLINE: - case CPU_DOWN_FAILED: - flags = PMC_INIT; - local_irq_disable(); - setup_pmc_cpu(&flags); - local_irq_enable(); - break; - case CPU_DOWN_PREPARE: - flags = PMC_RELEASE; - local_irq_disable(); - setup_pmc_cpu(&flags); - local_irq_enable(); - break; - default: - break; - } + return 0; - return NOTIFY_OK; + local_irq_disable(); + setup_pmc_cpu(&flags); + local_irq_enable(); + return 0; +} + +static int s390_pmu_sf_online_cpu(unsigned int cpu) +{ + return cpusf_pmu_setup(cpu, PMC_INIT); +} + +static int s390_pmu_sf_offline_cpu(unsigned int cpu) +{ + return cpusf_pmu_setup(cpu, PMC_RELEASE); } static int param_get_sfb_size(char *buffer, const struct kernel_param *kp) @@ -1636,7 +1627,9 @@ static int __init init_cpum_sampling_pmu(void) cpumf_measurement_alert); goto out; } - perf_cpu_notifier(cpumf_pmu_notifier); + + cpuhp_setup_state(CPUHP_AP_PERF_S390_SF_ONLINE, "AP_PERF_S390_SF_ONLINE", + s390_pmu_sf_online_cpu, s390_pmu_sf_offline_cpu); out: return err; } diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index d4e09cb0c100..fea66667d190 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -43,6 +43,7 @@ enum cpuhp_state { CPUHP_AP_PERF_X86_CQM_ONLINE, CPUHP_AP_PERF_X86_CSTATE_ONLINE, CPUHP_AP_PERF_S390_CF_ONLINE, + CPUHP_AP_PERF_S390_SF_ONLINE, CPUHP_AP_NOTIFY_ONLINE, CPUHP_AP_ONLINE_DYN, CPUHP_AP_ONLINE_DYN_END = CPUHP_AP_ONLINE_DYN + 30, -- cgit v1.2.3-59-g8ed1b From e3cfce17d309a3c914e36ac4b4b26c30045d4274 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 13 Jul 2016 17:16:23 +0000 Subject: sh/perf: Convert the hotplug notifiers to state machine callbacks Install the callbacks via the state machine and let the core invoke the callbacks on the already online CPUs. 
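Not every user needs a fixed slot in enum cpuhp_state like the CPUHP_PERF_SUPERH entry added below; for ordering-insensitive online/offline callbacks the core also offers a dynamically allocated state. An illustrative fragment, unrelated to this patch, with placeholder callback names:

	int state;

	/* CPUHP_AP_ONLINE_DYN: the core picks a free state and returns it. */
	state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "AP_MYDRV_ONLINE",
				  my_online_cpu, my_offline_cpu);
	if (state < 0)
		return state;

	/* later, e.g. on module unload: */
	cpuhp_remove_state(state);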
Signed-off-by: Thomas Gleixner Signed-off-by: Anna-Maria Gleixner Reviewed-by: Sebastian Andrzej Siewior Cc: Alexander Shishkin Cc: Arnaldo Carvalho de Melo Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Rich Felker Cc: Yoshinori Sato Cc: linux-sh@vger.kernel.org Cc: rt@linutronix.de Link: http://lkml.kernel.org/r/20160713153334.597790464@linutronix.de Signed-off-by: Ingo Molnar --- arch/sh/kernel/perf_event.c | 23 ++++------------------- include/linux/cpuhotplug.h | 1 + 2 files changed, 5 insertions(+), 19 deletions(-) (limited to 'include/linux') diff --git a/arch/sh/kernel/perf_event.c b/arch/sh/kernel/perf_event.c index 4dca18347ee9..ba3269a8304b 100644 --- a/arch/sh/kernel/perf_event.c +++ b/arch/sh/kernel/perf_event.c @@ -352,28 +352,12 @@ static struct pmu pmu = { .read = sh_pmu_read, }; -static void sh_pmu_setup(int cpu) +static int sh_pmu_prepare_cpu(unsigned int cpu) { struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu); memset(cpuhw, 0, sizeof(struct cpu_hw_events)); -} - -static int -sh_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu) -{ - unsigned int cpu = (long)hcpu; - - switch (action & ~CPU_TASKS_FROZEN) { - case CPU_UP_PREPARE: - sh_pmu_setup(cpu); - break; - - default: - break; - } - - return NOTIFY_OK; + return 0; } int register_sh_pmu(struct sh_pmu *_pmu) @@ -394,6 +378,7 @@ int register_sh_pmu(struct sh_pmu *_pmu) WARN_ON(_pmu->num_events > MAX_HWEVENTS); perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW); - perf_cpu_notifier(sh_pmu_notifier); + cpuhp_setup_state(CPUHP_PERF_SUPERH, "PERF_SUPERH", sh_pmu_prepare_cpu, + NULL); return 0; } diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index fea66667d190..16ab41779ca7 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -11,6 +11,7 @@ enum cpuhp_state { CPUHP_PERF_X86_RAPL_PREP, CPUHP_PERF_BFIN, CPUHP_PERF_POWER, + CPUHP_PERF_SUPERH, CPUHP_NOTIFY_PREPARE, CPUHP_BRINGUP_CPU, CPUHP_AP_IDLE_DEAD, -- cgit v1.2.3-59-g8ed1b From 28c94843bb873196f704e9a26775b117a32a31ad Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 13 Jul 2016 17:16:24 +0000 Subject: bus/arm-cci: Convert to hotplug statemachine Install the callbacks via the state machine and let the core invoke the callbacks on the already online CPUs. Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: Anna-Maria Gleixner Cc: Andrzej Hajda Cc: Linus Torvalds Cc: Mark Rutland Cc: Olof Johansson Cc: Pawel Moll Cc: Peter Zijlstra Cc: Punit Agrawal Cc: Suzuki K Poulose Cc: Suzuki K. 
Poulose Cc: Thomas Gleixner Cc: Will Deacon Cc: rt@linutronix.de Link: http://lkml.kernel.org/r/20160713153334.679142601@linutronix.de Signed-off-by: Ingo Molnar --- drivers/bus/arm-cci.c | 53 ++++++++++++++++++++-------------------------- include/linux/cpuhotplug.h | 1 + 2 files changed, 24 insertions(+), 30 deletions(-) (limited to 'include/linux') diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c index a49b28378d59..5755907f836f 100644 --- a/drivers/bus/arm-cci.c +++ b/drivers/bus/arm-cci.c @@ -144,12 +144,15 @@ struct cci_pmu { int num_cntrs; atomic_t active_events; struct mutex reserve_mutex; - struct notifier_block cpu_nb; + struct list_head entry; cpumask_t cpus; }; #define to_cci_pmu(c) (container_of(c, struct cci_pmu, pmu)) +static DEFINE_MUTEX(cci_pmu_mutex); +static LIST_HEAD(cci_pmu_list); + enum cci_models { #ifdef CONFIG_ARM_CCI400_PMU CCI400_R0, @@ -1503,31 +1506,26 @@ static int cci_pmu_init(struct cci_pmu *cci_pmu, struct platform_device *pdev) return perf_pmu_register(&cci_pmu->pmu, name, -1); } -static int cci_pmu_cpu_notifier(struct notifier_block *self, - unsigned long action, void *hcpu) +static int cci_pmu_offline_cpu(unsigned int cpu) { - struct cci_pmu *cci_pmu = container_of(self, - struct cci_pmu, cpu_nb); - unsigned int cpu = (long)hcpu; + struct cci_pmu *cci_pmu; unsigned int target; - switch (action & ~CPU_TASKS_FROZEN) { - case CPU_DOWN_PREPARE: + mutex_lock(&cci_pmu_mutex); + list_for_each_entry(cci_pmu, &cci_pmu_list, entry) { if (!cpumask_test_and_clear_cpu(cpu, &cci_pmu->cpus)) - break; + continue; target = cpumask_any_but(cpu_online_mask, cpu); - if (target >= nr_cpu_ids) // UP, last CPU - break; + if (target >= nr_cpu_ids) + continue; /* * TODO: migrate context once core races on event->ctx have * been fixed. */ cpumask_set_cpu(target, &cci_pmu->cpus); - default: - break; } - - return NOTIFY_OK; + mutex_unlock(&cci_pmu_mutex); + return 0; } static struct cci_pmu_model cci_pmu_models[] = { @@ -1766,24 +1764,13 @@ static int cci_pmu_probe(struct platform_device *pdev) atomic_set(&cci_pmu->active_events, 0); cpumask_set_cpu(smp_processor_id(), &cci_pmu->cpus); - cci_pmu->cpu_nb = (struct notifier_block) { - .notifier_call = cci_pmu_cpu_notifier, - /* - * to migrate uncore events, our notifier should be executed - * before perf core's notifier. 
- */ - .priority = CPU_PRI_PERF + 1, - }; - - ret = register_cpu_notifier(&cci_pmu->cpu_nb); + ret = cci_pmu_init(cci_pmu, pdev); if (ret) return ret; - ret = cci_pmu_init(cci_pmu, pdev); - if (ret) { - unregister_cpu_notifier(&cci_pmu->cpu_nb); - return ret; - } + mutex_lock(&cci_pmu_mutex); + list_add(&cci_pmu->entry, &cci_pmu_list); + mutex_unlock(&cci_pmu_mutex); pr_info("ARM %s PMU driver probed", cci_pmu->model->name); return 0; @@ -1817,6 +1804,12 @@ static int __init cci_platform_init(void) { int ret; + ret = cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ARM_CCI_ONLINE, + "AP_PERF_ARM_CCI_ONLINE", NULL, + cci_pmu_offline_cpu); + if (ret) + return ret; + ret = platform_driver_register(&cci_pmu_driver); if (ret) return ret; diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 16ab41779ca7..0f2f727594fa 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -45,6 +45,7 @@ enum cpuhp_state { CPUHP_AP_PERF_X86_CSTATE_ONLINE, CPUHP_AP_PERF_S390_CF_ONLINE, CPUHP_AP_PERF_S390_SF_ONLINE, + CPUHP_AP_PERF_ARM_CCI_ONLINE, CPUHP_AP_NOTIFY_ONLINE, CPUHP_AP_ONLINE_DYN, CPUHP_AP_ONLINE_DYN_END = CPUHP_AP_ONLINE_DYN + 30, -- cgit v1.2.3-59-g8ed1b From fdc15a36d84eeb52f58b907f03fd8f77f5648235 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 13 Jul 2016 17:16:25 +0000 Subject: bus/arm-ccn: Convert to hotplug statemachine Install the callbacks via the state machine and let the core invoke the callbacks on the already online CPUs. Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: Anna-Maria Gleixner Cc: Arnd Bergmann Cc: Dan Carpenter Cc: Linus Torvalds Cc: Pawel Moll Cc: Peter Zijlstra Cc: Suzuki K Poulose Cc: Thomas Gleixner Cc: rt@linutronix.de Link: http://lkml.kernel.org/r/20160713153334.768498577@linutronix.de Signed-off-by: Ingo Molnar --- drivers/bus/arm-ccn.c | 57 +++++++++++++++++++++++----------------------- include/linux/cpuhotplug.h | 1 + 2 files changed, 29 insertions(+), 29 deletions(-) (limited to 'include/linux') diff --git a/drivers/bus/arm-ccn.c b/drivers/bus/arm-ccn.c index acc3eb542c74..97a9185af433 100644 --- a/drivers/bus/arm-ccn.c +++ b/drivers/bus/arm-ccn.c @@ -167,7 +167,7 @@ struct arm_ccn_dt { struct hrtimer hrtimer; cpumask_t cpu; - struct notifier_block cpu_nb; + struct list_head entry; struct pmu pmu; }; @@ -189,6 +189,8 @@ struct arm_ccn { struct arm_ccn_dt dt; }; +static DEFINE_MUTEX(arm_ccn_mutex); +static LIST_HEAD(arm_ccn_list); static int arm_ccn_node_to_xp(int node) { @@ -1171,30 +1173,27 @@ static enum hrtimer_restart arm_ccn_pmu_timer_handler(struct hrtimer *hrtimer) } -static int arm_ccn_pmu_cpu_notifier(struct notifier_block *nb, - unsigned long action, void *hcpu) +static int arm_ccn_pmu_offline_cpu(unsigned int cpu) { - struct arm_ccn_dt *dt = container_of(nb, struct arm_ccn_dt, cpu_nb); - struct arm_ccn *ccn = container_of(dt, struct arm_ccn, dt); - unsigned int cpu = (long)hcpu; /* for (long) see kernel/cpu.c */ + struct arm_ccn_dt *dt; unsigned int target; - switch (action & ~CPU_TASKS_FROZEN) { - case CPU_DOWN_PREPARE: + mutex_lock(&arm_ccn_mutex); + list_for_each_entry(dt, &arm_ccn_list, entry) { + struct arm_ccn *ccn = container_of(dt, struct arm_ccn, dt); + if (!cpumask_test_and_clear_cpu(cpu, &dt->cpu)) - break; + continue; target = cpumask_any_but(cpu_online_mask, cpu); if (target >= nr_cpu_ids) - break; + continue; perf_pmu_migrate_context(&dt->pmu, cpu, target); cpumask_set_cpu(target, &dt->cpu); if (ccn->irq) WARN_ON(irq_set_affinity_hint(ccn->irq, &dt->cpu) != 0); - default: - break; } 
- - return NOTIFY_OK; + mutex_unlock(&arm_ccn_mutex); + return 0; } @@ -1266,16 +1265,6 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn) /* Pick one CPU which we will use to collect data from CCN... */ cpumask_set_cpu(smp_processor_id(), &ccn->dt.cpu); - /* - * ... and change the selection when it goes offline. Priority is - * picked to have a chance to migrate events before perf is notified. - */ - ccn->dt.cpu_nb.notifier_call = arm_ccn_pmu_cpu_notifier; - ccn->dt.cpu_nb.priority = CPU_PRI_PERF + 1, - err = register_cpu_notifier(&ccn->dt.cpu_nb); - if (err) - goto error_cpu_notifier; - /* Also make sure that the overflow interrupt is handled by this CPU */ if (ccn->irq) { err = irq_set_affinity_hint(ccn->irq, &ccn->dt.cpu); @@ -1289,12 +1278,13 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn) if (err) goto error_pmu_register; + mutex_lock(&arm_ccn_mutex); + list_add(&ccn->dt.entry, &arm_ccn_list); + mutex_unlock(&arm_ccn_mutex); return 0; error_pmu_register: error_set_affinity: - unregister_cpu_notifier(&ccn->dt.cpu_nb); -error_cpu_notifier: ida_simple_remove(&arm_ccn_pmu_ida, ccn->dt.id); for (i = 0; i < ccn->num_xps; i++) writel(0, ccn->xp[i].base + CCN_XP_DT_CONTROL); @@ -1306,9 +1296,12 @@ static void arm_ccn_pmu_cleanup(struct arm_ccn *ccn) { int i; + mutex_lock(&arm_ccn_mutex); + list_del(&ccn->dt.entry); + mutex_unlock(&arm_ccn_mutex); + if (ccn->irq) irq_set_affinity_hint(ccn->irq, NULL); - unregister_cpu_notifier(&ccn->dt.cpu_nb); for (i = 0; i < ccn->num_xps; i++) writel(0, ccn->xp[i].base + CCN_XP_DT_CONTROL); writel(0, ccn->dt.base + CCN_DT_PMCR); @@ -1316,7 +1309,6 @@ static void arm_ccn_pmu_cleanup(struct arm_ccn *ccn) ida_simple_remove(&arm_ccn_pmu_ida, ccn->dt.id); } - static int arm_ccn_for_each_valid_region(struct arm_ccn *ccn, int (*callback)(struct arm_ccn *ccn, int region, void __iomem *base, u32 type, u32 id)) @@ -1533,7 +1525,13 @@ static struct platform_driver arm_ccn_driver = { static int __init arm_ccn_init(void) { - int i; + int i, ret; + + ret = cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE, + "AP_PERF_ARM_CCN_ONLINE", NULL, + arm_ccn_pmu_offline_cpu); + if (ret) + return ret; for (i = 0; i < ARRAY_SIZE(arm_ccn_pmu_events); i++) arm_ccn_pmu_events_attrs[i] = &arm_ccn_pmu_events[i].attr.attr; @@ -1543,6 +1541,7 @@ static int __init arm_ccn_init(void) static void __exit arm_ccn_exit(void) { + cpuhp_remove_state_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE); platform_driver_unregister(&arm_ccn_driver); } diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 0f2f727594fa..2ee3c0edcc2d 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -46,6 +46,7 @@ enum cpuhp_state { CPUHP_AP_PERF_S390_CF_ONLINE, CPUHP_AP_PERF_S390_SF_ONLINE, CPUHP_AP_PERF_ARM_CCI_ONLINE, + CPUHP_AP_PERF_ARM_CCN_ONLINE, CPUHP_AP_NOTIFY_ONLINE, CPUHP_AP_ONLINE_DYN, CPUHP_AP_ONLINE_DYN_END = CPUHP_AP_ONLINE_DYN + 30, -- cgit v1.2.3-59-g8ed1b From 25a77b55e74c46c79f838a97e782a45c33588ca6 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 13 Jul 2016 17:16:26 +0000 Subject: xtensa/perf: Convert the hotplug notifier to state machine callbacks Install the callbacks via the state machine and let the core invoke the callbacks on the already online CPUs. 
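Because AP_PERF_XTENSA_STARTING sits in the STARTING section, xtensa_pmu_setup() now runs on the CPU that is coming up, with interrupts still disabled, so it must not sleep or take sleeping locks. In sketch form the constraint looks like this (hypothetical driver, not the xtensa code):

#include <linux/cpuhotplug.h>

/* STARTING stage: runs on the incoming CPU, with interrupts disabled. */
static int my_starting_cpu(unsigned int cpu)
{
	/*
	 * Only non-sleeping work is allowed here: register/MMIO pokes and
	 * per-CPU writes.  Anything that needs GFP_KERNEL allocations or
	 * mutexes belongs in a PREPARE or ONLINE state instead.
	 */
	return 0;
}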
Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: Anna-Maria Gleixner Cc: Alexander Shishkin Cc: Arnaldo Carvalho de Melo Cc: Chris Zankel Cc: Linus Torvalds Cc: Max Filippov Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-xtensa@linux-xtensa.org Cc: rt@linutronix.de Link: http://lkml.kernel.org/r/20160713153334.852575891@linutronix.de Signed-off-by: Ingo Molnar --- arch/xtensa/kernel/perf_event.c | 26 +++++++++----------------- include/linux/cpuhotplug.h | 1 + 2 files changed, 10 insertions(+), 17 deletions(-) (limited to 'include/linux') diff --git a/arch/xtensa/kernel/perf_event.c b/arch/xtensa/kernel/perf_event.c index ef90479e0397..0fecc8a2c0b5 100644 --- a/arch/xtensa/kernel/perf_event.c +++ b/arch/xtensa/kernel/perf_event.c @@ -404,7 +404,7 @@ static struct pmu xtensa_pmu = { .read = xtensa_pmu_read, }; -static void xtensa_pmu_setup(void) +static int xtensa_pmu_setup(int cpu) { unsigned i; @@ -413,21 +413,7 @@ static void xtensa_pmu_setup(void) set_er(0, XTENSA_PMU_PMCTRL(i)); set_er(get_er(XTENSA_PMU_PMSTAT(i)), XTENSA_PMU_PMSTAT(i)); } -} - -static int xtensa_pmu_notifier(struct notifier_block *self, - unsigned long action, void *data) -{ - switch (action & ~CPU_TASKS_FROZEN) { - case CPU_STARTING: - xtensa_pmu_setup(); - break; - - default: - break; - } - - return NOTIFY_OK; + return 0; } static int __init xtensa_pmu_init(void) @@ -435,7 +421,13 @@ static int __init xtensa_pmu_init(void) int ret; int irq = irq_create_mapping(NULL, XCHAL_PROFILING_INTERRUPT); - perf_cpu_notifier(xtensa_pmu_notifier); + ret = cpuhp_setup_state(CPUHP_AP_PERF_XTENSA_STARTING, + "AP_PERF_XTENSA_STARTING", xtensa_pmu_setup, + NULL); + if (ret) { + pr_err("xtensa_pmu: failed to register CPU-hotplug.\n"); + return ret; + } #if XTENSA_FAKE_NMI enable_irq(irq); #else diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 2ee3c0edcc2d..f187c46eb7a1 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -30,6 +30,7 @@ enum cpuhp_state { CPUHP_AP_PERF_X86_AMD_IBS_STARTING, CPUHP_AP_PERF_X86_CQM_STARTING, CPUHP_AP_PERF_X86_CSTATE_STARTING, + CPUHP_AP_PERF_XTENSA_STARTING, CPUHP_AP_NOTIFY_STARTING, CPUHP_AP_ONLINE, CPUHP_TEARDOWN_CPU, -- cgit v1.2.3-59-g8ed1b From 89ab9cb16931873ec600a909b3a38436352e629a Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 13 Jul 2016 17:16:28 +0000 Subject: perf/core: Remove perf CPU notifier code All users converted to state machine callbacks. Signed-off-by: Thomas Gleixner Signed-off-by: Anna-Maria Gleixner Reviewed-by: Sebastian Andrzej Siewior Cc: Alexander Shishkin Cc: Arnaldo Carvalho de Melo Cc: Linus Torvalds Cc: Nicolas Iooss Cc: Paul E. McKenney Cc: Peter Zijlstra Cc: rt@linutronix.de Link: http://lkml.kernel.org/r/20160713153335.115333381@linutronix.de Signed-off-by: Ingo Molnar --- include/linux/cpu.h | 2 -- include/linux/perf_event.h | 35 ----------------------------------- 2 files changed, 37 deletions(-) (limited to 'include/linux') diff --git a/include/linux/cpu.h b/include/linux/cpu.h index 21597dcac0e2..ca2dd865a34e 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -59,8 +59,6 @@ struct notifier_block; * CPU notifier priorities. 
*/ enum { - CPU_PRI_PERF = 20, - /* bring up workqueues before normal notifiers and down after */ CPU_PRI_WORKQUEUE_UP = 5, CPU_PRI_WORKQUEUE_DOWN = -5, diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 9abeb6948e70..ddd3dab0f39e 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -1285,41 +1285,6 @@ static inline void perf_restore_debug_store(void) { } #define perf_output_put(handle, x) perf_output_copy((handle), &(x), sizeof(x)) -/* - * This has to have a higher priority than migration_notifier in sched/core.c. - */ -#define perf_cpu_notifier(fn) \ -do { \ - static struct notifier_block fn##_nb = \ - { .notifier_call = fn, .priority = CPU_PRI_PERF }; \ - unsigned long cpu = smp_processor_id(); \ - unsigned long flags; \ - \ - cpu_notifier_register_begin(); \ - fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE, \ - (void *)(unsigned long)cpu); \ - local_irq_save(flags); \ - fn(&fn##_nb, (unsigned long)CPU_STARTING, \ - (void *)(unsigned long)cpu); \ - local_irq_restore(flags); \ - fn(&fn##_nb, (unsigned long)CPU_ONLINE, \ - (void *)(unsigned long)cpu); \ - __register_cpu_notifier(&fn##_nb); \ - cpu_notifier_register_done(); \ -} while (0) - -/* - * Bare-bones version of perf_cpu_notifier(), which doesn't invoke the - * callback for already online CPUs. - */ -#define __perf_cpu_notifier(fn) \ -do { \ - static struct notifier_block fn##_nb = \ - { .notifier_call = fn, .priority = CPU_PRI_PERF }; \ - \ - __register_cpu_notifier(&fn##_nb); \ -} while (0) - struct perf_pmu_events_attr { struct device_attribute attr; u64 id; -- cgit v1.2.3-59-g8ed1b From c6a84daa3498f269e3b506757506f762b6f56080 Mon Sep 17 00:00:00 2001 From: Anna-Maria Gleixner Date: Wed, 13 Jul 2016 17:16:28 +0000 Subject: perf/x86/amd/power: Convert the hotplug notifier to state machine Install the callbacks via the state machine. 
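power_cpu_exit() below keeps the PMU usable across offlining by handing its events to a sibling CPU. Stripped of the driver specifics, the reusable pattern for an ONLINE-state teardown looks like the following sketch; my_pmu and my_cpumask are placeholders standing in for the driver's pmu and reader mask:

#include <linux/cpumask.h>
#include <linux/perf_event.h>

static struct pmu my_pmu;		/* placeholder */
static cpumask_t my_cpumask;		/* CPUs currently owning events */

static int my_offline_cpu(unsigned int cpu)
{
	unsigned int target;

	if (!cpumask_test_and_clear_cpu(cpu, &my_cpumask))
		return 0;		/* @cpu was not an active reader */

	/* Pick any other online CPU and hand the perf context over. */
	target = cpumask_any_but(cpu_online_mask, cpu);
	if (target < nr_cpu_ids) {
		cpumask_set_cpu(target, &my_cpumask);
		perf_pmu_migrate_context(&my_pmu, cpu, target);
	}
	return 0;
}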
Signed-off-by: Anna-Maria Gleixner Cc: Borislav Petkov Cc: Huang Rui Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: rt@linutronix.de Link: http://lkml.kernel.org/r/20160713153335.027571056@linutronix.de Signed-off-by: Ingo Molnar --- arch/x86/events/amd/power.c | 58 +++++++++------------------------------------ include/linux/cpuhotplug.h | 1 + 2 files changed, 12 insertions(+), 47 deletions(-) (limited to 'include/linux') diff --git a/arch/x86/events/amd/power.c b/arch/x86/events/amd/power.c index 204df18e7cd2..9842270ed2f2 100644 --- a/arch/x86/events/amd/power.c +++ b/arch/x86/events/amd/power.c @@ -228,12 +228,12 @@ static struct pmu pmu_class = { .read = pmu_event_read, }; -static void power_cpu_exit(int cpu) +static int power_cpu_exit(unsigned int cpu) { int target; if (!cpumask_test_and_clear_cpu(cpu, &cpu_mask)) - return; + return 0; /* * Find a new CPU on the same compute unit, if was set in cpumask @@ -245,9 +245,10 @@ static void power_cpu_exit(int cpu) cpumask_set_cpu(target, &cpu_mask); perf_pmu_migrate_context(&pmu_class, cpu, target); } + return 0; } -static void power_cpu_init(int cpu) +static int power_cpu_init(unsigned int cpu) { int target; @@ -263,33 +264,9 @@ static void power_cpu_init(int cpu) target = cpumask_any_but(topology_sibling_cpumask(cpu), cpu); if (target >= nr_cpumask_bits) cpumask_set_cpu(cpu, &cpu_mask); + return 0; } -static int -power_cpu_notifier(struct notifier_block *self, unsigned long action, void *hcpu) -{ - unsigned int cpu = (long)hcpu; - - switch (action & ~CPU_TASKS_FROZEN) { - case CPU_DOWN_FAILED: - case CPU_ONLINE: - power_cpu_init(cpu); - break; - case CPU_DOWN_PREPARE: - power_cpu_exit(cpu); - break; - default: - break; - } - - return NOTIFY_OK; -} - -static struct notifier_block power_cpu_notifier_nb = { - .notifier_call = power_cpu_notifier, - .priority = CPU_PRI_PERF, -}; - static const struct x86_cpu_id cpu_match[] = { { .vendor = X86_VENDOR_AMD, .family = 0x15 }, {}, @@ -297,7 +274,7 @@ static const struct x86_cpu_id cpu_match[] = { static int __init amd_power_pmu_init(void) { - int cpu, target, ret; + int ret; if (!x86_match_cpu(cpu_match)) return 0; @@ -312,38 +289,25 @@ static int __init amd_power_pmu_init(void) return -ENODEV; } - cpu_notifier_register_begin(); - /* Choose one online core of each compute unit. 
*/ - for_each_online_cpu(cpu) { - target = cpumask_first(topology_sibling_cpumask(cpu)); - if (!cpumask_test_cpu(target, &cpu_mask)) - cpumask_set_cpu(target, &cpu_mask); - } + cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_POWER_ONLINE, + "AP_PERF_X86_AMD_POWER_ONLINE", + power_cpu_init, power_cpu_exit); ret = perf_pmu_register(&pmu_class, "power", -1); if (WARN_ON(ret)) { pr_warn("AMD Power PMU registration failed\n"); - goto out; + return ret; } - __register_cpu_notifier(&power_cpu_notifier_nb); - pr_info("AMD Power PMU detected\n"); - -out: - cpu_notifier_register_done(); - return ret; } module_init(amd_power_pmu_init); static void __exit amd_power_pmu_exit(void) { - cpu_notifier_register_begin(); - __unregister_cpu_notifier(&power_cpu_notifier_nb); - cpu_notifier_register_done(); - + cpuhp_remove_state_nocalls(CPUHP_AP_PERF_X86_AMD_POWER_ONLINE); perf_pmu_unregister(&pmu_class); } module_exit(amd_power_pmu_exit); diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index f187c46eb7a1..acfeda137df8 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -41,6 +41,7 @@ enum cpuhp_state { CPUHP_AP_PERF_X86_ONLINE, CPUHP_AP_PERF_X86_UNCORE_ONLINE, CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE, + CPUHP_AP_PERF_X86_AMD_POWER_ONLINE, CPUHP_AP_PERF_X86_RAPL_ONLINE, CPUHP_AP_PERF_X86_CQM_ONLINE, CPUHP_AP_PERF_X86_CSTATE_ONLINE, -- cgit v1.2.3-59-g8ed1b From 7ee681b25284782ecf380bf5ccf55f13c52fd0ce Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 13 Jul 2016 17:16:29 +0000 Subject: workqueue: Convert to state machine callbacks Get rid of the prio ordering of the separate notifiers and use a proper state callback pair. Signed-off-by: Thomas Gleixner Signed-off-by: Anna-Maria Gleixner Reviewed-by: Sebastian Andrzej Siewior Acked-by: Tejun Heo Cc: Andrew Morton Cc: Lai Jiangshan Cc: Linus Torvalds Cc: Nicolas Iooss Cc: Oleg Nesterov Cc: Paul E. McKenney Cc: Peter Zijlstra Cc: Rasmus Villemoes Cc: Rusty Russell Cc: rt@linutronix.de Link: http://lkml.kernel.org/r/20160713153335.197083890@linutronix.de Signed-off-by: Ingo Molnar --- include/linux/cpu.h | 9 ---- include/linux/cpuhotplug.h | 2 + include/linux/workqueue.h | 6 +++ kernel/cpu.c | 10 +++++ kernel/workqueue.c | 108 ++++++++++++++++++--------------------------- 5 files changed, 61 insertions(+), 74 deletions(-) (limited to 'include/linux') diff --git a/include/linux/cpu.h b/include/linux/cpu.h index ca2dd865a34e..797d9c8e9a1b 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -55,15 +55,6 @@ extern ssize_t arch_cpu_release(const char *, size_t); #endif struct notifier_block; -/* - * CPU notifier priorities. 
- */ -enum { - /* bring up workqueues before normal notifiers and down after */ - CPU_PRI_WORKQUEUE_UP = 5, - CPU_PRI_WORKQUEUE_DOWN = -5, -}; - #define CPU_ONLINE 0x0002 /* CPU (unsigned)v is up */ #define CPU_UP_PREPARE 0x0003 /* CPU (unsigned)v coming up */ #define CPU_UP_CANCELED 0x0004 /* CPU (unsigned)v NOT coming up */ diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index acfeda137df8..60557a9e783d 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -12,6 +12,7 @@ enum cpuhp_state { CPUHP_PERF_BFIN, CPUHP_PERF_POWER, CPUHP_PERF_SUPERH, + CPUHP_WORKQUEUE_PREP, CPUHP_NOTIFY_PREPARE, CPUHP_BRINGUP_CPU, CPUHP_AP_IDLE_DEAD, @@ -49,6 +50,7 @@ enum cpuhp_state { CPUHP_AP_PERF_S390_SF_ONLINE, CPUHP_AP_PERF_ARM_CCI_ONLINE, CPUHP_AP_PERF_ARM_CCN_ONLINE, + CPUHP_AP_WORKQUEUE_ONLINE, CPUHP_AP_NOTIFY_ONLINE, CPUHP_AP_ONLINE_DYN, CPUHP_AP_ONLINE_DYN_END = CPUHP_AP_ONLINE_DYN + 30, diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index ca73c503b92a..26cc1df280d6 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -625,4 +625,10 @@ void wq_watchdog_touch(int cpu); static inline void wq_watchdog_touch(int cpu) { } #endif /* CONFIG_WQ_WATCHDOG */ +#ifdef CONFIG_SMP +int workqueue_prepare_cpu(unsigned int cpu); +int workqueue_online_cpu(unsigned int cpu); +int workqueue_offline_cpu(unsigned int cpu); +#endif + #endif diff --git a/kernel/cpu.c b/kernel/cpu.c index 3705d9043c08..af53f820fec9 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -1185,6 +1185,11 @@ static struct cpuhp_step cpuhp_bp_states[] = { .startup = perf_event_init_cpu, .teardown = perf_event_exit_cpu, }, + [CPUHP_WORKQUEUE_PREP] = { + .name = "workqueue prepare", + .startup = workqueue_prepare_cpu, + .teardown = NULL, + }, /* * Preparatory and dead notifiers. Will be replaced once the notifiers * are converted to states. @@ -1267,6 +1272,11 @@ static struct cpuhp_step cpuhp_ap_states[] = { .startup = perf_event_init_cpu, .teardown = perf_event_exit_cpu, }, + [CPUHP_AP_WORKQUEUE_ONLINE] = { + .name = "workqueue online", + .startup = workqueue_online_cpu, + .teardown = workqueue_offline_cpu, + }, /* * Online/down_prepare notifiers. Will be removed once the notifiers diff --git a/kernel/workqueue.c b/kernel/workqueue.c index e1c0e996b5ae..c9dd5fbdbf33 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -4611,84 +4611,65 @@ static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu) pool->attrs->cpumask) < 0); } -/* - * Workqueues should be brought up before normal priority CPU notifiers. - * This will be registered high priority CPU notifier. 
- */ -static int workqueue_cpu_up_callback(struct notifier_block *nfb, - unsigned long action, - void *hcpu) +int workqueue_prepare_cpu(unsigned int cpu) +{ + struct worker_pool *pool; + + for_each_cpu_worker_pool(pool, cpu) { + if (pool->nr_workers) + continue; + if (!create_worker(pool)) + return -ENOMEM; + } + return 0; +} + +int workqueue_online_cpu(unsigned int cpu) { - int cpu = (unsigned long)hcpu; struct worker_pool *pool; struct workqueue_struct *wq; int pi; - switch (action & ~CPU_TASKS_FROZEN) { - case CPU_UP_PREPARE: - for_each_cpu_worker_pool(pool, cpu) { - if (pool->nr_workers) - continue; - if (!create_worker(pool)) - return NOTIFY_BAD; - } - break; - - case CPU_DOWN_FAILED: - case CPU_ONLINE: - mutex_lock(&wq_pool_mutex); + mutex_lock(&wq_pool_mutex); - for_each_pool(pool, pi) { - mutex_lock(&pool->attach_mutex); + for_each_pool(pool, pi) { + mutex_lock(&pool->attach_mutex); - if (pool->cpu == cpu) - rebind_workers(pool); - else if (pool->cpu < 0) - restore_unbound_workers_cpumask(pool, cpu); + if (pool->cpu == cpu) + rebind_workers(pool); + else if (pool->cpu < 0) + restore_unbound_workers_cpumask(pool, cpu); - mutex_unlock(&pool->attach_mutex); - } + mutex_unlock(&pool->attach_mutex); + } - /* update NUMA affinity of unbound workqueues */ - list_for_each_entry(wq, &workqueues, list) - wq_update_unbound_numa(wq, cpu, true); + /* update NUMA affinity of unbound workqueues */ + list_for_each_entry(wq, &workqueues, list) + wq_update_unbound_numa(wq, cpu, true); - mutex_unlock(&wq_pool_mutex); - break; - } - return NOTIFY_OK; + mutex_unlock(&wq_pool_mutex); + return 0; } -/* - * Workqueues should be brought down after normal priority CPU notifiers. - * This will be registered as low priority CPU notifier. - */ -static int workqueue_cpu_down_callback(struct notifier_block *nfb, - unsigned long action, - void *hcpu) +int workqueue_offline_cpu(unsigned int cpu) { - int cpu = (unsigned long)hcpu; struct work_struct unbind_work; struct workqueue_struct *wq; - switch (action & ~CPU_TASKS_FROZEN) { - case CPU_DOWN_PREPARE: - /* unbinding per-cpu workers should happen on the local CPU */ - INIT_WORK_ONSTACK(&unbind_work, wq_unbind_fn); - queue_work_on(cpu, system_highpri_wq, &unbind_work); - - /* update NUMA affinity of unbound workqueues */ - mutex_lock(&wq_pool_mutex); - list_for_each_entry(wq, &workqueues, list) - wq_update_unbound_numa(wq, cpu, false); - mutex_unlock(&wq_pool_mutex); - - /* wait for per-cpu unbinding to finish */ - flush_work(&unbind_work); - destroy_work_on_stack(&unbind_work); - break; - } - return NOTIFY_OK; + /* unbinding per-cpu workers should happen on the local CPU */ + INIT_WORK_ONSTACK(&unbind_work, wq_unbind_fn); + queue_work_on(cpu, system_highpri_wq, &unbind_work); + + /* update NUMA affinity of unbound workqueues */ + mutex_lock(&wq_pool_mutex); + list_for_each_entry(wq, &workqueues, list) + wq_update_unbound_numa(wq, cpu, false); + mutex_unlock(&wq_pool_mutex); + + /* wait for per-cpu unbinding to finish */ + flush_work(&unbind_work); + destroy_work_on_stack(&unbind_work); + return 0; } #ifdef CONFIG_SMP @@ -5490,9 +5471,6 @@ static int __init init_workqueues(void) pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC); - cpu_notifier(workqueue_cpu_up_callback, CPU_PRI_WORKQUEUE_UP); - hotcpu_notifier(workqueue_cpu_down_callback, CPU_PRI_WORKQUEUE_DOWN); - wq_numa_init(); /* initialize CPU pools */ -- cgit v1.2.3-59-g8ed1b From 48d7f6c715782a6f6d4a5535f1233b764ce6b6f2 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 13 Jul 2016 
17:16:30 +0000 Subject: x86/hpet: Convert to hotplug state machine Install the callbacks via the state machine and let the core invoke the callbacks on the already online CPUs. Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: Anna-Maria Gleixner Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Denys Vlasenko Cc: Jan Beulich Cc: Jiang Liu Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Viresh Kumar Cc: rt@linutronix.de Link: http://lkml.kernel.org/r/20160713153335.279718463@linutronix.de Signed-off-by: Ingo Molnar --- arch/x86/kernel/hpet.c | 69 +++++++++++++++++++++++----------------------- include/linux/cpuhotplug.h | 2 ++ 2 files changed, 36 insertions(+), 35 deletions(-) (limited to 'include/linux') diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c index f112af7aa62e..3d747070fe67 100644 --- a/arch/x86/kernel/hpet.c +++ b/arch/x86/kernel/hpet.c @@ -710,31 +710,29 @@ static void hpet_work(struct work_struct *w) complete(&hpet_work->complete); } -static int hpet_cpuhp_notify(struct notifier_block *n, - unsigned long action, void *hcpu) +static int hpet_cpuhp_online(unsigned int cpu) { - unsigned long cpu = (unsigned long)hcpu; struct hpet_work_struct work; + + INIT_DELAYED_WORK_ONSTACK(&work.work, hpet_work); + init_completion(&work.complete); + /* FIXME: add schedule_work_on() */ + schedule_delayed_work_on(cpu, &work.work, 0); + wait_for_completion(&work.complete); + destroy_delayed_work_on_stack(&work.work); + return 0; +} + +static int hpet_cpuhp_dead(unsigned int cpu) +{ struct hpet_dev *hdev = per_cpu(cpu_hpet_dev, cpu); - switch (action & ~CPU_TASKS_FROZEN) { - case CPU_ONLINE: - INIT_DELAYED_WORK_ONSTACK(&work.work, hpet_work); - init_completion(&work.complete); - /* FIXME: add schedule_work_on() */ - schedule_delayed_work_on(cpu, &work.work, 0); - wait_for_completion(&work.complete); - destroy_delayed_work_on_stack(&work.work); - break; - case CPU_DEAD: - if (hdev) { - free_irq(hdev->irq, hdev); - hdev->flags &= ~HPET_DEV_USED; - per_cpu(cpu_hpet_dev, cpu) = NULL; - } - break; - } - return NOTIFY_OK; + if (!hdev) + return 0; + free_irq(hdev->irq, hdev); + hdev->flags &= ~HPET_DEV_USED; + per_cpu(cpu_hpet_dev, cpu) = NULL; + return 0; } #else @@ -750,11 +748,8 @@ static void hpet_reserve_msi_timers(struct hpet_data *hd) } #endif -static int hpet_cpuhp_notify(struct notifier_block *n, - unsigned long action, void *hcpu) -{ - return NOTIFY_OK; -} +#define hpet_cpuhp_online NULL +#define hpet_cpuhp_dead NULL #endif @@ -931,7 +926,7 @@ out_nohpet: */ static __init int hpet_late_init(void) { - int cpu; + int ret; if (boot_hpet_disable) return -ENODEV; @@ -961,16 +956,20 @@ static __init int hpet_late_init(void) if (boot_cpu_has(X86_FEATURE_ARAT)) return 0; - cpu_notifier_register_begin(); - for_each_online_cpu(cpu) { - hpet_cpuhp_notify(NULL, CPU_ONLINE, (void *)(long)cpu); - } - /* This notifier should be called after workqueue is ready */ - __hotcpu_notifier(hpet_cpuhp_notify, -20); - cpu_notifier_register_done(); - + ret = cpuhp_setup_state(CPUHP_AP_X86_HPET_ONLINE, "AP_X86_HPET_ONLINE", + hpet_cpuhp_online, NULL); + if (ret) + return ret; + ret = cpuhp_setup_state(CPUHP_X86_HPET_DEAD, "X86_HPET_DEAD", NULL, + hpet_cpuhp_dead); + if (ret) + goto err_cpuhp; return 0; + +err_cpuhp: + cpuhp_remove_state(CPUHP_AP_X86_HPET_ONLINE); + return ret; } fs_initcall(hpet_late_init); diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 60557a9e783d..7449081ff412 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -12,6 +12,7 
@@ enum cpuhp_state { CPUHP_PERF_BFIN, CPUHP_PERF_POWER, CPUHP_PERF_SUPERH, + CPUHP_X86_HPET_DEAD, CPUHP_WORKQUEUE_PREP, CPUHP_NOTIFY_PREPARE, CPUHP_BRINGUP_CPU, @@ -54,6 +55,7 @@ enum cpuhp_state { CPUHP_AP_NOTIFY_ONLINE, CPUHP_AP_ONLINE_DYN, CPUHP_AP_ONLINE_DYN_END = CPUHP_AP_ONLINE_DYN + 30, + CPUHP_AP_X86_HPET_ONLINE, CPUHP_AP_ACTIVE, CPUHP_ONLINE, }; -- cgit v1.2.3-59-g8ed1b From 251a5fd64b720a0646eddb8c54f5e9ddba066e72 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 13 Jul 2016 17:16:33 +0000 Subject: x86/kvm/kvmclock: Convert to hotplug state machine Install the callbacks via the state machine and let the core invoke the callbacks on the already online CPUs. We assumed that the priority ordering was ment to invoke the online callback as the last step. In the original code this also invoked the down prepare callback as the last step. With the symmetric state machine the down prepare callback is now the first step. Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: Anna-Maria Gleixner Acked-by: Paolo Bonzini Cc: Gleb Natapov Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Radim Krcmar Cc: Thomas Gleixner Cc: kvm@vger.kernel.org Cc: rt@linutronix.de Link: http://lkml.kernel.org/r/20160713153335.542880859@linutronix.de Signed-off-by: Ingo Molnar --- arch/x86/kvm/x86.c | 33 ++++++++------------------------- include/linux/cpuhotplug.h | 1 + 2 files changed, 9 insertions(+), 25 deletions(-) (limited to 'include/linux') diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 6f90595fdd64..f899127b4832 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -5552,9 +5552,10 @@ int kvm_fast_pio_out(struct kvm_vcpu *vcpu, int size, unsigned short port) } EXPORT_SYMBOL_GPL(kvm_fast_pio_out); -static void tsc_bad(void *info) +static int kvmclock_cpu_down_prep(unsigned int cpu) { __this_cpu_write(cpu_tsc_khz, 0); + return 0; } static void tsc_khz_changed(void *data) @@ -5659,33 +5660,18 @@ static struct notifier_block kvmclock_cpufreq_notifier_block = { .notifier_call = kvmclock_cpufreq_notifier }; -static int kvmclock_cpu_notifier(struct notifier_block *nfb, - unsigned long action, void *hcpu) +static int kvmclock_cpu_online(unsigned int cpu) { - switch (action) { - case CPU_ONLINE: - case CPU_DOWN_FAILED: - tsc_khz_changed(NULL); - break; - case CPU_DOWN_PREPARE: - tsc_bad(NULL); - break; - } - return NOTIFY_OK; + tsc_khz_changed(NULL); + return 0; } -static struct notifier_block kvmclock_cpu_notifier_block = { - .notifier_call = kvmclock_cpu_notifier, - .priority = -INT_MAX -}; - static void kvm_timer_init(void) { int cpu; max_tsc_khz = tsc_khz; - cpu_notifier_register_begin(); if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) { #ifdef CONFIG_CPU_FREQ struct cpufreq_policy policy; @@ -5700,12 +5686,9 @@ static void kvm_timer_init(void) CPUFREQ_TRANSITION_NOTIFIER); } pr_debug("kvm: max_tsc_khz = %ld\n", max_tsc_khz); - for_each_online_cpu(cpu) - smp_call_function_single(cpu, tsc_khz_changed, NULL, 1); - - __register_hotcpu_notifier(&kvmclock_cpu_notifier_block); - cpu_notifier_register_done(); + cpuhp_setup_state(CPUHP_AP_X86_KVM_CLK_ONLINE, "AP_X86_KVM_CLK_ONLINE", + kvmclock_cpu_online, kvmclock_cpu_down_prep); } static DEFINE_PER_CPU(struct kvm_vcpu *, current_vcpu); @@ -5894,7 +5877,7 @@ void kvm_arch_exit(void) if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) cpufreq_unregister_notifier(&kvmclock_cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER); - unregister_hotcpu_notifier(&kvmclock_cpu_notifier_block); + cpuhp_remove_state_nocalls(CPUHP_AP_X86_KVM_CLK_ONLINE); 
#ifdef CONFIG_X86_64 pvclock_gtod_unregister_notifier(&pvclock_gtod_notifier); #endif diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 7449081ff412..8c190a85af6e 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -56,6 +56,7 @@ enum cpuhp_state { CPUHP_AP_ONLINE_DYN, CPUHP_AP_ONLINE_DYN_END = CPUHP_AP_ONLINE_DYN + 30, CPUHP_AP_X86_HPET_ONLINE, + CPUHP_AP_X86_KVM_CLK_ONLINE, CPUHP_AP_ACTIVE, CPUHP_ONLINE, }; -- cgit v1.2.3-59-g8ed1b From 148b9e2abea6f50e4620e4ac0683b4913c4364c0 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 13 Jul 2016 17:16:34 +0000 Subject: x86/apb_timer: Convert to hotplug state machine Install the callbacks via the state machine. There is no setup just one teardown callback. Remove the silly comment about the workqueue up dependency. Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: Anna-Maria Gleixner Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: rt@linutronix.de Link: http://lkml.kernel.org/r/20160713153335.625342983@linutronix.de Signed-off-by: Ingo Molnar --- arch/x86/kernel/apb_timer.c | 29 ++++++++++------------------- include/linux/cpuhotplug.h | 1 + 2 files changed, 11 insertions(+), 19 deletions(-) (limited to 'include/linux') diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c index cefacbad1531..456316f6c868 100644 --- a/arch/x86/kernel/apb_timer.c +++ b/arch/x86/kernel/apb_timer.c @@ -215,26 +215,18 @@ void apbt_setup_secondary_clock(void) * cpu timers during the offline process due to the ordering of notification. * the extra interrupt is harmless. */ -static int apbt_cpuhp_notify(struct notifier_block *n, - unsigned long action, void *hcpu) +static int apbt_cpu_dead(unsigned int cpu) { - unsigned long cpu = (unsigned long)hcpu; struct apbt_dev *adev = &per_cpu(cpu_apbt_dev, cpu); - switch (action & ~CPU_TASKS_FROZEN) { - case CPU_DEAD: - dw_apb_clockevent_pause(adev->timer); - if (system_state == SYSTEM_RUNNING) { - pr_debug("skipping APBT CPU %lu offline\n", cpu); - } else { - pr_debug("APBT clockevent for cpu %lu offline\n", cpu); - dw_apb_clockevent_stop(adev->timer); - } - break; - default: - pr_debug("APBT notified %lu, no action\n", action); + dw_apb_clockevent_pause(adev->timer); + if (system_state == SYSTEM_RUNNING) { + pr_debug("skipping APBT CPU %u offline\n", cpu); + } else { + pr_debug("APBT clockevent for cpu %u offline\n", cpu); + dw_apb_clockevent_stop(adev->timer); } - return NOTIFY_OK; + return 0; } static __init int apbt_late_init(void) @@ -242,9 +234,8 @@ static __init int apbt_late_init(void) if (intel_mid_timer_options == INTEL_MID_TIMER_LAPIC_APBT || !apb_timer_block_enabled) return 0; - /* This notifier should be called after workqueue is ready */ - hotcpu_notifier(apbt_cpuhp_notify, -20); - return 0; + return cpuhp_setup_state(CPUHP_X86_APB_DEAD, "X86_APB_DEAD", NULL, + apbt_cpu_dead); } fs_initcall(apbt_late_init); #else diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 8c190a85af6e..3e719ba44fbe 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -13,6 +13,7 @@ enum cpuhp_state { CPUHP_PERF_POWER, CPUHP_PERF_SUPERH, CPUHP_X86_HPET_DEAD, + CPUHP_X86_APB_DEAD, CPUHP_WORKQUEUE_PREP, CPUHP_NOTIFY_PREPARE, CPUHP_BRINGUP_CPU, -- cgit v1.2.3-59-g8ed1b From e5b61bafe70477e05e1dce0d6ca4ec181e23cb2a Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 13 Jul 2016 17:16:35 +0000 Subject: arm: Convert VFP hotplug notifiers to state machine Straight forward conversion plus 
commentary why code which is executed in hotplug callbacks needs to be invoked before installing them. Signed-off-by: Thomas Gleixner Signed-off-by: Anna-Maria Gleixner Reviewed-by: Sebastian Andrzej Siewior Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Russell King Cc: linux-arm-kernel@lists.infradead.org Cc: rt@linutronix.de Link: http://lkml.kernel.org/r/20160713153335.713612993@linutronix.de Signed-off-by: Ingo Molnar --- arch/arm/vfp/vfpmodule.c | 28 +++++++++++++++++----------- include/linux/cpuhotplug.h | 1 + 2 files changed, 18 insertions(+), 11 deletions(-) (limited to 'include/linux') diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c index 73085d3482ed..da0b33deba6d 100644 --- a/arch/arm/vfp/vfpmodule.c +++ b/arch/arm/vfp/vfpmodule.c @@ -643,19 +643,19 @@ int vfp_restore_user_hwstate(struct user_vfp __user *ufp, * hardware state at every thread switch. We clear our held state when * a CPU has been killed, indicating that the VFP hardware doesn't contain * a threads VFP state. When a CPU starts up, we re-enable access to the - * VFP hardware. - * - * Both CPU_DYING and CPU_STARTING are called on the CPU which + * VFP hardware. The callbacks below are called on the CPU which * is being offlined/onlined. */ -static int vfp_hotplug(struct notifier_block *b, unsigned long action, - void *hcpu) +static int vfp_dying_cpu(unsigned int cpu) { - if (action == CPU_DYING || action == CPU_DYING_FROZEN) - vfp_current_hw_state[(long)hcpu] = NULL; - else if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) - vfp_enable(NULL); - return NOTIFY_OK; + vfp_force_reload(cpu, current_thread_info()); + return 0; +} + +static int vfp_starting_cpu(unsigned int unused) +{ + vfp_enable(NULL); + return 0; } void vfp_kmode_exception(void) @@ -732,6 +732,10 @@ static int __init vfp_init(void) unsigned int vfpsid; unsigned int cpu_arch = cpu_architecture(); + /* + * Enable the access to the VFP on all online CPUs so the + * following test on FPSID will succeed. + */ if (cpu_arch >= CPU_ARCH_ARMv6) on_each_cpu(vfp_enable, NULL, 1); @@ -794,7 +798,9 @@ static int __init vfp_init(void) VFP_arch = (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT; } - hotcpu_notifier(vfp_hotplug, 0); + cpuhp_setup_state_nocalls(CPUHP_AP_ARM_VFP_STARTING, + "AP_ARM_VFP_STARTING", vfp_starting_cpu, + vfp_dying_cpu); vfp_vector = vfp_support_entry; diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 3e719ba44fbe..4c63cb30aee6 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -34,6 +34,7 @@ enum cpuhp_state { CPUHP_AP_PERF_X86_CQM_STARTING, CPUHP_AP_PERF_X86_CSTATE_STARTING, CPUHP_AP_PERF_XTENSA_STARTING, + CPUHP_AP_ARM_VFP_STARTING, CPUHP_AP_NOTIFY_STARTING, CPUHP_AP_ONLINE, CPUHP_TEARDOWN_CPU, -- cgit v1.2.3-59-g8ed1b From 7d88eb695a1f5f67820e02999b949d5cfa080442 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 13 Jul 2016 17:16:36 +0000 Subject: arm/perf: Convert to hotplug state machine Straight forward conversion w/o bells and whistles. 
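The arm_pmu conversion below registers with the _nocalls variant, which installs the callback without replaying it on CPUs that are already online; the plain variant replays it immediately. Drivers pick _nocalls when the already-online CPUs have been, or will be, set up by other means. The difference, in fragment form with placeholder names:

	/* Invokes my_online() right away on every CPU that is already up: */
	cpuhp_setup_state(CPUHP_AP_MY_ONLINE, "AP_MY_ONLINE",
			  my_online, NULL);

	/* Only installs the callback; already-online CPUs are left alone: */
	cpuhp_setup_state_nocalls(CPUHP_AP_MY_ONLINE, "AP_MY_ONLINE",
				  my_online, NULL);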
Signed-off-by: Thomas Gleixner Signed-off-by: Anna-Maria Gleixner Reviewed-by: Sebastian Andrzej Siewior Cc: Linus Torvalds Cc: Mark Rutland Cc: Peter Zijlstra Cc: Will Deacon Cc: rt@linutronix.de Link: http://lkml.kernel.org/r/20160713153335.794097159@linutronix.de Signed-off-by: Ingo Molnar --- drivers/perf/arm_pmu.c | 36 +++++++++++++++--------------------- include/linux/cpuhotplug.h | 1 + include/linux/perf/arm_pmu.h | 1 - 3 files changed, 16 insertions(+), 22 deletions(-) (limited to 'include/linux') diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c index 140436a046c0..ae9fc6c6add3 100644 --- a/drivers/perf/arm_pmu.c +++ b/drivers/perf/arm_pmu.c @@ -691,24 +691,15 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler) * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading * junk values out of them. */ -static int cpu_pmu_notify(struct notifier_block *b, unsigned long action, - void *hcpu) +static int arm_perf_starting_cpu(unsigned int cpu) { - int cpu = (unsigned long)hcpu; - struct arm_pmu *pmu = container_of(b, struct arm_pmu, hotplug_nb); - - if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING) - return NOTIFY_DONE; - - if (!cpumask_test_cpu(cpu, &pmu->supported_cpus)) - return NOTIFY_DONE; - - if (pmu->reset) - pmu->reset(pmu); - else - return NOTIFY_DONE; - - return NOTIFY_OK; + if (!__oprofile_cpu_pmu) + return 0; + if (!cpumask_test_cpu(cpu, &__oprofile_cpu_pmu->supported_cpus)) + return 0; + if (__oprofile_cpu_pmu->reset) + __oprofile_cpu_pmu->reset(__oprofile_cpu_pmu); + return 0; } #ifdef CONFIG_CPU_PM @@ -819,8 +810,9 @@ static int cpu_pmu_init(struct arm_pmu *cpu_pmu) if (!cpu_hw_events) return -ENOMEM; - cpu_pmu->hotplug_nb.notifier_call = cpu_pmu_notify; - err = register_cpu_notifier(&cpu_pmu->hotplug_nb); + err = cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ARM_STARTING, + "AP_PERF_ARM_STARTING", + arm_perf_starting_cpu, NULL); if (err) goto out_hw_events; @@ -858,7 +850,7 @@ static int cpu_pmu_init(struct arm_pmu *cpu_pmu) return 0; out_unregister: - unregister_cpu_notifier(&cpu_pmu->hotplug_nb); + cpuhp_remove_state_nocalls(CPUHP_AP_PERF_ARM_STARTING); out_hw_events: free_percpu(cpu_hw_events); return err; @@ -867,7 +859,7 @@ out_hw_events: static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu) { cpu_pm_pmu_unregister(cpu_pmu); - unregister_cpu_notifier(&cpu_pmu->hotplug_nb); + cpuhp_remove_state_nocalls(CPUHP_AP_PERF_ARM_STARTING); free_percpu(cpu_pmu->hw_events); } @@ -1027,6 +1019,8 @@ int arm_pmu_device_probe(struct platform_device *pdev, if (ret) goto out_destroy; + WARN(__oprofile_cpu_pmu, "%s(): missing PMU strucure for CPU-hotplug\n", + __func__); if (!__oprofile_cpu_pmu) __oprofile_cpu_pmu = pmu; diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 4c63cb30aee6..a5b6a6526af8 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -35,6 +35,7 @@ enum cpuhp_state { CPUHP_AP_PERF_X86_CSTATE_STARTING, CPUHP_AP_PERF_XTENSA_STARTING, CPUHP_AP_ARM_VFP_STARTING, + CPUHP_AP_PERF_ARM_STARTING, CPUHP_AP_NOTIFY_STARTING, CPUHP_AP_ONLINE, CPUHP_TEARDOWN_CPU, diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h index d28ac05c7f92..e6ed34eb2b2b 100644 --- a/include/linux/perf/arm_pmu.h +++ b/include/linux/perf/arm_pmu.h @@ -109,7 +109,6 @@ struct arm_pmu { DECLARE_BITMAP(pmceid_bitmap, ARMV8_PMUV3_MAX_COMMON_EVENTS); struct platform_device *plat_device; struct pmu_hw_events __percpu *hw_events; - struct notifier_block hotplug_nb; struct notifier_block cpu_pm_nb; }; -- 
cgit v1.2.3-59-g8ed1b From 8c18b2d2d0881b116ea52498c6d624c0a71e1fdc Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 13 Jul 2016 17:16:37 +0000 Subject: virt: Convert kvm hotplug to state machine Install the callbacks via the state machine. The core won't invoke the callbacks on already online CPUs. Signed-off-by: Thomas Gleixner Signed-off-by: Anna-Maria Gleixner Reviewed-by: Sebastian Andrzej Siewior Acked-by: Paolo Bonzini Cc: Gleb Natapov Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Radim Krcmar Cc: kvm@vger.kernel.org Cc: rt@linutronix.de Link: http://lkml.kernel.org/r/20160713153335.886159080@linutronix.de Signed-off-by: Ingo Molnar --- include/linux/cpuhotplug.h | 1 + virt/kvm/kvm_main.c | 32 ++++++++------------------------ 2 files changed, 9 insertions(+), 24 deletions(-) (limited to 'include/linux') diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index a5b6a6526af8..5423997c94f7 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -36,6 +36,7 @@ enum cpuhp_state { CPUHP_AP_PERF_XTENSA_STARTING, CPUHP_AP_ARM_VFP_STARTING, CPUHP_AP_PERF_ARM_STARTING, + CPUHP_AP_KVM_STARTING, CPUHP_AP_NOTIFY_STARTING, CPUHP_AP_ONLINE, CPUHP_TEARDOWN_CPU, diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 48bd520fc702..c1d6cf5a74a1 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -3144,12 +3144,13 @@ static void hardware_enable_nolock(void *junk) } } -static void hardware_enable(void) +static int kvm_starting_cpu(unsigned int cpu) { raw_spin_lock(&kvm_count_lock); if (kvm_usage_count) hardware_enable_nolock(NULL); raw_spin_unlock(&kvm_count_lock); + return 0; } static void hardware_disable_nolock(void *junk) @@ -3162,12 +3163,13 @@ static void hardware_disable_nolock(void *junk) kvm_arch_hardware_disable(); } -static void hardware_disable(void) +static int kvm_dying_cpu(unsigned int cpu) { raw_spin_lock(&kvm_count_lock); if (kvm_usage_count) hardware_disable_nolock(NULL); raw_spin_unlock(&kvm_count_lock); + return 0; } static void hardware_disable_all_nolock(void) @@ -3208,21 +3210,6 @@ static int hardware_enable_all(void) return r; } -static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val, - void *v) -{ - val &= ~CPU_TASKS_FROZEN; - switch (val) { - case CPU_DYING: - hardware_disable(); - break; - case CPU_STARTING: - hardware_enable(); - break; - } - return NOTIFY_OK; -} - static int kvm_reboot(struct notifier_block *notifier, unsigned long val, void *v) { @@ -3489,10 +3476,6 @@ int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx, return r; } -static struct notifier_block kvm_cpu_notifier = { - .notifier_call = kvm_cpu_hotplug, -}; - static int kvm_debugfs_open(struct inode *inode, struct file *file, int (*get)(void *, u64 *), int (*set)(void *, u64), const char *fmt) @@ -3743,7 +3726,8 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align, goto out_free_1; } - r = register_cpu_notifier(&kvm_cpu_notifier); + r = cpuhp_setup_state_nocalls(CPUHP_AP_KVM_STARTING, "AP_KVM_STARTING", + kvm_starting_cpu, kvm_dying_cpu); if (r) goto out_free_2; register_reboot_notifier(&kvm_reboot_notifier); @@ -3797,7 +3781,7 @@ out_free: kmem_cache_destroy(kvm_vcpu_cache); out_free_3: unregister_reboot_notifier(&kvm_reboot_notifier); - unregister_cpu_notifier(&kvm_cpu_notifier); + cpuhp_remove_state_nocalls(CPUHP_AP_KVM_STARTING); out_free_2: out_free_1: kvm_arch_hardware_unsetup(); @@ -3820,7 +3804,7 @@ void kvm_exit(void) kvm_async_pf_deinit(); unregister_syscore_ops(&kvm_syscore_ops); 
unregister_reboot_notifier(&kvm_reboot_notifier); - unregister_cpu_notifier(&kvm_cpu_notifier); + cpuhp_remove_state_nocalls(CPUHP_AP_KVM_STARTING); on_each_cpu(hardware_disable_nolock, NULL, 1); kvm_arch_hardware_unsetup(); kvm_arch_exit(); -- cgit v1.2.3-59-g8ed1b From 7e86e8bd8dd67649d176e08d8dfb90039f0a1e98 Mon Sep 17 00:00:00 2001 From: Richard Cochran Date: Wed, 13 Jul 2016 17:16:39 +0000 Subject: clocksource/arm_arch_timer: Convert to hotplug state machine Install the callbacks via the state machine and let the core invoke the callbacks on the already online CPUs. Signed-off-by: Richard Cochran Signed-off-by: Anna-Maria Gleixner Reviewed-by: Sebastian Andrzej Siewior Cc: Daniel Lezcano Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: rt@linutronix.de Link: http://lkml.kernel.org/r/20160713153336.048259040@linutronix.de Signed-off-by: Ingo Molnar --- drivers/clocksource/arm_arch_timer.c | 54 +++++++++++++++++------------------- include/linux/cpuhotplug.h | 1 + 2 files changed, 27 insertions(+), 28 deletions(-) (limited to 'include/linux') diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c index 5effd3027319..28bce3f4f81d 100644 --- a/drivers/clocksource/arm_arch_timer.c +++ b/drivers/clocksource/arm_arch_timer.c @@ -370,8 +370,10 @@ static bool arch_timer_has_nonsecure_ppi(void) arch_timer_ppi[PHYS_NONSECURE_PPI]); } -static int arch_timer_setup(struct clock_event_device *clk) +static int arch_timer_starting_cpu(unsigned int cpu) { + struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt); + __arch_timer_setup(ARCH_CP15_TIMER, clk); enable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], 0); @@ -527,29 +529,14 @@ static void arch_timer_stop(struct clock_event_device *clk) clk->set_state_shutdown(clk); } -static int arch_timer_cpu_notify(struct notifier_block *self, - unsigned long action, void *hcpu) +static int arch_timer_dying_cpu(unsigned int cpu) { - /* - * Grab cpu pointer in each case to avoid spurious - * preemptible warnings - */ - switch (action & ~CPU_TASKS_FROZEN) { - case CPU_STARTING: - arch_timer_setup(this_cpu_ptr(arch_timer_evt)); - break; - case CPU_DYING: - arch_timer_stop(this_cpu_ptr(arch_timer_evt)); - break; - } + struct clock_event_device *clk = this_cpu_ptr(arch_timer_evt); - return NOTIFY_OK; + arch_timer_stop(clk); + return 0; } -static struct notifier_block arch_timer_cpu_nb = { - .notifier_call = arch_timer_cpu_notify, -}; - #ifdef CONFIG_CPU_PM static unsigned int saved_cntkctl; static int arch_timer_cpu_pm_notify(struct notifier_block *self, @@ -570,11 +557,21 @@ static int __init arch_timer_cpu_pm_init(void) { return cpu_pm_register_notifier(&arch_timer_cpu_pm_notifier); } + +static void __init arch_timer_cpu_pm_deinit(void) +{ + WARN_ON(cpu_pm_unregister_notifier(&arch_timer_cpu_pm_notifier)); +} + #else static int __init arch_timer_cpu_pm_init(void) { return 0; } + +static void __init arch_timer_cpu_pm_deinit(void) +{ +} #endif static int __init arch_timer_register(void) @@ -621,22 +618,23 @@ static int __init arch_timer_register(void) goto out_free; } - err = register_cpu_notifier(&arch_timer_cpu_nb); - if (err) - goto out_free_irq; - err = arch_timer_cpu_pm_init(); if (err) goto out_unreg_notify; - /* Immediately configure the timer on the boot CPU */ - arch_timer_setup(this_cpu_ptr(arch_timer_evt)); + /* Register and immediately configure the timer on the boot CPU */ + err = cpuhp_setup_state(CPUHP_AP_ARM_ARCH_TIMER_STARTING, + "AP_ARM_ARCH_TIMER_STARTING", + arch_timer_starting_cpu, 
arch_timer_dying_cpu); + if (err) + goto out_unreg_cpupm; return 0; +out_unreg_cpupm: + arch_timer_cpu_pm_deinit(); + out_unreg_notify: - unregister_cpu_notifier(&arch_timer_cpu_nb); -out_free_irq: free_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], arch_timer_evt); if (arch_timer_has_nonsecure_ppi()) free_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 5423997c94f7..f0524cd70e9b 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -36,6 +36,7 @@ enum cpuhp_state { CPUHP_AP_PERF_XTENSA_STARTING, CPUHP_AP_ARM_VFP_STARTING, CPUHP_AP_PERF_ARM_STARTING, + CPUHP_AP_ARM_ARCH_TIMER_STARTING, CPUHP_AP_KVM_STARTING, CPUHP_AP_NOTIFY_STARTING, CPUHP_AP_ONLINE, -- cgit v1.2.3-59-g8ed1b From 00c1d17aab513d3b8117fb84644eba39434b33e4 Mon Sep 17 00:00:00 2001 From: Richard Cochran Date: Wed, 13 Jul 2016 17:16:41 +0000 Subject: clocksource/dummy_timer: Convert to hotplug state machine Install the callbacks via the state machine and let the core invoke the callbacks on the already online CPUs. Signed-off-by: Richard Cochran Signed-off-by: Anna-Maria Gleixner Reviewed-by: Sebastian Andrzej Siewior Cc: Daniel Lezcano Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: rt@linutronix.de Link: http://lkml.kernel.org/r/20160713153336.130385842@linutronix.de Signed-off-by: Ingo Molnar --- drivers/clocksource/dummy_timer.c | 36 ++++++------------------------------ include/linux/cpuhotplug.h | 1 + 2 files changed, 7 insertions(+), 30 deletions(-) (limited to 'include/linux') diff --git a/drivers/clocksource/dummy_timer.c b/drivers/clocksource/dummy_timer.c index 776b6c86dcd5..89f1c2edbe02 100644 --- a/drivers/clocksource/dummy_timer.c +++ b/drivers/clocksource/dummy_timer.c @@ -16,10 +16,9 @@ static DEFINE_PER_CPU(struct clock_event_device, dummy_timer_evt); -static void dummy_timer_setup(void) +static int dummy_timer_starting_cpu(unsigned int cpu) { - int cpu = smp_processor_id(); - struct clock_event_device *evt = raw_cpu_ptr(&dummy_timer_evt); + struct clock_event_device *evt = per_cpu_ptr(&dummy_timer_evt, cpu); evt->name = "dummy_timer"; evt->features = CLOCK_EVT_FEAT_PERIODIC | @@ -29,36 +28,13 @@ static void dummy_timer_setup(void) evt->cpumask = cpumask_of(cpu); clockevents_register_device(evt); + return 0; } -static int dummy_timer_cpu_notify(struct notifier_block *self, - unsigned long action, void *hcpu) -{ - if ((action & ~CPU_TASKS_FROZEN) == CPU_STARTING) - dummy_timer_setup(); - - return NOTIFY_OK; -} - -static struct notifier_block dummy_timer_cpu_nb = { - .notifier_call = dummy_timer_cpu_notify, -}; - static int __init dummy_timer_register(void) { - int err = 0; - - cpu_notifier_register_begin(); - err = __register_cpu_notifier(&dummy_timer_cpu_nb); - if (err) - goto out; - - /* We won't get a call on the boot CPU, so register immediately */ - if (num_possible_cpus() > 1) - dummy_timer_setup(); - -out: - cpu_notifier_register_done(); - return err; + return cpuhp_setup_state(CPUHP_AP_DUMMY_TIMER_STARTING, + "AP_DUMMY_TIMER_STARTING", + dummy_timer_starting_cpu, NULL); } early_initcall(dummy_timer_register); diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index f0524cd70e9b..81035613a836 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -37,6 +37,7 @@ enum cpuhp_state { CPUHP_AP_ARM_VFP_STARTING, CPUHP_AP_PERF_ARM_STARTING, CPUHP_AP_ARM_ARCH_TIMER_STARTING, + CPUHP_AP_DUMMY_TIMER_STARTING, CPUHP_AP_KVM_STARTING, CPUHP_AP_NOTIFY_STARTING, 
CPUHP_AP_ONLINE, -- cgit v1.2.3-59-g8ed1b From 31e8e5db8849e4c7084dd5bce7b7d96d2fb14b8b Mon Sep 17 00:00:00 2001 From: Richard Cochran Date: Wed, 13 Jul 2016 17:16:42 +0000 Subject: clocksource/metag: Convert to hotplug state machine Install the callbacks via the state machine and let the core invoke the callbacks on the already online CPUs. Signed-off-by: Richard Cochran Signed-off-by: Anna-Maria Gleixner Reviewed-by: Sebastian Andrzej Siewior Cc: Daniel Lezcano Cc: James Hogan Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-metag@vger.kernel.org Cc: rt@linutronix.de Link: http://lkml.kernel.org/r/20160713153336.215137642@linutronix.de Signed-off-by: Ingo Molnar --- drivers/clocksource/metag_generic.c | 33 ++++++--------------------------- include/linux/cpuhotplug.h | 1 + 2 files changed, 7 insertions(+), 27 deletions(-) (limited to 'include/linux') diff --git a/drivers/clocksource/metag_generic.c b/drivers/clocksource/metag_generic.c index bcd5c0d602a0..a80ab3e446b7 100644 --- a/drivers/clocksource/metag_generic.c +++ b/drivers/clocksource/metag_generic.c @@ -90,7 +90,7 @@ unsigned long long sched_clock(void) return ticks << HARDWARE_TO_NS_SHIFT; } -static void arch_timer_setup(unsigned int cpu) +static int arch_timer_starting_cpu(unsigned int cpu) { unsigned int txdivtime; struct clock_event_device *clk = &per_cpu(local_clockevent, cpu); @@ -132,27 +132,9 @@ static void arch_timer_setup(unsigned int cpu) val = core_reg_read(TXUCT_ID, TXTIMER_REGNUM, thread0); __core_reg_set(TXTIMER, val); } + return 0; } -static int arch_timer_cpu_notify(struct notifier_block *self, - unsigned long action, void *hcpu) -{ - int cpu = (long)hcpu; - - switch (action) { - case CPU_STARTING: - case CPU_STARTING_FROZEN: - arch_timer_setup(cpu); - break; - } - - return NOTIFY_OK; -} - -static struct notifier_block arch_timer_cpu_nb = { - .notifier_call = arch_timer_cpu_notify, -}; - int __init metag_generic_timer_init(void) { /* @@ -170,11 +152,8 @@ int __init metag_generic_timer_init(void) setup_irq(tbisig_map(TBID_SIGNUM_TRT), &metag_timer_irq); - /* Configure timer on boot CPU */ - arch_timer_setup(smp_processor_id()); - - /* Hook cpu boot to configure other CPU's timers */ - register_cpu_notifier(&arch_timer_cpu_nb); - - return 0; + /* Hook cpu boot to configure the CPU's timers */ + return cpuhp_setup_state(CPUHP_AP_METAG_TIMER_STARTING, + "AP_METAG_TIMER_STARTING", + arch_timer_starting_cpu, NULL); } diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 81035613a836..afba3b19006c 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -38,6 +38,7 @@ enum cpuhp_state { CPUHP_AP_PERF_ARM_STARTING, CPUHP_AP_ARM_ARCH_TIMER_STARTING, CPUHP_AP_DUMMY_TIMER_STARTING, + CPUHP_AP_METAG_TIMER_STARTING, CPUHP_AP_KVM_STARTING, CPUHP_AP_NOTIFY_STARTING, CPUHP_AP_ONLINE, -- cgit v1.2.3-59-g8ed1b From b04041655d0cf84b77802cb0b4517e672df04f01 Mon Sep 17 00:00:00 2001 From: Richard Cochran Date: Wed, 13 Jul 2016 17:16:43 +0000 Subject: clocksource/qcom-timer: Convert to hotplug state machine Install the callbacks via the state machine and let the core invoke the callbacks on the already online CPUs. 
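For context, an illustrative sketch (placeholder names, not part of this patch): the series uses two registration variants. cpuhp_setup_state() installs the callbacks and additionally invokes the startup callback on every CPU that is already online, which is why the explicit boot-CPU timer setup call can be dropped below. cpuhp_setup_state_nocalls() only installs the callbacks for future hotplug events. Roughly, with FOO as a placeholder state:

	/* install and run foo_starting_cpu() on all currently online CPUs */
	ret = cpuhp_setup_state(CPUHP_AP_FOO_STARTING, "AP_FOO_STARTING",
				foo_starting_cpu, foo_dying_cpu);

	/* install only; callbacks run on later CPU bring up / tear down */
	ret = cpuhp_setup_state_nocalls(CPUHP_AP_FOO_STARTING,
					"AP_FOO_STARTING",
					foo_starting_cpu, foo_dying_cpu);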
Signed-off-by: Richard Cochran Signed-off-by: Anna-Maria Gleixner Reviewed-by: Sebastian Andrzej Siewior Cc: Daniel Lezcano Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: rt@linutronix.de Link: http://lkml.kernel.org/r/20160713153336.295486558@linutronix.de Signed-off-by: Ingo Molnar --- drivers/clocksource/qcom-timer.c | 41 +++++++++++----------------------------- include/linux/cpuhotplug.h | 1 + 2 files changed, 12 insertions(+), 30 deletions(-) (limited to 'include/linux') diff --git a/drivers/clocksource/qcom-timer.c b/drivers/clocksource/qcom-timer.c index 662576339049..3283cfa2aa52 100644 --- a/drivers/clocksource/qcom-timer.c +++ b/drivers/clocksource/qcom-timer.c @@ -105,9 +105,9 @@ static struct clocksource msm_clocksource = { static int msm_timer_irq; static int msm_timer_has_ppi; -static int msm_local_timer_setup(struct clock_event_device *evt) +static int msm_local_timer_starting_cpu(unsigned int cpu) { - int cpu = smp_processor_id(); + struct clock_event_device *evt = per_cpu_ptr(msm_evt, cpu); int err; evt->irq = msm_timer_irq; @@ -135,35 +135,15 @@ static int msm_local_timer_setup(struct clock_event_device *evt) return 0; } -static void msm_local_timer_stop(struct clock_event_device *evt) +static int msm_local_timer_dying_cpu(unsigned int cpu) { + struct clock_event_device *evt = per_cpu_ptr(msm_evt, cpu); + evt->set_state_shutdown(evt); disable_percpu_irq(evt->irq); + return 0; } -static int msm_timer_cpu_notify(struct notifier_block *self, - unsigned long action, void *hcpu) -{ - /* - * Grab cpu pointer in each case to avoid spurious - * preemptible warnings - */ - switch (action & ~CPU_TASKS_FROZEN) { - case CPU_STARTING: - msm_local_timer_setup(this_cpu_ptr(msm_evt)); - break; - case CPU_DYING: - msm_local_timer_stop(this_cpu_ptr(msm_evt)); - break; - } - - return NOTIFY_OK; -} - -static struct notifier_block msm_timer_cpu_nb = { - .notifier_call = msm_timer_cpu_notify, -}; - static u64 notrace msm_sched_clock_read(void) { return msm_clocksource.read(&msm_clocksource); @@ -200,14 +180,15 @@ static int __init msm_timer_init(u32 dgt_hz, int sched_bits, int irq, if (res) { pr_err("request_percpu_irq failed\n"); } else { - res = register_cpu_notifier(&msm_timer_cpu_nb); + /* Install and invoke hotplug callbacks */ + res = cpuhp_setup_state(CPUHP_AP_QCOM_TIMER_STARTING, + "AP_QCOM_TIMER_STARTING", + msm_local_timer_starting_cpu, + msm_local_timer_dying_cpu); if (res) { free_percpu_irq(irq, msm_evt); goto err; } - - /* Immediately configure the timer on the boot CPU */ - msm_local_timer_setup(raw_cpu_ptr(msm_evt)); } err: diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index afba3b19006c..d74d15a028e4 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -39,6 +39,7 @@ enum cpuhp_state { CPUHP_AP_ARM_ARCH_TIMER_STARTING, CPUHP_AP_DUMMY_TIMER_STARTING, CPUHP_AP_METAG_TIMER_STARTING, + CPUHP_AP_QCOM_TIMER_STARTING, CPUHP_AP_KVM_STARTING, CPUHP_AP_NOTIFY_STARTING, CPUHP_AP_ONLINE, -- cgit v1.2.3-59-g8ed1b From 2dab90932fd08f101a45acc0495ccd77b869da5d Mon Sep 17 00:00:00 2001 From: Richard Cochran Date: Wed, 13 Jul 2016 17:16:44 +0000 Subject: clocksource/mips-gic: Convert to hotplug state machine Install the callbacks via the state machine and let the core invoke the callbacks on the already online CPUs. 
Signed-off-by: Richard Cochran Signed-off-by: Anna-Maria Gleixner Reviewed-by: Sebastian Andrzej Siewior Cc: Daniel Lezcano Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: rt@linutronix.de Link: http://lkml.kernel.org/r/20160713153336.380737946@linutronix.de Signed-off-by: Ingo Molnar --- drivers/clocksource/mips-gic-timer.c | 38 ++++++++++++------------------------ include/linux/cpuhotplug.h | 1 + 2 files changed, 14 insertions(+), 25 deletions(-) (limited to 'include/linux') diff --git a/drivers/clocksource/mips-gic-timer.c b/drivers/clocksource/mips-gic-timer.c index 1572c7a778ab..d91e8725917c 100644 --- a/drivers/clocksource/mips-gic-timer.c +++ b/drivers/clocksource/mips-gic-timer.c @@ -49,10 +49,9 @@ struct irqaction gic_compare_irqaction = { .name = "timer", }; -static void gic_clockevent_cpu_init(struct clock_event_device *cd) +static void gic_clockevent_cpu_init(unsigned int cpu, + struct clock_event_device *cd) { - unsigned int cpu = smp_processor_id(); - cd->name = "MIPS GIC"; cd->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP; @@ -79,19 +78,10 @@ static void gic_update_frequency(void *data) clockevents_update_freq(this_cpu_ptr(&gic_clockevent_device), rate); } -static int gic_cpu_notifier(struct notifier_block *nb, unsigned long action, - void *data) +static int gic_starting_cpu(unsigned int cpu) { - switch (action & ~CPU_TASKS_FROZEN) { - case CPU_STARTING: - gic_clockevent_cpu_init(this_cpu_ptr(&gic_clockevent_device)); - break; - case CPU_DYING: - gic_clockevent_cpu_exit(this_cpu_ptr(&gic_clockevent_device)); - break; - } - - return NOTIFY_OK; + gic_clockevent_cpu_init(cpu, this_cpu_ptr(&gic_clockevent_device)); + return 0; } static int gic_clk_notifier(struct notifier_block *nb, unsigned long action, @@ -105,10 +95,11 @@ static int gic_clk_notifier(struct notifier_block *nb, unsigned long action, return NOTIFY_OK; } - -static struct notifier_block gic_cpu_nb = { - .notifier_call = gic_cpu_notifier, -}; +static int gic_dying_cpu(unsigned int cpu) +{ + gic_clockevent_cpu_exit(this_cpu_ptr(&gic_clockevent_device)); + return 0; +} static struct notifier_block gic_clk_nb = { .notifier_call = gic_clk_notifier, @@ -125,12 +116,9 @@ static int gic_clockevent_init(void) if (ret < 0) return ret; - ret = register_cpu_notifier(&gic_cpu_nb); - if (ret < 0) - pr_warn("GIC: Unable to register CPU notifier\n"); - - gic_clockevent_cpu_init(this_cpu_ptr(&gic_clockevent_device)); - + cpuhp_setup_state(CPUHP_AP_MIPS_GIC_TIMER_STARTING, + "AP_MIPS_GIC_TIMER_STARTING", gic_starting_cpu, + gic_dying_cpu); return 0; } diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index d74d15a028e4..456e3d93272c 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -40,6 +40,7 @@ enum cpuhp_state { CPUHP_AP_DUMMY_TIMER_STARTING, CPUHP_AP_METAG_TIMER_STARTING, CPUHP_AP_QCOM_TIMER_STARTING, + CPUHP_AP_MIPS_GIC_TIMER_STARTING, CPUHP_AP_KVM_STARTING, CPUHP_AP_NOTIFY_STARTING, CPUHP_AP_ONLINE, -- cgit v1.2.3-59-g8ed1b From 911a359de96d5e9d24f50c74a48c6f3cf061539a Mon Sep 17 00:00:00 2001 From: Richard Cochran Date: Wed, 13 Jul 2016 17:16:45 +0000 Subject: leds/trigger/cpu: Convert to hotplug state machine This is a straightforward conversion. We place this callback last in the list so that the LED illuminates only after a successful bring up sequence. ( NOTE: The patch adds a FIXME question about the callback used, this question should probably be revisited later on.) 
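Background for the placement and the FIXME above (illustrative note, not part of the patch): callbacks in the CPUHP_AP_*_STARTING section run on the CPU that is coming up, early in the bring up path with interrupts disabled, and they are invoked in the order of the cpuhp_state enum (tear down runs in reverse). Placing the LED trigger entry at the end of that section therefore lights the LED only after the earlier STARTING callbacks have succeeded. Sketch of the resulting placement, matching the hunk below:

	CPUHP_AP_KVM_STARTING,
	CPUHP_AP_LEDTRIG_STARTING,	/* end of the STARTING callbacks */
	CPUHP_AP_NOTIFY_STARTING,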
Signed-off-by: Richard Cochran Signed-off-by: Anna-Maria Gleixner Reviewed-by: Sebastian Andrzej Siewior Acked-by: Jacek Anaszewski Cc: Linus Torvalds Cc: Linus Walleij Cc: Paul Gortmaker Cc: Peter Zijlstra Cc: Richard Purdie Cc: Thomas Gleixner Cc: linux-leds@vger.kernel.org Cc: rt@linutronix.de Link: http://lkml.kernel.org/r/20160713153336.465496902@linutronix.de Signed-off-by: Ingo Molnar --- drivers/leds/trigger/ledtrig-cpu.c | 32 +++++++++++++++----------------- include/linux/cpuhotplug.h | 1 + 2 files changed, 16 insertions(+), 17 deletions(-) (limited to 'include/linux') diff --git a/drivers/leds/trigger/ledtrig-cpu.c b/drivers/leds/trigger/ledtrig-cpu.c index 938467fb82be..4a6a182d0a88 100644 --- a/drivers/leds/trigger/ledtrig-cpu.c +++ b/drivers/leds/trigger/ledtrig-cpu.c @@ -92,25 +92,17 @@ static struct syscore_ops ledtrig_cpu_syscore_ops = { .resume = ledtrig_cpu_syscore_resume, }; -static int ledtrig_cpu_notify(struct notifier_block *self, - unsigned long action, void *hcpu) +static int ledtrig_starting_cpu(unsigned int cpu) { - switch (action & ~CPU_TASKS_FROZEN) { - case CPU_STARTING: - ledtrig_cpu(CPU_LED_START); - break; - case CPU_DYING: - ledtrig_cpu(CPU_LED_STOP); - break; - } - - return NOTIFY_OK; + ledtrig_cpu(CPU_LED_START); + return 0; } - -static struct notifier_block ledtrig_cpu_nb = { - .notifier_call = ledtrig_cpu_notify, -}; +static int ledtrig_dying_cpu(unsigned int cpu) +{ + ledtrig_cpu(CPU_LED_STOP); + return 0; +} static int __init ledtrig_cpu_init(void) { @@ -133,7 +125,13 @@ static int __init ledtrig_cpu_init(void) } register_syscore_ops(&ledtrig_cpu_syscore_ops); - register_cpu_notifier(&ledtrig_cpu_nb); + + /* + * FIXME: Why needs this to happen in the interrupt disabled + * low level bringup phase of a cpu? + */ + cpuhp_setup_state(CPUHP_AP_LEDTRIG_STARTING, "AP_LEDTRIG_STARTING", + ledtrig_starting_cpu, ledtrig_dying_cpu); pr_info("ledtrig-cpu: registered to indicate activity on CPUs\n"); diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 456e3d93272c..24f56e290003 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -42,6 +42,7 @@ enum cpuhp_state { CPUHP_AP_QCOM_TIMER_STARTING, CPUHP_AP_MIPS_GIC_TIMER_STARTING, CPUHP_AP_KVM_STARTING, + CPUHP_AP_LEDTRIG_STARTING, CPUHP_AP_NOTIFY_STARTING, CPUHP_AP_ONLINE, CPUHP_TEARDOWN_CPU, -- cgit v1.2.3-59-g8ed1b From 42ec50b5f257b19bd5653c40933b0803d5c4179a Mon Sep 17 00:00:00 2001 From: Richard Cochran Date: Wed, 13 Jul 2016 17:16:46 +0000 Subject: arm/kvm/vgic: Convert to hotplug state machine Install the callbacks via the state machine and let the core invoke the callbacks on the already online CPUs. The VGIC callback is run after KVM's main callback since it reflects the makefile order. 
Signed-off-by: Richard Cochran Signed-off-by: Anna-Maria Gleixner Reviewed-by: Sebastian Andrzej Siewior Cc: Christoffer Dall Cc: Gleb Natapov Cc: Linus Torvalds Cc: Marc Zyngier Cc: Paolo Bonzini Cc: Peter Zijlstra Cc: Radim Krcmar Cc: Thomas Gleixner Cc: kvm@vger.kernel.org Cc: kvmarm@lists.cs.columbia.edu Cc: linux-arm-kernel@lists.infradead.org Cc: rt@linutronix.de Link: http://lkml.kernel.org/r/20160713153336.546953286@linutronix.de Signed-off-by: Ingo Molnar --- include/linux/cpuhotplug.h | 1 + virt/kvm/arm/vgic.c | 39 ++++++++------------------------------- 2 files changed, 9 insertions(+), 31 deletions(-) (limited to 'include/linux') diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 24f56e290003..ff37caccafce 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -42,6 +42,7 @@ enum cpuhp_state { CPUHP_AP_QCOM_TIMER_STARTING, CPUHP_AP_MIPS_GIC_TIMER_STARTING, CPUHP_AP_KVM_STARTING, + CPUHP_AP_KVM_ARM_VGIC_STARTING, CPUHP_AP_LEDTRIG_STARTING, CPUHP_AP_NOTIFY_STARTING, CPUHP_AP_ONLINE, diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c index c3bfbb981e73..67cb5e948be2 100644 --- a/virt/kvm/arm/vgic.c +++ b/virt/kvm/arm/vgic.c @@ -2326,32 +2326,18 @@ int vgic_has_attr_regs(const struct vgic_io_range *ranges, phys_addr_t offset) return -ENXIO; } -static void vgic_init_maintenance_interrupt(void *info) +static int vgic_starting_cpu(unsigned int cpu) { enable_percpu_irq(vgic->maint_irq, 0); + return 0; } -static int vgic_cpu_notify(struct notifier_block *self, - unsigned long action, void *cpu) +static int vgic_dying_cpu(unsigned int cpu) { - switch (action) { - case CPU_STARTING: - case CPU_STARTING_FROZEN: - vgic_init_maintenance_interrupt(NULL); - break; - case CPU_DYING: - case CPU_DYING_FROZEN: - disable_percpu_irq(vgic->maint_irq); - break; - } - - return NOTIFY_OK; + disable_percpu_irq(vgic->maint_irq); + return 0; } -static struct notifier_block vgic_cpu_nb = { - .notifier_call = vgic_cpu_notify, -}; - static int kvm_vgic_probe(void) { const struct gic_kvm_info *gic_kvm_info; @@ -2392,19 +2378,10 @@ int kvm_vgic_hyp_init(void) return ret; } - ret = __register_cpu_notifier(&vgic_cpu_nb); - if (ret) { - kvm_err("Cannot register vgic CPU notifier\n"); - goto out_free_irq; - } - - on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1); - + cpuhp_setup_state(CPUHP_AP_KVM_ARM_VGIC_STARTING, + "AP_KVM_ARM_VGIC_STARTING", vgic_starting_cpu, + vgic_dying_cpu); return 0; - -out_free_irq: - free_percpu_irq(vgic->maint_irq, kvm_get_running_vcpus()); - return ret; } int kvm_irq_map_gsi(struct kvm *kvm, -- cgit v1.2.3-59-g8ed1b From b3c9950a5c75983c0a2fcc93760f53ba52da3c85 Mon Sep 17 00:00:00 2001 From: Richard Cochran Date: Wed, 13 Jul 2016 17:16:47 +0000 Subject: arm/kvm/arch_timer: Convert to hotplug state machine Install the callbacks via the state machine and let the core invoke the callbacks on the already online CPUs. 
Signed-off-by: Richard Cochran Signed-off-by: Anna-Maria Gleixner Reviewed-by: Sebastian Andrzej Siewior Cc: Christoffer Dall Cc: Gleb Natapov Cc: Linus Torvalds Cc: Marc Zyngier Cc: Paolo Bonzini Cc: Peter Zijlstra Cc: Radim Krcmar Cc: Thomas Gleixner Cc: kvm@vger.kernel.org Cc: kvmarm@lists.cs.columbia.edu Cc: linux-arm-kernel@lists.infradead.org Cc: rt@linutronix.de Link: http://lkml.kernel.org/r/20160713153336.634155707@linutronix.de Signed-off-by: Ingo Molnar --- include/linux/cpuhotplug.h | 1 + virt/kvm/arm/arch_timer.c | 35 +++++++++++------------------------ 2 files changed, 12 insertions(+), 24 deletions(-) (limited to 'include/linux') diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index ff37caccafce..88986c979b0f 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -43,6 +43,7 @@ enum cpuhp_state { CPUHP_AP_MIPS_GIC_TIMER_STARTING, CPUHP_AP_KVM_STARTING, CPUHP_AP_KVM_ARM_VGIC_STARTING, + CPUHP_AP_KVM_ARM_TIMER_STARTING, CPUHP_AP_LEDTRIG_STARTING, CPUHP_AP_NOTIFY_STARTING, CPUHP_AP_ONLINE, diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c index e2d5b6f988fb..4fde8c7dfcfe 100644 --- a/virt/kvm/arm/arch_timer.c +++ b/virt/kvm/arm/arch_timer.c @@ -405,26 +405,17 @@ u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid) return (u64)-1; } -static int kvm_timer_cpu_notify(struct notifier_block *self, - unsigned long action, void *cpu) +static int kvm_timer_starting_cpu(unsigned int cpu) { - switch (action) { - case CPU_STARTING: - case CPU_STARTING_FROZEN: - kvm_timer_init_interrupt(NULL); - break; - case CPU_DYING: - case CPU_DYING_FROZEN: - disable_percpu_irq(host_vtimer_irq); - break; - } - - return NOTIFY_OK; + kvm_timer_init_interrupt(NULL); + return 0; } -static struct notifier_block kvm_timer_cpu_nb = { - .notifier_call = kvm_timer_cpu_notify, -}; +static int kvm_timer_dying_cpu(unsigned int cpu) +{ + disable_percpu_irq(host_vtimer_irq); + return 0; +} int kvm_timer_hyp_init(void) { @@ -449,12 +440,6 @@ int kvm_timer_hyp_init(void) goto out; } - err = __register_cpu_notifier(&kvm_timer_cpu_nb); - if (err) { - kvm_err("Cannot register timer CPU notifier\n"); - goto out_free; - } - wqueue = create_singlethread_workqueue("kvm_arch_timer"); if (!wqueue) { err = -ENOMEM; @@ -462,8 +447,10 @@ int kvm_timer_hyp_init(void) } kvm_info("virtual timer IRQ%d\n", host_vtimer_irq); - on_each_cpu(kvm_timer_init_interrupt, NULL, 1); + cpuhp_setup_state(CPUHP_AP_KVM_ARM_TIMER_STARTING, + "AP_KVM_ARM_TIMER_STARTING", kvm_timer_starting_cpu, + kvm_timer_dying_cpu); goto out; out_free: free_percpu_irq(host_vtimer_irq, kvm_get_running_vcpus()); -- cgit v1.2.3-59-g8ed1b From 04d045a681122c806e4f17754ed48e2b19af5d80 Mon Sep 17 00:00:00 2001 From: Richard Cochran Date: Wed, 13 Jul 2016 17:16:49 +0000 Subject: metag/perf: Convert to hotplug state machine Install the callbacks via the state machine and let the core invoke the callbacks on the already online CPUs. 
Signed-off-by: Richard Cochran Signed-off-by: Anna-Maria Gleixner Reviewed-by: Sebastian Andrzej Siewior Cc: Alexander Shishkin Cc: Arnaldo Carvalho de Melo Cc: James Hogan Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-metag@vger.kernel.org Cc: rt@linutronix.de Link: http://lkml.kernel.org/r/20160713153336.717395164@linutronix.de Signed-off-by: Ingo Molnar --- arch/metag/kernel/perf/perf_event.c | 26 +++++++------------------- include/linux/cpuhotplug.h | 1 + 2 files changed, 8 insertions(+), 19 deletions(-) (limited to 'include/linux') diff --git a/arch/metag/kernel/perf/perf_event.c b/arch/metag/kernel/perf/perf_event.c index 33a365f924be..052cba23708c 100644 --- a/arch/metag/kernel/perf/perf_event.c +++ b/arch/metag/kernel/perf/perf_event.c @@ -806,25 +806,16 @@ static struct metag_pmu _metag_pmu = { }; /* PMU CPU hotplug notifier */ -static int metag_pmu_cpu_notify(struct notifier_block *b, unsigned long action, - void *hcpu) +static int metag_pmu_starting_cpu(unsigned int cpu) { - unsigned int cpu = (unsigned int)hcpu; struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); - if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING) - return NOTIFY_DONE; - memset(cpuc, 0, sizeof(struct cpu_hw_events)); raw_spin_lock_init(&cpuc->pmu_lock); - return NOTIFY_OK; + return 0; } -static struct notifier_block metag_pmu_notifier = { - .notifier_call = metag_pmu_cpu_notify, -}; - /* PMU Initialisation */ static int __init init_hw_perf_events(void) { @@ -876,16 +867,13 @@ static int __init init_hw_perf_events(void) metag_out32(0, PERF_COUNT(0)); metag_out32(0, PERF_COUNT(1)); - for_each_possible_cpu(cpu) { - struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu); + cpuhp_setup_state(CPUHP_AP_PERF_METAG_STARTING, + "AP_PERF_METAG_STARTING", metag_pmu_starting_cpu, + NULL); - memset(cpuc, 0, sizeof(struct cpu_hw_events)); - raw_spin_lock_init(&cpuc->pmu_lock); - } - - register_cpu_notifier(&metag_pmu_notifier); ret = perf_pmu_register(&pmu, metag_pmu->name, PERF_TYPE_RAW); -out: + if (ret) + cpuhp_remove_state_nocalls(CPUHP_AP_PERF_METAG_STARTING); return ret; } early_initcall(init_hw_perf_events); diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 88986c979b0f..990052c9a2f8 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -34,6 +34,7 @@ enum cpuhp_state { CPUHP_AP_PERF_X86_CQM_STARTING, CPUHP_AP_PERF_X86_CSTATE_STARTING, CPUHP_AP_PERF_XTENSA_STARTING, + CPUHP_AP_PERF_METAG_STARTING, CPUHP_AP_ARM_VFP_STARTING, CPUHP_AP_PERF_ARM_STARTING, CPUHP_AP_ARM_ARCH_TIMER_STARTING, -- cgit v1.2.3-59-g8ed1b From 9eeb2264775f0b60fcff3d186bf54c90b0191650 Mon Sep 17 00:00:00 2001 From: Richard Cochran Date: Wed, 13 Jul 2016 17:16:50 +0000 Subject: arm/l2c: Convert to hotplug state machine Install the callbacks via the state machine and let the core invoke the callbacks on the already online CPUs. 
Signed-off-by: Richard Cochran Signed-off-by: Anna-Maria Gleixner Reviewed-by: Sebastian Andrzej Siewior Cc: Brad Mouring Cc: Linus Torvalds Cc: Linus Walleij Cc: Marek Szyprowski Cc: Peter Zijlstra Cc: Rob Herring Cc: Russell King Cc: Thomas Gleixner Cc: linux-arm-kernel@lists.infradead.org Cc: rt@linutronix.de Link: http://lkml.kernel.org/r/20160713153336.801270887@linutronix.de Signed-off-by: Ingo Molnar --- arch/arm/mm/cache-l2x0.c | 27 +++++++++++++-------------- include/linux/cpuhotplug.h | 1 + 2 files changed, 14 insertions(+), 14 deletions(-) (limited to 'include/linux') diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c index c61996c256cc..cc12905ae6f8 100644 --- a/arch/arm/mm/cache-l2x0.c +++ b/arch/arm/mm/cache-l2x0.c @@ -597,17 +597,16 @@ static void l2c310_configure(void __iomem *base) L310_POWER_CTRL); } -static int l2c310_cpu_enable_flz(struct notifier_block *nb, unsigned long act, void *data) +static int l2c310_starting_cpu(unsigned int cpu) { - switch (act & ~CPU_TASKS_FROZEN) { - case CPU_STARTING: - set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1)); - break; - case CPU_DYING: - set_auxcr(get_auxcr() & ~(BIT(3) | BIT(2) | BIT(1))); - break; - } - return NOTIFY_OK; + set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1)); + return 0; +} + +static int l2c310_dying_cpu(unsigned int cpu) +{ + set_auxcr(get_auxcr() & ~(BIT(3) | BIT(2) | BIT(1))); + return 0; } static void __init l2c310_enable(void __iomem *base, unsigned num_lock) @@ -678,10 +677,10 @@ static void __init l2c310_enable(void __iomem *base, unsigned num_lock) power_ctrl & L310_STNDBY_MODE_EN ? "en" : "dis"); } - if (aux & L310_AUX_CTRL_FULL_LINE_ZERO) { - set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1)); - cpu_notifier(l2c310_cpu_enable_flz, 0); - } + if (aux & L310_AUX_CTRL_FULL_LINE_ZERO) + cpuhp_setup_state(CPUHP_AP_ARM_L2X0_STARTING, + "AP_ARM_L2X0_STARTING", l2c310_starting_cpu, + l2c310_dying_cpu); } static void __init l2c310_fixup(void __iomem *base, u32 cache_id, diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 990052c9a2f8..2057e64e4876 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -37,6 +37,7 @@ enum cpuhp_state { CPUHP_AP_PERF_METAG_STARTING, CPUHP_AP_ARM_VFP_STARTING, CPUHP_AP_PERF_ARM_STARTING, + CPUHP_AP_ARM_L2X0_STARTING, CPUHP_AP_ARM_ARCH_TIMER_STARTING, CPUHP_AP_DUMMY_TIMER_STARTING, CPUHP_AP_METAG_TIMER_STARTING, -- cgit v1.2.3-59-g8ed1b From 26b8768868c090151aa8c337773d429e0d5ca727 Mon Sep 17 00:00:00 2001 From: Richard Cochran Date: Wed, 13 Jul 2016 17:16:51 +0000 Subject: arm/twd: Convert to hotplug state machine Install the callbacks via the state machine. The callbacks won't be invoked on already online CPUs. 
Signed-off-by: Richard Cochran Signed-off-by: Anna-Maria Gleixner Reviewed-by: Sebastian Andrzej Siewior Cc: Daniel Lezcano Cc: Eric Anholt Cc: Felipe Balbi Cc: Heiko Stuebner Cc: Linus Torvalds Cc: Marc Gonzalez Cc: Peter Zijlstra Cc: Russell King Cc: Thomas Gleixner Cc: Vineet Gupta Cc: Viresh Kumar Cc: linux-arm-kernel@lists.infradead.org Cc: rt@linutronix.de Link: http://lkml.kernel.org/r/20160713153336.881124821@linutronix.de Signed-off-by: Ingo Molnar --- arch/arm/kernel/smp_twd.c | 31 +++++++++++-------------------- include/linux/cpuhotplug.h | 1 + 2 files changed, 12 insertions(+), 20 deletions(-) (limited to 'include/linux') diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c index b6ec65e68009..02d5e5e8d44c 100644 --- a/arch/arm/kernel/smp_twd.c +++ b/arch/arm/kernel/smp_twd.c @@ -310,24 +310,17 @@ static void twd_timer_setup(void) enable_percpu_irq(clk->irq, 0); } -static int twd_timer_cpu_notify(struct notifier_block *self, - unsigned long action, void *hcpu) +static int twd_timer_starting_cpu(unsigned int cpu) { - switch (action & ~CPU_TASKS_FROZEN) { - case CPU_STARTING: - twd_timer_setup(); - break; - case CPU_DYING: - twd_timer_stop(); - break; - } - - return NOTIFY_OK; + twd_timer_setup(); + return 0; } -static struct notifier_block twd_timer_cpu_nb = { - .notifier_call = twd_timer_cpu_notify, -}; +static int twd_timer_dying_cpu(unsigned int cpu) +{ + twd_timer_stop(); + return 0; +} static int __init twd_local_timer_common_register(struct device_node *np) { @@ -345,9 +338,9 @@ static int __init twd_local_timer_common_register(struct device_node *np) goto out_free; } - err = register_cpu_notifier(&twd_timer_cpu_nb); - if (err) - goto out_irq; + cpuhp_setup_state_nocalls(CPUHP_AP_ARM_TWD_STARTING, + "AP_ARM_TWD_STARTING", + twd_timer_starting_cpu, twd_timer_dying_cpu); twd_get_clock(np); if (!of_property_read_bool(np, "always-on")) @@ -365,8 +358,6 @@ static int __init twd_local_timer_common_register(struct device_node *np) return 0; -out_irq: - free_percpu_irq(twd_ppi, twd_evt); out_free: iounmap(twd_base); twd_base = NULL; diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 2057e64e4876..ad48881ba02d 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -40,6 +40,7 @@ enum cpuhp_state { CPUHP_AP_ARM_L2X0_STARTING, CPUHP_AP_ARM_ARCH_TIMER_STARTING, CPUHP_AP_DUMMY_TIMER_STARTING, + CPUHP_AP_ARM_TWD_STARTING, CPUHP_AP_METAG_TIMER_STARTING, CPUHP_AP_QCOM_TIMER_STARTING, CPUHP_AP_MIPS_GIC_TIMER_STARTING, -- cgit v1.2.3-59-g8ed1b From 4761adb6f49048cddd65553a71ea2354b2a838be Mon Sep 17 00:00:00 2001 From: Richard Cochran Date: Wed, 13 Jul 2016 17:16:52 +0000 Subject: arm/xen: Convert to hotplug state machine Install the callbacks via the state machine and let the core invoke the callbacks on the already online CPUs. The get_cpu() in xen_starting_cpu() boils down to preempt_disable() since we already know the CPU we run on. Disabling preemption shouldn't be required here from what I see since it we don't switch CPUs while invoking the function. 
Signed-off-by: Richard Cochran Signed-off-by: Anna-Maria Gleixner Reviewed-by: Sebastian Andrzej Siewior Reviewed-by: Stefano Stabellini Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Russell King Cc: Stefano Stabellini Cc: Thomas Gleixner Cc: linux-arm-kernel@lists.infradead.org Cc: rt@linutronix.de Cc: xen-devel@lists.xenproject.org Link: http://lkml.kernel.org/r/20160713153336.971559670@linutronix.de Signed-off-by: Ingo Molnar --- arch/arm/xen/enlighten.c | 41 +++++++++++------------------------------ include/linux/cpuhotplug.h | 1 + 2 files changed, 12 insertions(+), 30 deletions(-) (limited to 'include/linux') diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c index 75cd7345c654..d822e2313950 100644 --- a/arch/arm/xen/enlighten.c +++ b/arch/arm/xen/enlighten.c @@ -161,12 +161,11 @@ static struct notifier_block xen_pvclock_gtod_notifier = { .notifier_call = xen_pvclock_gtod_notify, }; -static void xen_percpu_init(void) +static int xen_starting_cpu(unsigned int cpu) { struct vcpu_register_vcpu_info info; struct vcpu_info *vcpup; int err; - int cpu = get_cpu(); /* * VCPUOP_register_vcpu_info cannot be called twice for the same @@ -190,7 +189,13 @@ static void xen_percpu_init(void) after_register_vcpu_info: enable_percpu_irq(xen_events_irq, 0); - put_cpu(); + return 0; +} + +static int xen_dying_cpu(unsigned int cpu) +{ + disable_percpu_irq(xen_events_irq); + return 0; } static void xen_restart(enum reboot_mode reboot_mode, const char *cmd) @@ -209,28 +214,6 @@ static void xen_power_off(void) BUG_ON(rc); } -static int xen_cpu_notification(struct notifier_block *self, - unsigned long action, - void *hcpu) -{ - switch (action) { - case CPU_STARTING: - xen_percpu_init(); - break; - case CPU_DYING: - disable_percpu_irq(xen_events_irq); - break; - default: - break; - } - - return NOTIFY_OK; -} - -static struct notifier_block xen_cpu_notifier = { - .notifier_call = xen_cpu_notification, -}; - static irqreturn_t xen_arm_callback(int irq, void *arg) { xen_hvm_evtchn_do_upcall(); @@ -351,16 +334,14 @@ static int __init xen_guest_init(void) return -EINVAL; } - xen_percpu_init(); - - register_cpu_notifier(&xen_cpu_notifier); - pv_time_ops.steal_clock = xen_stolen_accounting; static_key_slow_inc(¶virt_steal_enabled); if (xen_initial_domain()) pvclock_gtod_register_notifier(&xen_pvclock_gtod_notifier); - return 0; + return cpuhp_setup_state(CPUHP_AP_ARM_XEN_STARTING, + "AP_ARM_XEN_STARTING", xen_starting_cpu, + xen_dying_cpu); } early_initcall(xen_guest_init); diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index ad48881ba02d..4c02b52dc77f 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -47,6 +47,7 @@ enum cpuhp_state { CPUHP_AP_KVM_STARTING, CPUHP_AP_KVM_ARM_VGIC_STARTING, CPUHP_AP_KVM_ARM_TIMER_STARTING, + CPUHP_AP_ARM_XEN_STARTING, CPUHP_AP_LEDTRIG_STARTING, CPUHP_AP_NOTIFY_STARTING, CPUHP_AP_ONLINE, -- cgit v1.2.3-59-g8ed1b From 65264e3bc3d479a87a3a9dbc6f3daee6439ce5c0 Mon Sep 17 00:00:00 2001 From: Richard Cochran Date: Wed, 13 Jul 2016 17:16:53 +0000 Subject: MIPS/Loongson-3: Convert oprofile to hotplug state machine Install the callbacks via the state machine and let the core invoke the callbacks on the already online CPUs. 
Signed-off-by: Richard Cochran Signed-off-by: Anna-Maria Gleixner Reviewed-by: Sebastian Andrzej Siewior Acked-by: Ralf Baechle Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Robert Richter Cc: Thomas Gleixner Cc: linux-mips@linux-mips.org Cc: oprofile-list@lists.sf.net Cc: rt@linutronix.de Link: http://lkml.kernel.org/r/20160713153337.054827168@linutronix.de Signed-off-by: Ingo Molnar --- arch/mips/oprofile/op_model_loongson3.c | 35 +++++++++++++-------------------- include/linux/cpuhotplug.h | 1 + 2 files changed, 15 insertions(+), 21 deletions(-) (limited to 'include/linux') diff --git a/arch/mips/oprofile/op_model_loongson3.c b/arch/mips/oprofile/op_model_loongson3.c index 8bcf7fc40f0d..85f3ee4ab456 100644 --- a/arch/mips/oprofile/op_model_loongson3.c +++ b/arch/mips/oprofile/op_model_loongson3.c @@ -168,33 +168,26 @@ static int loongson3_perfcount_handler(void) return handled; } -static int loongson3_cpu_callback(struct notifier_block *nfb, - unsigned long action, void *hcpu) +static int loongson3_starting_cpu(unsigned int cpu) { - switch (action) { - case CPU_STARTING: - case CPU_STARTING_FROZEN: - write_c0_perflo1(reg.control1); - write_c0_perflo2(reg.control2); - break; - case CPU_DYING: - case CPU_DYING_FROZEN: - write_c0_perflo1(0xc0000000); - write_c0_perflo2(0x40000000); - break; - } - - return NOTIFY_OK; + write_c0_perflo1(reg.control1); + write_c0_perflo2(reg.control2); + return 0; } -static struct notifier_block loongson3_notifier_block = { - .notifier_call = loongson3_cpu_callback -}; +static int loongson3_dying_cpu(unsigned int cpu) +{ + write_c0_perflo1(0xc0000000); + write_c0_perflo2(0x40000000); + return 0; +} static int __init loongson3_init(void) { on_each_cpu(reset_counters, NULL, 1); - register_hotcpu_notifier(&loongson3_notifier_block); + cpuhp_setup_state_nocalls(CPUHP_AP_MIPS_OP_LOONGSON3_STARTING, + "AP_MIPS_OP_LOONGSON3_STARTING", + loongson3_starting_cpu, loongson3_dying_cpu); save_perf_irq = perf_irq; perf_irq = loongson3_perfcount_handler; @@ -204,7 +197,7 @@ static int __init loongson3_init(void) static void loongson3_exit(void) { on_each_cpu(reset_counters, NULL, 1); - unregister_hotcpu_notifier(&loongson3_notifier_block); + cpuhp_remove_state_nocalls(CPUHP_AP_MIPS_OP_LOONGSON3_STARTING); perf_irq = save_perf_irq; } diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 4c02b52dc77f..ad26e353a5c0 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -35,6 +35,7 @@ enum cpuhp_state { CPUHP_AP_PERF_X86_CSTATE_STARTING, CPUHP_AP_PERF_XTENSA_STARTING, CPUHP_AP_PERF_METAG_STARTING, + CPUHP_AP_MIPS_OP_LOONGSON3_STARTING, CPUHP_AP_ARM_VFP_STARTING, CPUHP_AP_PERF_ARM_STARTING, CPUHP_AP_ARM_L2X0_STARTING, -- cgit v1.2.3-59-g8ed1b From 2b5283d12f678391616718229aeabeca47497db5 Mon Sep 17 00:00:00 2001 From: Richard Cochran Date: Wed, 13 Jul 2016 17:16:54 +0000 Subject: hwtracing/coresight-etm3x: Convert to hotplug state machine This driver has an asymmetry of ONLINE code without any corresponding tear down code. Otherwise, this is a straightforward conversion. 
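Note on the dynamic state used below (illustrative sketch, not part of the patch): a driver without a fixed slot in the online section can pass CPUHP_AP_ONLINE_DYN, in which case cpuhp_setup_state_nocalls() returns the dynamically allocated state number (> 0) on success. That value has to be remembered so the state can be removed again, which is what the new hp_online variable is for:

	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
					"AP_ARM_CORESIGHT_ONLINE",
					etm_online_cpu, NULL);
	if (ret < 0)
		goto err_arch_supported;
	hp_online = ret;	/* passed to cpuhp_remove_state_nocalls() later */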
Signed-off-by: Richard Cochran Signed-off-by: Anna-Maria Gleixner Reviewed-by: Sebastian Andrzej Siewior Acked-by: Mathieu Poirier Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-arm-kernel@lists.infradead.org Cc: rt@linutronix.de Link: http://lkml.kernel.org/r/20160713153337.147128995@linutronix.de Signed-off-by: Ingo Molnar --- drivers/hwtracing/coresight/coresight-etm3x.c | 90 +++++++++++++++------------ include/linux/cpuhotplug.h | 1 + 2 files changed, 50 insertions(+), 41 deletions(-) (limited to 'include/linux') diff --git a/drivers/hwtracing/coresight/coresight-etm3x.c b/drivers/hwtracing/coresight/coresight-etm3x.c index d83ab82672e4..2de4cad9c5ed 100644 --- a/drivers/hwtracing/coresight/coresight-etm3x.c +++ b/drivers/hwtracing/coresight/coresight-etm3x.c @@ -51,6 +51,8 @@ module_param_named(boot_enable, boot_enable, int, S_IRUGO); static int etm_count; static struct etm_drvdata *etmdrvdata[NR_CPUS]; +static enum cpuhp_state hp_online; + /* * Memory mapped writes to clear os lock are not supported on some processors * and OS lock must be unlocked before any memory mapped access on such @@ -481,8 +483,7 @@ static int etm_enable_sysfs(struct coresight_device *csdev) /* * Configure the ETM only if the CPU is online. If it isn't online - * hw configuration will take place when 'CPU_STARTING' is received - * in @etm_cpu_callback. + * hw configuration will take place on the local CPU during bring up. */ if (cpu_online(drvdata->cpu)) { ret = smp_call_function_single(drvdata->cpu, @@ -641,47 +642,44 @@ static const struct coresight_ops etm_cs_ops = { .source_ops = &etm_source_ops, }; -static int etm_cpu_callback(struct notifier_block *nfb, unsigned long action, - void *hcpu) +static int etm_online_cpu(unsigned int cpu) { - unsigned int cpu = (unsigned long)hcpu; - if (!etmdrvdata[cpu]) - goto out; + return 0; - switch (action & (~CPU_TASKS_FROZEN)) { - case CPU_STARTING: - spin_lock(&etmdrvdata[cpu]->spinlock); - if (!etmdrvdata[cpu]->os_unlock) { - etm_os_unlock(etmdrvdata[cpu]); - etmdrvdata[cpu]->os_unlock = true; - } - - if (local_read(&etmdrvdata[cpu]->mode)) - etm_enable_hw(etmdrvdata[cpu]); - spin_unlock(&etmdrvdata[cpu]->spinlock); - break; + if (etmdrvdata[cpu]->boot_enable && !etmdrvdata[cpu]->sticky_enable) + coresight_enable(etmdrvdata[cpu]->csdev); + return 0; +} - case CPU_ONLINE: - if (etmdrvdata[cpu]->boot_enable && - !etmdrvdata[cpu]->sticky_enable) - coresight_enable(etmdrvdata[cpu]->csdev); - break; +static int etm_starting_cpu(unsigned int cpu) +{ + if (!etmdrvdata[cpu]) + return 0; - case CPU_DYING: - spin_lock(&etmdrvdata[cpu]->spinlock); - if (local_read(&etmdrvdata[cpu]->mode)) - etm_disable_hw(etmdrvdata[cpu]); - spin_unlock(&etmdrvdata[cpu]->spinlock); - break; + spin_lock(&etmdrvdata[cpu]->spinlock); + if (!etmdrvdata[cpu]->os_unlock) { + etm_os_unlock(etmdrvdata[cpu]); + etmdrvdata[cpu]->os_unlock = true; } -out: - return NOTIFY_OK; + + if (local_read(&etmdrvdata[cpu]->mode)) + etm_enable_hw(etmdrvdata[cpu]); + spin_unlock(&etmdrvdata[cpu]->spinlock); + return 0; } -static struct notifier_block etm_cpu_notifier = { - .notifier_call = etm_cpu_callback, -}; +static int etm_dying_cpu(unsigned int cpu) +{ + if (!etmdrvdata[cpu]) + return 0; + + spin_lock(&etmdrvdata[cpu]->spinlock); + if (local_read(&etmdrvdata[cpu]->mode)) + etm_disable_hw(etmdrvdata[cpu]); + spin_unlock(&etmdrvdata[cpu]->spinlock); + return 0; +} static bool etm_arch_supported(u8 arch) { @@ -806,9 +804,17 @@ static int etm_probe(struct amba_device *adev, const struct amba_id 
*id) etm_init_arch_data, drvdata, 1)) dev_err(dev, "ETM arch init failed\n"); - if (!etm_count++) - register_hotcpu_notifier(&etm_cpu_notifier); - + if (!etm_count++) { + cpuhp_setup_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING, + "AP_ARM_CORESIGHT_STARTING", + etm_starting_cpu, etm_dying_cpu); + ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, + "AP_ARM_CORESIGHT_ONLINE", + etm_online_cpu, NULL); + if (ret < 0) + goto err_arch_supported; + hp_online = ret; + } put_online_cpus(); if (etm_arch_supported(drvdata->arch) == false) { @@ -839,7 +845,6 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id) pm_runtime_put(&adev->dev); dev_info(dev, "%s initialized\n", (char *)id->data); - if (boot_enable) { coresight_enable(drvdata->csdev); drvdata->boot_enable = true; @@ -848,8 +853,11 @@ static int etm_probe(struct amba_device *adev, const struct amba_id *id) return 0; err_arch_supported: - if (--etm_count == 0) - unregister_hotcpu_notifier(&etm_cpu_notifier); + if (--etm_count == 0) { + cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT_STARTING); + if (hp_online) + cpuhp_remove_state_nocalls(hp_online); + } return ret; } diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index ad26e353a5c0..576ad23dd50d 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -49,6 +49,7 @@ enum cpuhp_state { CPUHP_AP_KVM_ARM_VGIC_STARTING, CPUHP_AP_KVM_ARM_TIMER_STARTING, CPUHP_AP_ARM_XEN_STARTING, + CPUHP_AP_ARM_CORESIGHT_STARTING, CPUHP_AP_LEDTRIG_STARTING, CPUHP_AP_NOTIFY_STARTING, CPUHP_AP_ONLINE, -- cgit v1.2.3-59-g8ed1b From 58eb457be0283d9bfc3024eeefea33715391443f Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 13 Jul 2016 17:16:55 +0000 Subject: hwtracing/coresight-etm4x: Convert to hotplug state machine This driver has an asymmetry of ONLINE code without any corresponding tear down code. Otherwise, this is a straightforward conversion. 
Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: Anna-Maria Gleixner Acked-by: Mathieu Poirier Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-arm-kernel@lists.infradead.org Cc: rt@linutronix.de Link: http://lkml.kernel.org/r/20160713153337.228918408@linutronix.de Signed-off-by: Ingo Molnar --- drivers/hwtracing/coresight/coresight-etm4x.c | 87 +++++++++++++++------------ include/linux/cpuhotplug.h | 1 + 2 files changed, 50 insertions(+), 38 deletions(-) (limited to 'include/linux') diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c index 462f0dc15757..1a5e0d14c1dd 100644 --- a/drivers/hwtracing/coresight/coresight-etm4x.c +++ b/drivers/hwtracing/coresight/coresight-etm4x.c @@ -48,6 +48,8 @@ static int etm4_count; static struct etmv4_drvdata *etmdrvdata[NR_CPUS]; static void etm4_set_default(struct etmv4_config *config); +static enum cpuhp_state hp_online; + static void etm4_os_unlock(struct etmv4_drvdata *drvdata) { /* Writing any value to ETMOSLAR unlocks the trace registers */ @@ -673,47 +675,44 @@ void etm4_config_trace_mode(struct etmv4_config *config) config->addr_acc[ETM_DEFAULT_ADDR_COMP + 1] = addr_acc; } -static int etm4_cpu_callback(struct notifier_block *nfb, unsigned long action, - void *hcpu) +static int etm4_online_cpu(unsigned int cpu) { - unsigned int cpu = (unsigned long)hcpu; - if (!etmdrvdata[cpu]) - goto out; - - switch (action & (~CPU_TASKS_FROZEN)) { - case CPU_STARTING: - spin_lock(&etmdrvdata[cpu]->spinlock); - if (!etmdrvdata[cpu]->os_unlock) { - etm4_os_unlock(etmdrvdata[cpu]); - etmdrvdata[cpu]->os_unlock = true; - } - - if (local_read(&etmdrvdata[cpu]->mode)) - etm4_enable_hw(etmdrvdata[cpu]); - spin_unlock(&etmdrvdata[cpu]->spinlock); - break; + return 0; - case CPU_ONLINE: - if (etmdrvdata[cpu]->boot_enable && - !etmdrvdata[cpu]->sticky_enable) - coresight_enable(etmdrvdata[cpu]->csdev); - break; + if (etmdrvdata[cpu]->boot_enable && !etmdrvdata[cpu]->sticky_enable) + coresight_enable(etmdrvdata[cpu]->csdev); + return 0; +} - case CPU_DYING: - spin_lock(&etmdrvdata[cpu]->spinlock); - if (local_read(&etmdrvdata[cpu]->mode)) - etm4_disable_hw(etmdrvdata[cpu]); - spin_unlock(&etmdrvdata[cpu]->spinlock); - break; +static int etm4_starting_cpu(unsigned int cpu) +{ + if (!etmdrvdata[cpu]) + return 0; + + spin_lock(&etmdrvdata[cpu]->spinlock); + if (!etmdrvdata[cpu]->os_unlock) { + etm4_os_unlock(etmdrvdata[cpu]); + etmdrvdata[cpu]->os_unlock = true; } -out: - return NOTIFY_OK; + + if (local_read(&etmdrvdata[cpu]->mode)) + etm4_enable_hw(etmdrvdata[cpu]); + spin_unlock(&etmdrvdata[cpu]->spinlock); + return 0; } -static struct notifier_block etm4_cpu_notifier = { - .notifier_call = etm4_cpu_callback, -}; +static int etm4_dying_cpu(unsigned int cpu) +{ + if (!etmdrvdata[cpu]) + return 0; + + spin_lock(&etmdrvdata[cpu]->spinlock); + if (local_read(&etmdrvdata[cpu]->mode)) + etm4_disable_hw(etmdrvdata[cpu]); + spin_unlock(&etmdrvdata[cpu]->spinlock); + return 0; +} static void etm4_init_trace_id(struct etmv4_drvdata *drvdata) { @@ -767,8 +766,17 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id) etm4_init_arch_data, drvdata, 1)) dev_err(dev, "ETM arch init failed\n"); - if (!etm4_count++) - register_hotcpu_notifier(&etm4_cpu_notifier); + if (!etm4_count++) { + cpuhp_setup_state_nocalls(CPUHP_AP_ARM_CORESIGHT4_STARTING, + "AP_ARM_CORESIGHT4_STARTING", + etm4_starting_cpu, etm4_dying_cpu); + ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, + 
"AP_ARM_CORESIGHT4_ONLINE", + etm4_online_cpu, NULL); + if (ret < 0) + goto err_arch_supported; + hp_online = ret; + } put_online_cpus(); @@ -809,8 +817,11 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id) return 0; err_arch_supported: - if (--etm4_count == 0) - unregister_hotcpu_notifier(&etm4_cpu_notifier); + if (--etm4_count == 0) { + cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CORESIGHT4_STARTING); + if (hp_online) + cpuhp_remove_state_nocalls(hp_online); + } return ret; } diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 576ad23dd50d..6cede6293806 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -50,6 +50,7 @@ enum cpuhp_state { CPUHP_AP_KVM_ARM_TIMER_STARTING, CPUHP_AP_ARM_XEN_STARTING, CPUHP_AP_ARM_CORESIGHT_STARTING, + CPUHP_AP_ARM_CORESIGHT4_STARTING, CPUHP_AP_LEDTRIG_STARTING, CPUHP_AP_NOTIFY_STARTING, CPUHP_AP_ONLINE, -- cgit v1.2.3-59-g8ed1b From 27c01a8c73e15b2135b96a1866e1dbeb797e4806 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 13 Jul 2016 17:16:56 +0000 Subject: arm64/armv8 deprecated: Convert to hotplug state machine Install the callbacks via the state machine. Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: Anna-Maria Gleixner Cc: Ard Biesheuvel Cc: Catalin Marinas Cc: James Morse Cc: Jisheng Zhang Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Shengjiu Wang Cc: Thomas Gleixner Cc: Will Deacon Cc: linux-arm-kernel@lists.infradead.org Cc: rt@linutronix.de Link: http://lkml.kernel.org/r/20160713153337.311115906@linutronix.de Signed-off-by: Ingo Molnar --- arch/arm64/kernel/armv8_deprecated.c | 22 +++++----------------- include/linux/cpuhotplug.h | 1 + 2 files changed, 6 insertions(+), 17 deletions(-) (limited to 'include/linux') diff --git a/arch/arm64/kernel/armv8_deprecated.c b/arch/arm64/kernel/armv8_deprecated.c index c37202c0c838..9668c230674a 100644 --- a/arch/arm64/kernel/armv8_deprecated.c +++ b/arch/arm64/kernel/armv8_deprecated.c @@ -121,7 +121,7 @@ static int run_all_cpu_set_hw_mode(struct insn_emulation *insn, bool enable) * 0 - If all the hooks ran successfully. * -EINVAL - At least one hook is not supported by the CPU. */ -static int run_all_insn_set_hw_mode(unsigned long cpu) +static int run_all_insn_set_hw_mode(unsigned int cpu) { int rc = 0; unsigned long flags; @@ -131,7 +131,7 @@ static int run_all_insn_set_hw_mode(unsigned long cpu) list_for_each_entry(insn, &insn_emulation, node) { bool enable = (insn->current_mode == INSN_HW); if (insn->ops->set_hw_mode && insn->ops->set_hw_mode(enable)) { - pr_warn("CPU[%ld] cannot support the emulation of %s", + pr_warn("CPU[%u] cannot support the emulation of %s", cpu, insn->ops->name); rc = -EINVAL; } @@ -617,20 +617,6 @@ static struct insn_emulation_ops setend_ops = { .set_hw_mode = setend_set_hw_mode, }; -static int insn_cpu_hotplug_notify(struct notifier_block *b, - unsigned long action, void *hcpu) -{ - int rc = 0; - if ((action & ~CPU_TASKS_FROZEN) == CPU_STARTING) - rc = run_all_insn_set_hw_mode((unsigned long)hcpu); - - return notifier_from_errno(rc); -} - -static struct notifier_block insn_cpu_hotplug_notifier = { - .notifier_call = insn_cpu_hotplug_notify, -}; - /* * Invoked as late_initcall, since not needed before init spawned. 
*/ @@ -649,7 +635,9 @@ static int __init armv8_deprecated_init(void) pr_info("setend instruction emulation is not supported on the system"); } - register_cpu_notifier(&insn_cpu_hotplug_notifier); + cpuhp_setup_state_nocalls(CPUHP_AP_ARM64_ISNDEP_STARTING, + "AP_ARM64_ISNDEP_STARTING", + run_all_insn_set_hw_mode, NULL); register_insn_emulation_sysctl(ctl_abi); return 0; diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 6cede6293806..c5b96d0b6fbf 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -51,6 +51,7 @@ enum cpuhp_state { CPUHP_AP_ARM_XEN_STARTING, CPUHP_AP_ARM_CORESIGHT_STARTING, CPUHP_AP_ARM_CORESIGHT4_STARTING, + CPUHP_AP_ARM64_ISNDEP_STARTING, CPUHP_AP_LEDTRIG_STARTING, CPUHP_AP_NOTIFY_STARTING, CPUHP_AP_ONLINE, -- cgit v1.2.3-59-g8ed1b From ae6a8a2ed721ecdbc15d32b2089af5ed2190adc7 Mon Sep 17 00:00:00 2001 From: Richard Cochran Date: Wed, 13 Jul 2016 17:16:57 +0000 Subject: x86/tboot: Convert to hotplug state machine Install the callbacks via the state machine and let the core invoke the callbacks on the already online CPUs. Signed-off-by: Richard Cochran Signed-off-by: Anna-Maria Gleixner Reviewed-by: Sebastian Andrzej Siewior Cc: Gang Wei Cc: Linus Torvalds Cc: Ning Sun Cc: Peter Zijlstra Cc: Richard L Maliszewski Cc: Shane Wang Cc: Thomas Gleixner Cc: rt@linutronix.de Cc: tboot-devel@lists.sourceforge.net Link: http://lkml.kernel.org/r/20160713153337.400227322@linutronix.de Signed-off-by: Ingo Molnar --- arch/x86/kernel/tboot.c | 25 ++++++++----------------- include/linux/cpuhotplug.h | 1 + 2 files changed, 9 insertions(+), 17 deletions(-) (limited to 'include/linux') diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c index 9b0185fbe3eb..654f6c66fe45 100644 --- a/arch/x86/kernel/tboot.c +++ b/arch/x86/kernel/tboot.c @@ -323,25 +323,16 @@ static int tboot_wait_for_aps(int num_aps) return !(atomic_read((atomic_t *)&tboot->num_in_wfs) == num_aps); } -static int tboot_cpu_callback(struct notifier_block *nfb, unsigned long action, - void *hcpu) +static int tboot_dying_cpu(unsigned int cpu) { - switch (action) { - case CPU_DYING: - atomic_inc(&ap_wfs_count); - if (num_online_cpus() == 1) - if (tboot_wait_for_aps(atomic_read(&ap_wfs_count))) - return NOTIFY_BAD; - break; + atomic_inc(&ap_wfs_count); + if (num_online_cpus() == 1) { + if (tboot_wait_for_aps(atomic_read(&ap_wfs_count))) + return -EBUSY; } - return NOTIFY_OK; + return 0; } -static struct notifier_block tboot_cpu_notifier = -{ - .notifier_call = tboot_cpu_callback, -}; - #ifdef CONFIG_DEBUG_FS #define TBOOT_LOG_UUID { 0x26, 0x25, 0x19, 0xc0, 0x30, 0x6b, 0xb4, 0x4d, \ @@ -417,8 +408,8 @@ static __init int tboot_late_init(void) tboot_create_trampoline(); atomic_set(&ap_wfs_count, 0); - register_hotcpu_notifier(&tboot_cpu_notifier); - + cpuhp_setup_state(CPUHP_AP_X86_TBOOT_DYING, "AP_X86_TBOOT_DYING", NULL, + tboot_dying_cpu); #ifdef CONFIG_DEBUG_FS debugfs_create_file("tboot_log", S_IRUSR, arch_debugfs_dir, NULL, &tboot_log_fops); diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index c5b96d0b6fbf..070cc7f28862 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -53,6 +53,7 @@ enum cpuhp_state { CPUHP_AP_ARM_CORESIGHT4_STARTING, CPUHP_AP_ARM64_ISNDEP_STARTING, CPUHP_AP_LEDTRIG_STARTING, + CPUHP_AP_X86_TBOOT_DYING, CPUHP_AP_NOTIFY_STARTING, CPUHP_AP_ONLINE, CPUHP_TEARDOWN_CPU, -- cgit v1.2.3-59-g8ed1b From 27590dc17b34aedc4f3e14bd107ee59b9db9b0a6 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 15 Jul 2016 
10:41:04 +0200 Subject: hrtimer: Convert to hotplug state machine Split out the clockevents callbacks instead of piggybacking them on hrtimers. This gets rid of a POST_DEAD user. See commit: 54e88fad223c ("sched: Make sure timers have migrated before killing the migration_thread") We just move the callback state to the proper place in the state machine. Signed-off-by: Thomas Gleixner Signed-off-by: Anna-Maria Gleixner Reviewed-by: Sebastian Andrzej Siewior Cc: Linus Torvalds Cc: Oleg Nesterov Cc: Peter Zijlstra Cc: Rasmus Villemoes Cc: Rusty Russell Cc: rt@linutronix.de Link: http://lkml.kernel.org/r/20160713153337.485419196@linutronix.de Signed-off-by: Ingo Molnar --- include/linux/cpuhotplug.h | 1 + include/linux/hrtimer.h | 7 +++++++ kernel/cpu.c | 5 +++++ kernel/time/hrtimer.c | 40 +++++----------------------------------- 4 files changed, 18 insertions(+), 35 deletions(-) (limited to 'include/linux') diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 070cc7f28862..04ecc55009ed 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -15,6 +15,7 @@ enum cpuhp_state { CPUHP_X86_HPET_DEAD, CPUHP_X86_APB_DEAD, CPUHP_WORKQUEUE_PREP, + CPUHP_HRTIMERS_PREPARE, CPUHP_NOTIFY_PREPARE, CPUHP_BRINGUP_CPU, CPUHP_AP_IDLE_DEAD, diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index c98c6539e2c2..5e00f80b1535 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -494,4 +494,11 @@ extern void __init hrtimers_init(void); /* Show pending timers: */ extern void sysrq_timer_list_show(void); +int hrtimers_prepare_cpu(unsigned int cpu); +#ifdef CONFIG_HOTPLUG_CPU +int hrtimers_dead_cpu(unsigned int cpu); +#else +#define hrtimers_dead_cpu NULL +#endif + #endif diff --git a/kernel/cpu.c b/kernel/cpu.c index af53f820fec9..85500e7fe238 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -1190,6 +1190,11 @@ static struct cpuhp_step cpuhp_bp_states[] = { .startup = workqueue_prepare_cpu, .teardown = NULL, }, + [CPUHP_HRTIMERS_PREPARE] = { + .name = "hrtimers prepare", + .startup = hrtimers_prepare_cpu, + .teardown = hrtimers_dead_cpu, + }, /* * Preparatory and dead notifiers. Will be replaced once the notifiers * are converted to states. 
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c index d13c9aebf7a3..9ba7c820fc23 100644 --- a/kernel/time/hrtimer.c +++ b/kernel/time/hrtimer.c @@ -1590,7 +1590,7 @@ SYSCALL_DEFINE2(nanosleep, struct timespec __user *, rqtp, /* * Functions related to boot-time initialization: */ -static void init_hrtimers_cpu(int cpu) +int hrtimers_prepare_cpu(unsigned int cpu) { struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu); int i; @@ -1602,6 +1602,7 @@ static void init_hrtimers_cpu(int cpu) cpu_base->cpu = cpu; hrtimer_init_hres(cpu_base); + return 0; } #ifdef CONFIG_HOTPLUG_CPU @@ -1636,7 +1637,7 @@ static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base, } } -static void migrate_hrtimers(int scpu) +int hrtimers_dead_cpu(unsigned int scpu) { struct hrtimer_cpu_base *old_base, *new_base; int i; @@ -1665,45 +1666,14 @@ static void migrate_hrtimers(int scpu) /* Check, if we got expired work to do */ __hrtimer_peek_ahead_timers(); local_irq_enable(); + return 0; } #endif /* CONFIG_HOTPLUG_CPU */ -static int hrtimer_cpu_notify(struct notifier_block *self, - unsigned long action, void *hcpu) -{ - int scpu = (long)hcpu; - - switch (action) { - - case CPU_UP_PREPARE: - case CPU_UP_PREPARE_FROZEN: - init_hrtimers_cpu(scpu); - break; - -#ifdef CONFIG_HOTPLUG_CPU - case CPU_DEAD: - case CPU_DEAD_FROZEN: - migrate_hrtimers(scpu); - break; -#endif - - default: - break; - } - - return NOTIFY_OK; -} - -static struct notifier_block hrtimers_nb = { - .notifier_call = hrtimer_cpu_notify, -}; - void __init hrtimers_init(void) { - hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE, - (void *)(long)smp_processor_id()); - register_cpu_notifier(&hrtimers_nb); + hrtimers_prepare_cpu(smp_processor_id()); } /** -- cgit v1.2.3-59-g8ed1b From 24f73b99716a9cd8cbb345c41ced6b3b5ed94006 Mon Sep 17 00:00:00 2001 From: Richard Cochran Date: Wed, 13 Jul 2016 17:16:59 +0000 Subject: timers/core: Convert to hotplug state machine When tearing down, call timers_dead_cpu() before notify_dead(). There is a hidden dependency between: - timers - block multiqueue - rcutree If timers_dead_cpu() comes later than blk_mq_queue_reinit_notify() that latter function causes a RCU stall. 
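A statically allocated hotplug state like the one added here has two parts: a slot in enum cpuhp_state, whose position relative to the other entries (rather than a notifier priority) determines when its callbacks run, and an entry in the state table in kernel/cpu.c that carries the callbacks themselves. A state that only has work to do when a CPU goes away simply leaves the startup side empty. Illustrative sketch of that shape; CPUHP_FOO_DEAD and foo_dead_cpu() are made-up names, not part of this patch:

    /* hypothetical kernel/cpu.c table entry, names are placeholders */
    [CPUHP_FOO_DEAD] = {
            .name           = "foo dead",
            .startup        = NULL,          /* nothing to do on bring-up */
            .teardown       = foo_dead_cpu,  /* runs on a control CPU after the CPU is down */
    },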
Signed-off-by: Richard Cochran Signed-off-by: Anna-Maria Gleixner Reviewed-by: Sebastian Andrzej Siewior Cc: John Stultz Cc: Linus Torvalds Cc: Oleg Nesterov Cc: Peter Zijlstra Cc: Rasmus Villemoes Cc: Thomas Gleixner Cc: rt@linutronix.de Link: http://lkml.kernel.org/r/20160713153337.566790058@linutronix.de Signed-off-by: Ingo Molnar --- include/linux/cpuhotplug.h | 1 + include/linux/timer.h | 6 ++++++ kernel/cpu.c | 5 +++++ kernel/time/timer.c | 25 ++----------------------- 4 files changed, 14 insertions(+), 23 deletions(-) (limited to 'include/linux') diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 04ecc55009ed..15d46d2a1d16 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -16,6 +16,7 @@ enum cpuhp_state { CPUHP_X86_APB_DEAD, CPUHP_WORKQUEUE_PREP, CPUHP_HRTIMERS_PREPARE, + CPUHP_TIMERS_DEAD, CPUHP_NOTIFY_PREPARE, CPUHP_BRINGUP_CPU, CPUHP_AP_IDLE_DEAD, diff --git a/include/linux/timer.h b/include/linux/timer.h index 4419506b564e..51d601f192d4 100644 --- a/include/linux/timer.h +++ b/include/linux/timer.h @@ -273,4 +273,10 @@ unsigned long __round_jiffies_up_relative(unsigned long j, int cpu); unsigned long round_jiffies_up(unsigned long j); unsigned long round_jiffies_up_relative(unsigned long j); +#ifdef CONFIG_HOTPLUG_CPU +int timers_dead_cpu(unsigned int cpu); +#else +#define timers_dead_cpu NULL +#endif + #endif diff --git a/kernel/cpu.c b/kernel/cpu.c index 85500e7fe238..e1017d92d308 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -1195,6 +1195,11 @@ static struct cpuhp_step cpuhp_bp_states[] = { .startup = hrtimers_prepare_cpu, .teardown = hrtimers_dead_cpu, }, + [CPUHP_TIMERS_DEAD] = { + .name = "timers dead", + .startup = NULL, + .teardown = timers_dead_cpu, + }, /* * Preparatory and dead notifiers. Will be replaced once the notifiers * are converted to states. diff --git a/kernel/time/timer.c b/kernel/time/timer.c index cb9ab401e2d9..555670a5143c 100644 --- a/kernel/time/timer.c +++ b/kernel/time/timer.c @@ -1804,7 +1804,7 @@ static void migrate_timer_list(struct timer_base *new_base, struct hlist_head *h } } -static void migrate_timers(int cpu) +int timers_dead_cpu(unsigned int cpu) { struct timer_base *old_base; struct timer_base *new_base; @@ -1831,29 +1831,9 @@ static void migrate_timers(int cpu) spin_unlock_irq(&new_base->lock); put_cpu_ptr(&timer_bases); } + return 0; } -static int timer_cpu_notify(struct notifier_block *self, - unsigned long action, void *hcpu) -{ - switch (action) { - case CPU_DEAD: - case CPU_DEAD_FROZEN: - migrate_timers((long)hcpu); - break; - default: - break; - } - - return NOTIFY_OK; -} - -static inline void timer_register_cpu_notifier(void) -{ - cpu_notifier(timer_cpu_notify, 0); -} -#else -static inline void timer_register_cpu_notifier(void) { } #endif /* CONFIG_HOTPLUG_CPU */ static void __init init_timer_cpu(int cpu) @@ -1881,7 +1861,6 @@ void __init init_timers(void) { init_timer_cpus(); init_timer_stats(); - timer_register_cpu_notifier(); open_softirq(TIMER_SOFTIRQ, run_timer_softirq); } -- cgit v1.2.3-59-g8ed1b From e722d8daafb974b9ad1bbaf42f384a5ea5929f5f Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 13 Jul 2016 17:16:59 +0000 Subject: profile: Convert to hotplug state machine Install the callbacks via the state machine and let the core invoke the callbacks on the already online CPUs. A lot of code is removed because the for-loop is used and create_hash_tables() is removed since its purpose is covered by the startup / teardown hooks. 
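Two idioms that recur in this series are both visible in the profile conversion. First, the dead callback can double as the error-unwind path: profile_prepare_cpu() below calls profile_dead_cpu() itself when one of the per-CPU page allocations fails, which is what makes the separate create_hash_tables() path unnecessary. Second, the online half is registered at CPUHP_AP_ONLINE_DYN, a dynamically allocated state, so the value returned by cpuhp_setup_state() is the slot that was actually reserved and has to be remembered for a later cpuhp_remove_state(). A minimal sketch of that dynamic-state idiom with made-up foo_* names (not the profile code itself):

    #include <linux/cpuhotplug.h>
    #include <linux/init.h>

    static enum cpuhp_state foo_online_state;       /* hypothetical example state holder */

    static int foo_online_cpu(unsigned int cpu)
    {
            /* per-CPU "online" work; runs on the CPU that is coming up */
            return 0;
    }

    static int __init foo_init(void)
    {
            int ret;

            ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "AP_FOO_ONLINE",
                                    foo_online_cpu, NULL);
            if (ret < 0)
                    return ret;
            /* for dynamic states the reserved slot is the return value */
            foo_online_state = ret;
            return 0;
    }

    /* later, on an error or removal path: cpuhp_remove_state(foo_online_state); */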
Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: Anna-Maria Gleixner Cc: Andrew Morton Cc: Arnd Bergmann Cc: Linus Torvalds Cc: Mel Gorman Cc: Michal Hocko Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Vlastimil Babka Cc: rt@linutronix.de Link: http://lkml.kernel.org/r/20160713153337.649867675@linutronix.de Signed-off-by: Ingo Molnar --- include/linux/cpuhotplug.h | 1 + kernel/profile.c | 181 ++++++++++++++++----------------------------- 2 files changed, 66 insertions(+), 116 deletions(-) (limited to 'include/linux') diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 15d46d2a1d16..ace5ad0fc3ec 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -16,6 +16,7 @@ enum cpuhp_state { CPUHP_X86_APB_DEAD, CPUHP_WORKQUEUE_PREP, CPUHP_HRTIMERS_PREPARE, + CPUHP_PROFILE_PREPARE, CPUHP_TIMERS_DEAD, CPUHP_NOTIFY_PREPARE, CPUHP_BRINGUP_CPU, diff --git a/kernel/profile.c b/kernel/profile.c index c2199e9901c9..2dbccf2d806c 100644 --- a/kernel/profile.c +++ b/kernel/profile.c @@ -328,68 +328,57 @@ out: put_cpu(); } -static int profile_cpu_callback(struct notifier_block *info, - unsigned long action, void *__cpu) +static int profile_dead_cpu(unsigned int cpu) { - int node, cpu = (unsigned long)__cpu; struct page *page; + int i; - switch (action) { - case CPU_UP_PREPARE: - case CPU_UP_PREPARE_FROZEN: - node = cpu_to_mem(cpu); - per_cpu(cpu_profile_flip, cpu) = 0; - if (!per_cpu(cpu_profile_hits, cpu)[1]) { - page = __alloc_pages_node(node, - GFP_KERNEL | __GFP_ZERO, - 0); - if (!page) - return notifier_from_errno(-ENOMEM); - per_cpu(cpu_profile_hits, cpu)[1] = page_address(page); - } - if (!per_cpu(cpu_profile_hits, cpu)[0]) { - page = __alloc_pages_node(node, - GFP_KERNEL | __GFP_ZERO, - 0); - if (!page) - goto out_free; - per_cpu(cpu_profile_hits, cpu)[0] = page_address(page); - } - break; -out_free: - page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]); - per_cpu(cpu_profile_hits, cpu)[1] = NULL; - __free_page(page); - return notifier_from_errno(-ENOMEM); - case CPU_ONLINE: - case CPU_ONLINE_FROZEN: - if (prof_cpu_mask != NULL) - cpumask_set_cpu(cpu, prof_cpu_mask); - break; - case CPU_UP_CANCELED: - case CPU_UP_CANCELED_FROZEN: - case CPU_DEAD: - case CPU_DEAD_FROZEN: - if (prof_cpu_mask != NULL) - cpumask_clear_cpu(cpu, prof_cpu_mask); - if (per_cpu(cpu_profile_hits, cpu)[0]) { - page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[0]); - per_cpu(cpu_profile_hits, cpu)[0] = NULL; + if (prof_cpu_mask != NULL) + cpumask_clear_cpu(cpu, prof_cpu_mask); + + for (i = 0; i < 2; i++) { + if (per_cpu(cpu_profile_hits, cpu)[i]) { + page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[i]); + per_cpu(cpu_profile_hits, cpu)[i] = NULL; __free_page(page); } - if (per_cpu(cpu_profile_hits, cpu)[1]) { - page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]); - per_cpu(cpu_profile_hits, cpu)[1] = NULL; - __free_page(page); + } + return 0; +} + +static int profile_prepare_cpu(unsigned int cpu) +{ + int i, node = cpu_to_mem(cpu); + struct page *page; + + per_cpu(cpu_profile_flip, cpu) = 0; + + for (i = 0; i < 2; i++) { + if (per_cpu(cpu_profile_hits, cpu)[i]) + continue; + + page = __alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0); + if (!page) { + profile_dead_cpu(cpu); + return -ENOMEM; } - break; + per_cpu(cpu_profile_hits, cpu)[i] = page_address(page); + } - return NOTIFY_OK; + return 0; +} + +static int profile_online_cpu(unsigned int cpu) +{ + if (prof_cpu_mask != NULL) + cpumask_set_cpu(cpu, prof_cpu_mask); + + return 0; } + #else /* !CONFIG_SMP */ #define 
profile_flip_buffers() do { } while (0) #define profile_discard_flip_buffers() do { } while (0) -#define profile_cpu_callback NULL static void do_profile_hits(int type, void *__pc, unsigned int nr_hits) { @@ -531,83 +520,43 @@ static const struct file_operations proc_profile_operations = { .llseek = default_llseek, }; -#ifdef CONFIG_SMP -static void profile_nop(void *unused) -{ -} - -static int create_hash_tables(void) +int __ref create_proc_profile(void) { - int cpu; - - for_each_online_cpu(cpu) { - int node = cpu_to_mem(cpu); - struct page *page; - - page = __alloc_pages_node(node, - GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE, - 0); - if (!page) - goto out_cleanup; - per_cpu(cpu_profile_hits, cpu)[1] - = (struct profile_hit *)page_address(page); - page = __alloc_pages_node(node, - GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE, - 0); - if (!page) - goto out_cleanup; - per_cpu(cpu_profile_hits, cpu)[0] - = (struct profile_hit *)page_address(page); - } - return 0; -out_cleanup: - prof_on = 0; - smp_mb(); - on_each_cpu(profile_nop, NULL, 1); - for_each_online_cpu(cpu) { - struct page *page; - - if (per_cpu(cpu_profile_hits, cpu)[0]) { - page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[0]); - per_cpu(cpu_profile_hits, cpu)[0] = NULL; - __free_page(page); - } - if (per_cpu(cpu_profile_hits, cpu)[1]) { - page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[1]); - per_cpu(cpu_profile_hits, cpu)[1] = NULL; - __free_page(page); - } - } - return -1; -} -#else -#define create_hash_tables() ({ 0; }) + struct proc_dir_entry *entry; +#ifdef CONFIG_SMP + enum cpuhp_state online_state; #endif -int __ref create_proc_profile(void) /* false positive from hotcpu_notifier */ -{ - struct proc_dir_entry *entry; int err = 0; if (!prof_on) return 0; - - cpu_notifier_register_begin(); - - if (create_hash_tables()) { - err = -ENOMEM; - goto out; - } - +#ifdef CONFIG_SMP + err = cpuhp_setup_state(CPUHP_PROFILE_PREPARE, "PROFILE_PREPARE", + profile_prepare_cpu, profile_dead_cpu); + if (err) + return err; + + err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "AP_PROFILE_ONLINE", + profile_online_cpu, NULL); + if (err < 0) + goto err_state_prep; + online_state = err; + err = 0; +#endif entry = proc_create("profile", S_IWUSR | S_IRUGO, NULL, &proc_profile_operations); if (!entry) - goto out; + goto err_state_onl; proc_set_size(entry, (1 + prof_len) * sizeof(atomic_t)); - __hotcpu_notifier(profile_cpu_callback, 0); -out: - cpu_notifier_register_done(); + return err; +err_state_onl: +#ifdef CONFIG_SMP + cpuhp_remove_state(online_state); +err_state_prep: + cpuhp_remove_state(CPUHP_PROFILE_PREPARE); +#endif return err; } subsys_initcall(create_proc_profile); -- cgit v1.2.3-59-g8ed1b From 6b2c28471de550308784560206c3365e5179d42f Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 13 Jul 2016 17:17:00 +0000 Subject: x86/x2apic: Convert to CPU hotplug state machine Install the callbacks via the state machine and let the core invoke the callbacks on the already online CPUs. 
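"Let the core invoke the callbacks on the already online CPUs" is the behavioural difference between the two registration helpers used throughout this series: cpuhp_setup_state() installs the callbacks and in addition runs the startup callback for every CPU that is already online, while cpuhp_setup_state_nocalls() only installs them and leaves CPUs that are already up to the caller. Minimal sketch of the two calls; foo_prepare_cpu(), foo_dead_cpu() and CPUHP_FOO_PREPARE are placeholders, not part of this patch:

    /* installs the callbacks and runs foo_prepare_cpu() for each CPU already online */
    ret = cpuhp_setup_state(CPUHP_FOO_PREPARE, "FOO_PREPARE",
                            foo_prepare_cpu, foo_dead_cpu);

    /* only installs the callbacks; CPUs that are already online are left alone */
    ret = cpuhp_setup_state_nocalls(CPUHP_FOO_PREPARE, "FOO_PREPARE",
                                    foo_prepare_cpu, foo_dead_cpu);

Which variant a conversion picks depends on whether the per-CPU state still needs to be set up for the CPUs that booted before the registration, as here, or is handled separately, as in the coresight and powerpc/numa patches elsewhere in this log.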
Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: Anna-Maria Gleixner Cc: Len Brown Cc: Linus Torvalds Cc: Mathias Krause Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: rt@linutronix.de Link: http://lkml.kernel.org/r/20160713153337.736898691@linutronix.de Signed-off-by: Ingo Molnar --- arch/x86/kernel/apic/x2apic_cluster.c | 80 +++++++++++++---------------------- include/linux/cpuhotplug.h | 1 + 2 files changed, 31 insertions(+), 50 deletions(-) (limited to 'include/linux') diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c index aca8b75c1552..b5da5a8e5e45 100644 --- a/arch/x86/kernel/apic/x2apic_cluster.c +++ b/arch/x86/kernel/apic/x2apic_cluster.c @@ -152,68 +152,48 @@ static void init_x2apic_ldr(void) } } - /* - * At CPU state changes, update the x2apic cluster sibling info. - */ -static int -update_clusterinfo(struct notifier_block *nfb, unsigned long action, void *hcpu) +/* + * At CPU state changes, update the x2apic cluster sibling info. + */ +int x2apic_prepare_cpu(unsigned int cpu) { - unsigned int this_cpu = (unsigned long)hcpu; - unsigned int cpu; - int err = 0; - - switch (action) { - case CPU_UP_PREPARE: - if (!zalloc_cpumask_var(&per_cpu(cpus_in_cluster, this_cpu), - GFP_KERNEL)) { - err = -ENOMEM; - } else if (!zalloc_cpumask_var(&per_cpu(ipi_mask, this_cpu), - GFP_KERNEL)) { - free_cpumask_var(per_cpu(cpus_in_cluster, this_cpu)); - err = -ENOMEM; - } - break; - case CPU_UP_CANCELED: - case CPU_UP_CANCELED_FROZEN: - case CPU_DEAD: - for_each_online_cpu(cpu) { - if (x2apic_cluster(this_cpu) != x2apic_cluster(cpu)) - continue; - cpumask_clear_cpu(this_cpu, per_cpu(cpus_in_cluster, cpu)); - cpumask_clear_cpu(cpu, per_cpu(cpus_in_cluster, this_cpu)); - } - free_cpumask_var(per_cpu(cpus_in_cluster, this_cpu)); - free_cpumask_var(per_cpu(ipi_mask, this_cpu)); - break; + if (!zalloc_cpumask_var(&per_cpu(cpus_in_cluster, cpu), GFP_KERNEL)) + return -ENOMEM; + + if (!zalloc_cpumask_var(&per_cpu(ipi_mask, cpu), GFP_KERNEL)) { + free_cpumask_var(per_cpu(cpus_in_cluster, cpu)); + return -ENOMEM; } - return notifier_from_errno(err); + return 0; } -static struct notifier_block x2apic_cpu_notifier = { - .notifier_call = update_clusterinfo, -}; - -static int x2apic_init_cpu_notifier(void) +int x2apic_dead_cpu(unsigned int this_cpu) { - int cpu = smp_processor_id(); - - zalloc_cpumask_var(&per_cpu(cpus_in_cluster, cpu), GFP_KERNEL); - zalloc_cpumask_var(&per_cpu(ipi_mask, cpu), GFP_KERNEL); + int cpu; - BUG_ON(!per_cpu(cpus_in_cluster, cpu) || !per_cpu(ipi_mask, cpu)); - - cpumask_set_cpu(cpu, per_cpu(cpus_in_cluster, cpu)); - register_hotcpu_notifier(&x2apic_cpu_notifier); - return 1; + for_each_online_cpu(cpu) { + if (x2apic_cluster(this_cpu) != x2apic_cluster(cpu)) + continue; + cpumask_clear_cpu(this_cpu, per_cpu(cpus_in_cluster, cpu)); + cpumask_clear_cpu(cpu, per_cpu(cpus_in_cluster, this_cpu)); + } + free_cpumask_var(per_cpu(cpus_in_cluster, this_cpu)); + free_cpumask_var(per_cpu(ipi_mask, this_cpu)); + return 0; } static int x2apic_cluster_probe(void) { - if (x2apic_mode) - return x2apic_init_cpu_notifier(); - else + int cpu = smp_processor_id(); + + if (!x2apic_mode) return 0; + + cpumask_set_cpu(cpu, per_cpu(cpus_in_cluster, cpu)); + cpuhp_setup_state(CPUHP_X2APIC_PREPARE, "X2APIC_PREPARE", + x2apic_prepare_cpu, x2apic_dead_cpu); + return 1; } static const struct cpumask *x2apic_cluster_target_cpus(void) diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index ace5ad0fc3ec..78170827a776 100644 --- 
a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -17,6 +17,7 @@ enum cpuhp_state { CPUHP_WORKQUEUE_PREP, CPUHP_HRTIMERS_PREPARE, CPUHP_PROFILE_PREPARE, + CPUHP_X2APIC_PREPARE, CPUHP_TIMERS_DEAD, CPUHP_NOTIFY_PREPARE, CPUHP_BRINGUP_CPU, -- cgit v1.2.3-59-g8ed1b From 31487f8328f20fdb302430b020a5d6e8446c1971 Mon Sep 17 00:00:00 2001 From: Richard Weinberger Date: Wed, 13 Jul 2016 17:17:01 +0000 Subject: smp/cfd: Convert core to hotplug state machine Install the callbacks via the state machine. They are installed at runtime so smpcfd_prepare_cpu() needs to be invoked by the boot-CPU. Signed-off-by: Richard Weinberger [ Added the dropped CPU dying case back in. ] Signed-off-by: Richard Cochran Signed-off-by: Anna-Maria Gleixner Reviewed-by: Sebastian Andrzej Siewior Cc: Davidlohr Bueso Cc: Linus Torvalds Cc: Mel Gorman Cc: Oleg Nesterov Cc: Peter Zijlstra Cc: Rasmus Villemoes Cc: Thomas Gleixner Cc: rt@linutronix.de Link: http://lkml.kernel.org/r/20160713153337.818376366@linutronix.de Signed-off-by: Ingo Molnar --- include/linux/cpuhotplug.h | 2 ++ include/linux/smp.h | 5 +++ kernel/cpu.c | 9 ++++++ kernel/smp.c | 79 +++++++++++++++++++--------------------------- 4 files changed, 48 insertions(+), 47 deletions(-) (limited to 'include/linux') diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 78170827a776..b5cf01ace71b 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -18,6 +18,7 @@ enum cpuhp_state { CPUHP_HRTIMERS_PREPARE, CPUHP_PROFILE_PREPARE, CPUHP_X2APIC_PREPARE, + CPUHP_SMPCFD_PREPARE, CPUHP_TIMERS_DEAD, CPUHP_NOTIFY_PREPARE, CPUHP_BRINGUP_CPU, @@ -57,6 +58,7 @@ enum cpuhp_state { CPUHP_AP_ARM_CORESIGHT4_STARTING, CPUHP_AP_ARM64_ISNDEP_STARTING, CPUHP_AP_LEDTRIG_STARTING, + CPUHP_AP_SMPCFD_DYING, CPUHP_AP_X86_TBOOT_DYING, CPUHP_AP_NOTIFY_STARTING, CPUHP_AP_ONLINE, diff --git a/include/linux/smp.h b/include/linux/smp.h index c4414074bd88..eccae4690f41 100644 --- a/include/linux/smp.h +++ b/include/linux/smp.h @@ -196,4 +196,9 @@ extern void arch_enable_nonboot_cpus_end(void); void smp_setup_processor_id(void); +/* SMP core functions */ +int smpcfd_prepare_cpu(unsigned int cpu); +int smpcfd_dead_cpu(unsigned int cpu); +int smpcfd_dying_cpu(unsigned int cpu); + #endif /* __LINUX_SMP_H */ diff --git a/kernel/cpu.c b/kernel/cpu.c index e1017d92d308..008e2fd40cb1 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -1195,6 +1195,11 @@ static struct cpuhp_step cpuhp_bp_states[] = { .startup = hrtimers_prepare_cpu, .teardown = hrtimers_dead_cpu, }, + [CPUHP_SMPCFD_PREPARE] = { + .name = "SMPCFD prepare", + .startup = smpcfd_prepare_cpu, + .teardown = smpcfd_dead_cpu, + }, [CPUHP_TIMERS_DEAD] = { .name = "timers dead", .startup = NULL, @@ -1218,6 +1223,10 @@ static struct cpuhp_step cpuhp_bp_states[] = { .teardown = NULL, .cant_stop = true, }, + [CPUHP_AP_SMPCFD_DYING] = { + .startup = NULL, + .teardown = smpcfd_dying_cpu, + }, /* * Handled on controll processor until the plugged processor manages * this itself. 
diff --git a/kernel/smp.c b/kernel/smp.c index 74165443c240..7180491c9678 100644 --- a/kernel/smp.c +++ b/kernel/smp.c @@ -33,69 +33,54 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue); static void flush_smp_call_function_queue(bool warn_cpu_offline); -static int -hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu) +int smpcfd_prepare_cpu(unsigned int cpu) { - long cpu = (long)hcpu; struct call_function_data *cfd = &per_cpu(cfd_data, cpu); - switch (action) { - case CPU_UP_PREPARE: - case CPU_UP_PREPARE_FROZEN: - if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL, - cpu_to_node(cpu))) - return notifier_from_errno(-ENOMEM); - cfd->csd = alloc_percpu(struct call_single_data); - if (!cfd->csd) { - free_cpumask_var(cfd->cpumask); - return notifier_from_errno(-ENOMEM); - } - break; - -#ifdef CONFIG_HOTPLUG_CPU - case CPU_UP_CANCELED: - case CPU_UP_CANCELED_FROZEN: - /* Fall-through to the CPU_DEAD[_FROZEN] case. */ - - case CPU_DEAD: - case CPU_DEAD_FROZEN: + if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL, + cpu_to_node(cpu))) + return -ENOMEM; + cfd->csd = alloc_percpu(struct call_single_data); + if (!cfd->csd) { free_cpumask_var(cfd->cpumask); - free_percpu(cfd->csd); - break; + return -ENOMEM; + } - case CPU_DYING: - case CPU_DYING_FROZEN: - /* - * The IPIs for the smp-call-function callbacks queued by other - * CPUs might arrive late, either due to hardware latencies or - * because this CPU disabled interrupts (inside stop-machine) - * before the IPIs were sent. So flush out any pending callbacks - * explicitly (without waiting for the IPIs to arrive), to - * ensure that the outgoing CPU doesn't go offline with work - * still pending. - */ - flush_smp_call_function_queue(false); - break; -#endif - }; + return 0; +} + +int smpcfd_dead_cpu(unsigned int cpu) +{ + struct call_function_data *cfd = &per_cpu(cfd_data, cpu); - return NOTIFY_OK; + free_cpumask_var(cfd->cpumask); + free_percpu(cfd->csd); + return 0; } -static struct notifier_block hotplug_cfd_notifier = { - .notifier_call = hotplug_cfd, -}; +int smpcfd_dying_cpu(unsigned int cpu) +{ + /* + * The IPIs for the smp-call-function callbacks queued by other + * CPUs might arrive late, either due to hardware latencies or + * because this CPU disabled interrupts (inside stop-machine) + * before the IPIs were sent. So flush out any pending callbacks + * explicitly (without waiting for the IPIs to arrive), to + * ensure that the outgoing CPU doesn't go offline with work + * still pending. + */ + flush_smp_call_function_queue(false); + return 0; +} void __init call_function_init(void) { - void *cpu = (void *)(long)smp_processor_id(); int i; for_each_possible_cpu(i) init_llist_head(&per_cpu(call_single_queue, i)); - hotplug_cfd(&hotplug_cfd_notifier, CPU_UP_PREPARE, cpu); - register_cpu_notifier(&hotplug_cfd_notifier); + smpcfd_prepare_cpu(smp_processor_id()); } /* -- cgit v1.2.3-59-g8ed1b From 15d7e3d349a329ad48a29c3a818eacd1c3f7e3ef Mon Sep 17 00:00:00 2001 From: Anna-Maria Gleixner Date: Wed, 13 Jul 2016 17:17:02 +0000 Subject: KVM/arm/arm64/vgic-new: Convert to hotplug state machine Install the callbacks via the state machine and let the core invoke the callbacks on the already online CPUs. 
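The two vgic callbacks land in the *_STARTING section of the state space: during an actual hotplug operation they are executed on the CPU that is coming up or going down itself, which is exactly where enable_percpu_irq() and disable_percpu_irq() have to run. And because the state is registered with the invoking variant, the old on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1) broadcast for the CPUs that are already online can be dropped too. Generic shape of such a pair, reduced to the per-CPU IRQ handling; foo_irq is a placeholder, not something defined in this patch:

    static int foo_starting_cpu(unsigned int cpu)
    {
            /* runs on the incoming CPU, early in bring-up, with interrupts disabled */
            enable_percpu_irq(foo_irq, 0);
            return 0;
    }

    static int foo_dying_cpu(unsigned int cpu)
    {
            /* runs on the outgoing CPU, shortly before it is taken down */
            disable_percpu_irq(foo_irq);
            return 0;
    }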
Signed-off-by: Anna-Maria Gleixner Cc: Andre Przywara Cc: Christoffer Dall Cc: Eric Auger Cc: Linus Torvalds Cc: Marc Zyngier Cc: Paolo Bonzini Cc: Peter Zijlstra Cc: Radim Krcmar Cc: Thomas Gleixner Cc: kvm@vger.kernel.org Cc: kvmarm@lists.cs.columbia.edu Cc: linux-arm-kernel@lists.infradead.org Cc: rt@linutronix.de Link: http://lkml.kernel.org/r/20160713153337.900484868@linutronix.de Signed-off-by: Ingo Molnar --- include/linux/cpuhotplug.h | 1 + virt/kvm/arm/vgic/vgic-init.c | 31 +++++++++---------------------- 2 files changed, 10 insertions(+), 22 deletions(-) (limited to 'include/linux') diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index b5cf01ace71b..544b5563720e 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -51,6 +51,7 @@ enum cpuhp_state { CPUHP_AP_QCOM_TIMER_STARTING, CPUHP_AP_MIPS_GIC_TIMER_STARTING, CPUHP_AP_KVM_STARTING, + CPUHP_AP_KVM_ARM_VGIC_INIT_STARTING, CPUHP_AP_KVM_ARM_VGIC_STARTING, CPUHP_AP_KVM_ARM_TIMER_STARTING, CPUHP_AP_ARM_XEN_STARTING, diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c index a1442f7c9c4d..2c7f0d5a62ea 100644 --- a/virt/kvm/arm/vgic/vgic-init.c +++ b/virt/kvm/arm/vgic/vgic-init.c @@ -353,32 +353,19 @@ out: /* GENERIC PROBE */ -static void vgic_init_maintenance_interrupt(void *info) +static int vgic_init_cpu_starting(unsigned int cpu) { enable_percpu_irq(kvm_vgic_global_state.maint_irq, 0); + return 0; } -static int vgic_cpu_notify(struct notifier_block *self, - unsigned long action, void *cpu) -{ - switch (action) { - case CPU_STARTING: - case CPU_STARTING_FROZEN: - vgic_init_maintenance_interrupt(NULL); - break; - case CPU_DYING: - case CPU_DYING_FROZEN: - disable_percpu_irq(kvm_vgic_global_state.maint_irq); - break; - } - return NOTIFY_OK; +static int vgic_init_cpu_dying(unsigned int cpu) +{ + disable_percpu_irq(kvm_vgic_global_state.maint_irq); + return 0; } -static struct notifier_block vgic_cpu_nb = { - .notifier_call = vgic_cpu_notify, -}; - static irqreturn_t vgic_maintenance_handler(int irq, void *data) { /* @@ -434,14 +421,14 @@ int kvm_vgic_hyp_init(void) return ret; } - ret = __register_cpu_notifier(&vgic_cpu_nb); + ret = cpuhp_setup_state(CPUHP_AP_KVM_ARM_VGIC_INIT_STARTING, + "AP_KVM_ARM_VGIC_INIT_STARTING", + vgic_init_cpu_starting, vgic_init_cpu_dying); if (ret) { kvm_err("Cannot register vgic CPU notifier\n"); goto out_free_irq; } - on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1); - kvm_info("vgic interrupt IRQ%d\n", kvm_vgic_global_state.maint_irq); return 0; -- cgit v1.2.3-59-g8ed1b From 4df8374254ea9294dfe4b8c447a1b7eddc543dbf Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 13 Jul 2016 17:17:03 +0000 Subject: rcu: Convert rcutree to hotplug state machine Straight forward conversion to the state machine. Though the question arises whether this needs really all these state transitions to work. Signed-off-by: Thomas Gleixner Signed-off-by: Anna-Maria Gleixner Reviewed-by: Sebastian Andrzej Siewior Cc: Linus Torvalds Cc: Paul E. 
McKenney Cc: Peter Zijlstra Cc: rt@linutronix.de Link: http://lkml.kernel.org/r/20160713153337.982013161@linutronix.de Signed-off-by: Ingo Molnar --- include/linux/cpuhotplug.h | 3 ++ include/linux/rcutiny.h | 7 +++ include/linux/rcutree.h | 7 +++ kernel/cpu.c | 14 ++++++ kernel/rcu/tree.c | 105 ++++++++++++++++++++++----------------------- 5 files changed, 83 insertions(+), 53 deletions(-) (limited to 'include/linux') diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 544b5563720e..201a2e23bc49 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -20,11 +20,13 @@ enum cpuhp_state { CPUHP_X2APIC_PREPARE, CPUHP_SMPCFD_PREPARE, CPUHP_TIMERS_DEAD, + CPUHP_RCUTREE_PREP, CPUHP_NOTIFY_PREPARE, CPUHP_BRINGUP_CPU, CPUHP_AP_IDLE_DEAD, CPUHP_AP_OFFLINE, CPUHP_AP_SCHED_STARTING, + CPUHP_AP_RCUTREE_DYING, CPUHP_AP_IRQ_GIC_STARTING, CPUHP_AP_IRQ_GICV3_STARTING, CPUHP_AP_IRQ_HIP04_STARTING, @@ -80,6 +82,7 @@ enum cpuhp_state { CPUHP_AP_PERF_ARM_CCI_ONLINE, CPUHP_AP_PERF_ARM_CCN_ONLINE, CPUHP_AP_WORKQUEUE_ONLINE, + CPUHP_AP_RCUTREE_ONLINE, CPUHP_AP_NOTIFY_ONLINE, CPUHP_AP_ONLINE_DYN, CPUHP_AP_ONLINE_DYN_END = CPUHP_AP_ONLINE_DYN + 30, diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h index 93aea75029fb..ac81e4063b40 100644 --- a/include/linux/rcutiny.h +++ b/include/linux/rcutiny.h @@ -243,4 +243,11 @@ static inline void rcu_all_qs(void) barrier(); /* Avoid RCU read-side critical sections leaking across. */ } +/* RCUtree hotplug events */ +#define rcutree_prepare_cpu NULL +#define rcutree_online_cpu NULL +#define rcutree_offline_cpu NULL +#define rcutree_dead_cpu NULL +#define rcutree_dying_cpu NULL + #endif /* __LINUX_RCUTINY_H */ diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index 5043cb823fb2..63a4e4cf40a5 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h @@ -111,4 +111,11 @@ bool rcu_is_watching(void); void rcu_all_qs(void); +/* RCUtree hotplug events */ +int rcutree_prepare_cpu(unsigned int cpu); +int rcutree_online_cpu(unsigned int cpu); +int rcutree_offline_cpu(unsigned int cpu); +int rcutree_dead_cpu(unsigned int cpu); +int rcutree_dying_cpu(unsigned int cpu); + #endif /* __LINUX_RCUTREE_H */ diff --git a/kernel/cpu.c b/kernel/cpu.c index 008e2fd40cb1..f24f45915b54 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -1205,6 +1205,11 @@ static struct cpuhp_step cpuhp_bp_states[] = { .startup = NULL, .teardown = timers_dead_cpu, }, + [CPUHP_RCUTREE_PREP] = { + .name = "RCU-tree prepare", + .startup = rcutree_prepare_cpu, + .teardown = rcutree_dead_cpu, + }, /* * Preparatory and dead notifiers. Will be replaced once the notifiers * are converted to states. @@ -1263,6 +1268,10 @@ static struct cpuhp_step cpuhp_ap_states[] = { .startup = sched_cpu_starting, .teardown = sched_cpu_dying, }, + [CPUHP_AP_RCUTREE_DYING] = { + .startup = NULL, + .teardown = rcutree_dying_cpu, + }, /* * Low level startup/teardown notifiers. Run with interrupts * disabled. Will be removed once the notifiers are converted to @@ -1296,6 +1305,11 @@ static struct cpuhp_step cpuhp_ap_states[] = { .startup = workqueue_online_cpu, .teardown = workqueue_offline_cpu, }, + [CPUHP_AP_RCUTREE_ONLINE] = { + .name = "RCU-tree online", + .startup = rcutree_online_cpu, + .teardown = rcutree_offline_cpu, + }, /* * Online/down_prepare notifiers. 
Will be removed once the notifiers diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index f433959e9322..5d80925e7fc8 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -1073,11 +1073,11 @@ EXPORT_SYMBOL_GPL(rcu_is_watching); * offline to continue to use RCU for one jiffy after marking itself * offline in the cpu_online_mask. This leniency is necessary given the * non-atomic nature of the online and offline processing, for example, - * the fact that a CPU enters the scheduler after completing the CPU_DYING - * notifiers. + * the fact that a CPU enters the scheduler after completing the teardown + * of the CPU. * - * This is also why RCU internally marks CPUs online during the - * CPU_UP_PREPARE phase and offline during the CPU_DEAD phase. + * This is also why RCU internally marks CPUs online during in the + * preparation phase and offline after the CPU has been taken down. * * Disable checking if in an NMI handler because we cannot safely report * errors from NMI handlers anyway. @@ -3806,12 +3806,58 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp) raw_spin_unlock_irqrestore_rcu_node(rnp, flags); } -static void rcu_prepare_cpu(int cpu) +int rcutree_prepare_cpu(unsigned int cpu) { struct rcu_state *rsp; for_each_rcu_flavor(rsp) rcu_init_percpu_data(cpu, rsp); + + rcu_prepare_kthreads(cpu); + rcu_spawn_all_nocb_kthreads(cpu); + + return 0; +} + +static void rcutree_affinity_setting(unsigned int cpu, int outgoing) +{ + struct rcu_data *rdp = per_cpu_ptr(rcu_state_p->rda, cpu); + + rcu_boost_kthread_setaffinity(rdp->mynode, outgoing); +} + +int rcutree_online_cpu(unsigned int cpu) +{ + sync_sched_exp_online_cleanup(cpu); + rcutree_affinity_setting(cpu, -1); + return 0; +} + +int rcutree_offline_cpu(unsigned int cpu) +{ + rcutree_affinity_setting(cpu, cpu); + return 0; +} + + +int rcutree_dying_cpu(unsigned int cpu) +{ + struct rcu_state *rsp; + + for_each_rcu_flavor(rsp) + rcu_cleanup_dying_cpu(rsp); + return 0; +} + +int rcutree_dead_cpu(unsigned int cpu) +{ + struct rcu_state *rsp; + + for_each_rcu_flavor(rsp) { + rcu_cleanup_dead_cpu(cpu, rsp); + do_nocb_deferred_wakeup(per_cpu_ptr(rsp->rda, cpu)); + } + return 0; } #ifdef CONFIG_HOTPLUG_CPU @@ -3851,52 +3897,6 @@ void rcu_report_dead(unsigned int cpu) } #endif -/* - * Handle CPU online/offline notification events. 
- */ -int rcu_cpu_notify(struct notifier_block *self, - unsigned long action, void *hcpu) -{ - long cpu = (long)hcpu; - struct rcu_data *rdp = per_cpu_ptr(rcu_state_p->rda, cpu); - struct rcu_node *rnp = rdp->mynode; - struct rcu_state *rsp; - - switch (action) { - case CPU_UP_PREPARE: - case CPU_UP_PREPARE_FROZEN: - rcu_prepare_cpu(cpu); - rcu_prepare_kthreads(cpu); - rcu_spawn_all_nocb_kthreads(cpu); - break; - case CPU_ONLINE: - case CPU_DOWN_FAILED: - sync_sched_exp_online_cleanup(cpu); - rcu_boost_kthread_setaffinity(rnp, -1); - break; - case CPU_DOWN_PREPARE: - rcu_boost_kthread_setaffinity(rnp, cpu); - break; - case CPU_DYING: - case CPU_DYING_FROZEN: - for_each_rcu_flavor(rsp) - rcu_cleanup_dying_cpu(rsp); - break; - case CPU_DEAD: - case CPU_DEAD_FROZEN: - case CPU_UP_CANCELED: - case CPU_UP_CANCELED_FROZEN: - for_each_rcu_flavor(rsp) { - rcu_cleanup_dead_cpu(cpu, rsp); - do_nocb_deferred_wakeup(per_cpu_ptr(rsp->rda, cpu)); - } - break; - default: - break; - } - return NOTIFY_OK; -} - static int rcu_pm_notify(struct notifier_block *self, unsigned long action, void *hcpu) { @@ -4208,10 +4208,9 @@ void __init rcu_init(void) * this is called early in boot, before either interrupts * or the scheduler are operational. */ - cpu_notifier(rcu_cpu_notify, 0); pm_notifier(rcu_pm_notify, 0); for_each_online_cpu(cpu) - rcu_cpu_notify(NULL, CPU_UP_PREPARE, (void *)(long)cpu); + rcutree_prepare_cpu(cpu); } #include "tree_exp.h" -- cgit v1.2.3-59-g8ed1b From b8a12296ac812f829bd9392346b01b1872d8118b Mon Sep 17 00:00:00 2001 From: Richard Cochran Date: Wed, 13 Jul 2016 17:17:04 +0000 Subject: clocksource/arm_global_timer: Convert to hotplug state machine Install the callbacks via the state machine and let the core invoke the callbacks on the already online CPUs. 
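This and the following clocksource conversions share one pattern: the old notifiers relied on CPU_STARTING/CPU_DYING being delivered on the affected CPU and passed this_cpu_ptr() of the per-CPU clock_event_device into a helper, whereas the new callbacks receive the CPU number and look the device up themselves via per_cpu_ptr() (or this_cpu_ptr(), since *_STARTING callbacks still run on that CPU). A condensed, hypothetical sketch of that shape; the foo_* identifiers are placeholders, the helper functions are the ones appearing in the hunks below:

    static struct clock_event_device __percpu *foo_evt;     /* from alloc_percpu() */

    static int foo_timer_starting_cpu(unsigned int cpu)
    {
            struct clock_event_device *clk = per_cpu_ptr(foo_evt, cpu);

            /* program and register this CPU's local timer */
            clockevents_config_and_register(clk, foo_timer_rate, 1, ULONG_MAX);
            enable_percpu_irq(clk->irq, 0);
            return 0;
    }

    static int foo_timer_dying_cpu(unsigned int cpu)
    {
            struct clock_event_device *clk = per_cpu_ptr(foo_evt, cpu);

            clk->set_state_shutdown(clk);
            disable_percpu_irq(clk->irq);
            return 0;
    }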
Signed-off-by: Richard Cochran Signed-off-by: Anna-Maria Gleixner Reviewed-by: Sebastian Andrzej Siewior Cc: Daniel Lezcano Cc: Linus Torvalds Cc: Maxime Coquelin Cc: Patrice Chotard Cc: Peter Zijlstra Cc: Srinivas Kandagatla Cc: Thomas Gleixner Cc: kernel@stlinux.com Cc: linux-arm-kernel@lists.infradead.org Cc: rt@linutronix.de Link: http://lkml.kernel.org/r/20160713153338.062741642@linutronix.de Signed-off-by: Ingo Molnar --- drivers/clocksource/arm_global_timer.c | 39 +++++++++------------------------- include/linux/cpuhotplug.h | 1 + 2 files changed, 11 insertions(+), 29 deletions(-) (limited to 'include/linux') diff --git a/drivers/clocksource/arm_global_timer.c b/drivers/clocksource/arm_global_timer.c index 2a9ceb6e93f9..8da03298f844 100644 --- a/drivers/clocksource/arm_global_timer.c +++ b/drivers/clocksource/arm_global_timer.c @@ -165,9 +165,9 @@ static irqreturn_t gt_clockevent_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } -static int gt_clockevents_init(struct clock_event_device *clk) +static int gt_starting_cpu(unsigned int cpu) { - int cpu = smp_processor_id(); + struct clock_event_device *clk = this_cpu_ptr(gt_evt); clk->name = "arm_global_timer"; clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT | @@ -186,10 +186,13 @@ static int gt_clockevents_init(struct clock_event_device *clk) return 0; } -static void gt_clockevents_stop(struct clock_event_device *clk) +static int gt_dying_cpu(unsigned int cpu) { + struct clock_event_device *clk = this_cpu_ptr(gt_evt); + gt_clockevent_shutdown(clk); disable_percpu_irq(clk->irq); + return 0; } static cycle_t gt_clocksource_read(struct clocksource *cs) @@ -252,24 +255,6 @@ static int __init gt_clocksource_init(void) return clocksource_register_hz(>_clocksource, gt_clk_rate); } -static int gt_cpu_notify(struct notifier_block *self, unsigned long action, - void *hcpu) -{ - switch (action & ~CPU_TASKS_FROZEN) { - case CPU_STARTING: - gt_clockevents_init(this_cpu_ptr(gt_evt)); - break; - case CPU_DYING: - gt_clockevents_stop(this_cpu_ptr(gt_evt)); - break; - } - - return NOTIFY_OK; -} -static struct notifier_block gt_cpu_nb = { - .notifier_call = gt_cpu_notify, -}; - static int __init global_timer_of_register(struct device_node *np) { struct clk *gt_clk; @@ -325,18 +310,14 @@ static int __init global_timer_of_register(struct device_node *np) goto out_free; } - err = register_cpu_notifier(>_cpu_nb); - if (err) { - pr_warn("global-timer: unable to register cpu notifier.\n"); - goto out_irq; - } - - /* Immediately configure the timer on the boot CPU */ + /* Register and immediately configure the timer on the boot CPU */ err = gt_clocksource_init(); if (err) goto out_irq; - err = gt_clockevents_init(this_cpu_ptr(gt_evt)); + err = cpuhp_setup_state(CPUHP_AP_ARM_GLOBAL_TIMER_STARTING, + "AP_ARM_GLOBAL_TIMER_STARTING", + gt_starting_cpu, gt_dying_cpu); if (err) goto out_irq; diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 201a2e23bc49..d24b0fc2f88d 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -47,6 +47,7 @@ enum cpuhp_state { CPUHP_AP_PERF_ARM_STARTING, CPUHP_AP_ARM_L2X0_STARTING, CPUHP_AP_ARM_ARCH_TIMER_STARTING, + CPUHP_AP_ARM_GLOBAL_TIMER_STARTING, CPUHP_AP_DUMMY_TIMER_STARTING, CPUHP_AP_ARM_TWD_STARTING, CPUHP_AP_METAG_TIMER_STARTING, -- cgit v1.2.3-59-g8ed1b From d11b3a60fcc8c6e44dd370f1cf2738eb5bec7353 Mon Sep 17 00:00:00 2001 From: Richard Cochran Date: Wed, 13 Jul 2016 17:17:05 +0000 Subject: clocksource/exynos_mct: Convert to hotplug state machine Install the 
callbacks via the state machine and let the core invoke the callbacks on the already online CPUs. Signed-off-by: Richard Cochran Signed-off-by: Anna-Maria Gleixner Reviewed-by: Sebastian Andrzej Siewior Cc: Daniel Lezcano Cc: Krzysztof Kozlowski Cc: Kukjin Kim Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-samsung-soc@vger.kernel.org Cc: rt@linutronix.de Link: http://lkml.kernel.org/r/20160713153338.147940411@linutronix.de Signed-off-by: Ingo Molnar --- drivers/clocksource/exynos_mct.c | 46 +++++++++++----------------------------- include/linux/cpuhotplug.h | 1 + 2 files changed, 13 insertions(+), 34 deletions(-) (limited to 'include/linux') diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c index 0d18dd4b3bd2..41840d02c331 100644 --- a/drivers/clocksource/exynos_mct.c +++ b/drivers/clocksource/exynos_mct.c @@ -443,10 +443,11 @@ static irqreturn_t exynos4_mct_tick_isr(int irq, void *dev_id) return IRQ_HANDLED; } -static int exynos4_local_timer_setup(struct mct_clock_event_device *mevt) +static int exynos4_mct_starting_cpu(unsigned int cpu) { + struct mct_clock_event_device *mevt = + per_cpu_ptr(&percpu_mct_tick, cpu); struct clock_event_device *evt = &mevt->evt; - unsigned int cpu = smp_processor_id(); mevt->base = EXYNOS4_MCT_L_BASE(cpu); snprintf(mevt->name, sizeof(mevt->name), "mct_tick%d", cpu); @@ -480,8 +481,10 @@ static int exynos4_local_timer_setup(struct mct_clock_event_device *mevt) return 0; } -static void exynos4_local_timer_stop(struct mct_clock_event_device *mevt) +static int exynos4_mct_dying_cpu(unsigned int cpu) { + struct mct_clock_event_device *mevt = + per_cpu_ptr(&percpu_mct_tick, cpu); struct clock_event_device *evt = &mevt->evt; evt->set_state_shutdown(evt); @@ -491,39 +494,12 @@ static void exynos4_local_timer_stop(struct mct_clock_event_device *mevt) } else { disable_percpu_irq(mct_irqs[MCT_L0_IRQ]); } + return 0; } -static int exynos4_mct_cpu_notify(struct notifier_block *self, - unsigned long action, void *hcpu) -{ - struct mct_clock_event_device *mevt; - - /* - * Grab cpu pointer in each case to avoid spurious - * preemptible warnings - */ - switch (action & ~CPU_TASKS_FROZEN) { - case CPU_STARTING: - mevt = this_cpu_ptr(&percpu_mct_tick); - exynos4_local_timer_setup(mevt); - break; - case CPU_DYING: - mevt = this_cpu_ptr(&percpu_mct_tick); - exynos4_local_timer_stop(mevt); - break; - } - - return NOTIFY_OK; -} - -static struct notifier_block exynos4_mct_cpu_nb = { - .notifier_call = exynos4_mct_cpu_notify, -}; - static int __init exynos4_timer_resources(struct device_node *np, void __iomem *base) { int err, cpu; - struct mct_clock_event_device *mevt = this_cpu_ptr(&percpu_mct_tick); struct clk *mct_clk, *tick_clk; tick_clk = np ? 
of_clk_get_by_name(np, "fin_pll") : @@ -570,12 +546,14 @@ static int __init exynos4_timer_resources(struct device_node *np, void __iomem * } } - err = register_cpu_notifier(&exynos4_mct_cpu_nb); + /* Install hotplug callbacks which configure the timer on this CPU */ + err = cpuhp_setup_state(CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING, + "AP_EXYNOS4_MCT_TIMER_STARTING", + exynos4_mct_starting_cpu, + exynos4_mct_dying_cpu); if (err) goto out_irq; - /* Immediately configure the timer on the boot CPU */ - exynos4_local_timer_setup(mevt); return 0; out_irq: diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index d24b0fc2f88d..7ad8a7e7806d 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -49,6 +49,7 @@ enum cpuhp_state { CPUHP_AP_ARM_ARCH_TIMER_STARTING, CPUHP_AP_ARM_GLOBAL_TIMER_STARTING, CPUHP_AP_DUMMY_TIMER_STARTING, + CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING, CPUHP_AP_ARM_TWD_STARTING, CPUHP_AP_METAG_TIMER_STARTING, CPUHP_AP_QCOM_TIMER_STARTING, -- cgit v1.2.3-59-g8ed1b From 2c48fef74ca42d05c3efd4345b8a068ebf479a5f Mon Sep 17 00:00:00 2001 From: Richard Cochran Date: Wed, 13 Jul 2016 17:17:06 +0000 Subject: clocksource/armada-370-xp: Convert to hotplug state machine Install the callbacks via the state machine and let the core invoke the callbacks on the already online CPUs. Signed-off-by: Richard Cochran Signed-off-by: Anna-Maria Gleixner Reviewed-by: Sebastian Andrzej Siewior Cc: Daniel Lezcano Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: rt@linutronix.de Link: http://lkml.kernel.org/r/20160713153338.229913786@linutronix.de Signed-off-by: Ingo Molnar --- drivers/clocksource/time-armada-370-xp.c | 41 +++++++++----------------------- include/linux/cpuhotplug.h | 1 + 2 files changed, 12 insertions(+), 30 deletions(-) (limited to 'include/linux') diff --git a/drivers/clocksource/time-armada-370-xp.c b/drivers/clocksource/time-armada-370-xp.c index 20ec066481fe..719b478d136e 100644 --- a/drivers/clocksource/time-armada-370-xp.c +++ b/drivers/clocksource/time-armada-370-xp.c @@ -170,10 +170,10 @@ static irqreturn_t armada_370_xp_timer_interrupt(int irq, void *dev_id) /* * Setup the local clock events for a CPU. 
*/ -static int armada_370_xp_timer_setup(struct clock_event_device *evt) +static int armada_370_xp_timer_starting_cpu(unsigned int cpu) { + struct clock_event_device *evt = per_cpu_ptr(armada_370_xp_evt, cpu); u32 clr = 0, set = 0; - int cpu = smp_processor_id(); if (timer25Mhz) set = TIMER0_25MHZ; @@ -200,35 +200,15 @@ static int armada_370_xp_timer_setup(struct clock_event_device *evt) return 0; } -static void armada_370_xp_timer_stop(struct clock_event_device *evt) +static int armada_370_xp_timer_dying_cpu(unsigned int cpu) { + struct clock_event_device *evt = per_cpu_ptr(armada_370_xp_evt, cpu); + evt->set_state_shutdown(evt); disable_percpu_irq(evt->irq); + return 0; } -static int armada_370_xp_timer_cpu_notify(struct notifier_block *self, - unsigned long action, void *hcpu) -{ - /* - * Grab cpu pointer in each case to avoid spurious - * preemptible warnings - */ - switch (action & ~CPU_TASKS_FROZEN) { - case CPU_STARTING: - armada_370_xp_timer_setup(this_cpu_ptr(armada_370_xp_evt)); - break; - case CPU_DYING: - armada_370_xp_timer_stop(this_cpu_ptr(armada_370_xp_evt)); - break; - } - - return NOTIFY_OK; -} - -static struct notifier_block armada_370_xp_timer_cpu_nb = { - .notifier_call = armada_370_xp_timer_cpu_notify, -}; - static u32 timer0_ctrl_reg, timer0_local_ctrl_reg; static int armada_370_xp_timer_suspend(void) @@ -322,8 +302,6 @@ static int __init armada_370_xp_timer_common_init(struct device_node *np) return res; } - register_cpu_notifier(&armada_370_xp_timer_cpu_nb); - armada_370_xp_evt = alloc_percpu(struct clock_event_device); if (!armada_370_xp_evt) return -ENOMEM; @@ -341,9 +319,12 @@ static int __init armada_370_xp_timer_common_init(struct device_node *np) return res; } - res = armada_370_xp_timer_setup(this_cpu_ptr(armada_370_xp_evt)); + res = cpuhp_setup_state(CPUHP_AP_ARMADA_TIMER_STARTING, + "AP_ARMADA_TIMER_STARTING", + armada_370_xp_timer_starting_cpu, + armada_370_xp_timer_dying_cpu); if (res) { - pr_err("Failed to setup timer"); + pr_err("Failed to setup hotplug state and timer"); return res; } diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 7ad8a7e7806d..ee7f750b1633 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -53,6 +53,7 @@ enum cpuhp_state { CPUHP_AP_ARM_TWD_STARTING, CPUHP_AP_METAG_TIMER_STARTING, CPUHP_AP_QCOM_TIMER_STARTING, + CPUHP_AP_ARMADA_TIMER_STARTING, CPUHP_AP_MIPS_GIC_TIMER_STARTING, CPUHP_AP_KVM_STARTING, CPUHP_AP_KVM_ARM_VGIC_INIT_STARTING, -- cgit v1.2.3-59-g8ed1b From eb0a9d8c672dc01db41352afa646405d035ee7a4 Mon Sep 17 00:00:00 2001 From: Richard Cochran Date: Wed, 13 Jul 2016 17:17:07 +0000 Subject: clocksource/atlas7: Convert to hotplug state machine Install the callbacks via the state machine and let the core invoke the callbacks on the already online CPUs. 
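A smaller, recurring benefit of these conversions is that a failed registration becomes an ordinary error code. The old atlas7 code below wraps register_cpu_notifier() in BUG_ON(); with the state machine, the init paths simply propagate the return value of cpuhp_setup_state(), as the armada patch above already does and the ARC patch further down does with an explicit pr_err(). Sketch of that pattern with placeholder foo_* names:

    ret = cpuhp_setup_state(CPUHP_AP_FOO_TIMER_STARTING,
                            "AP_FOO_TIMER_STARTING",
                            foo_timer_starting_cpu, foo_timer_dying_cpu);
    if (ret) {
            pr_err("Failed to set up hotplug state\n");
            return ret;
    }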
Signed-off-by: Richard Cochran Signed-off-by: Anna-Maria Gleixner Reviewed-by: Sebastian Andrzej Siewior Cc: Barry Song Cc: Daniel Lezcano Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: rt@linutronix.de Link: http://lkml.kernel.org/r/20160713153338.310333816@linutronix.de Signed-off-by: Ingo Molnar --- drivers/clocksource/timer-atlas7.c | 41 +++++++++----------------------------- include/linux/cpuhotplug.h | 1 + 2 files changed, 10 insertions(+), 32 deletions(-) (limited to 'include/linux') diff --git a/drivers/clocksource/timer-atlas7.c b/drivers/clocksource/timer-atlas7.c index 90f8fbc154a4..4334e0330ada 100644 --- a/drivers/clocksource/timer-atlas7.c +++ b/drivers/clocksource/timer-atlas7.c @@ -172,9 +172,9 @@ static struct irqaction sirfsoc_timer1_irq = { .handler = sirfsoc_timer_interrupt, }; -static int sirfsoc_local_timer_setup(struct clock_event_device *ce) +static int sirfsoc_local_timer_starting_cpu(unsigned int cpu) { - int cpu = smp_processor_id(); + struct clock_event_device *ce = per_cpu_ptr(sirfsoc_clockevent, cpu); struct irqaction *action; if (cpu == 0) @@ -203,50 +203,27 @@ static int sirfsoc_local_timer_setup(struct clock_event_device *ce) return 0; } -static void sirfsoc_local_timer_stop(struct clock_event_device *ce) +static int sirfsoc_local_timer_dying_cpu(unsigned int cpu) { - int cpu = smp_processor_id(); - sirfsoc_timer_count_disable(1); if (cpu == 0) remove_irq(sirfsoc_timer_irq.irq, &sirfsoc_timer_irq); else remove_irq(sirfsoc_timer1_irq.irq, &sirfsoc_timer1_irq); + return 0; } -static int sirfsoc_cpu_notify(struct notifier_block *self, - unsigned long action, void *hcpu) -{ - /* - * Grab cpu pointer in each case to avoid spurious - * preemptible warnings - */ - switch (action & ~CPU_TASKS_FROZEN) { - case CPU_STARTING: - sirfsoc_local_timer_setup(this_cpu_ptr(sirfsoc_clockevent)); - break; - case CPU_DYING: - sirfsoc_local_timer_stop(this_cpu_ptr(sirfsoc_clockevent)); - break; - } - - return NOTIFY_OK; -} - -static struct notifier_block sirfsoc_cpu_nb = { - .notifier_call = sirfsoc_cpu_notify, -}; - static int __init sirfsoc_clockevent_init(void) { sirfsoc_clockevent = alloc_percpu(struct clock_event_device); BUG_ON(!sirfsoc_clockevent); - BUG_ON(register_cpu_notifier(&sirfsoc_cpu_nb)); - - /* Immediately configure the timer on the boot CPU */ - return sirfsoc_local_timer_setup(this_cpu_ptr(sirfsoc_clockevent)); + /* Install and invoke hotplug callbacks */ + return cpuhp_setup_state(CPUHP_AP_MARCO_TIMER_STARTING, + "AP_MARCO_TIMER_STARTING", + sirfsoc_local_timer_starting_cpu, + sirfsoc_local_timer_dying_cpu); } /* initialize the kernel jiffy timer source */ diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index ee7f750b1633..5ac59f7aafce 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -54,6 +54,7 @@ enum cpuhp_state { CPUHP_AP_METAG_TIMER_STARTING, CPUHP_AP_QCOM_TIMER_STARTING, CPUHP_AP_ARMADA_TIMER_STARTING, + CPUHP_AP_MARCO_TIMER_STARTING, CPUHP_AP_MIPS_GIC_TIMER_STARTING, CPUHP_AP_KVM_STARTING, CPUHP_AP_KVM_ARM_VGIC_INIT_STARTING, -- cgit v1.2.3-59-g8ed1b From ecd8081f6fb9032c41e5c38885f06836da6ab455 Mon Sep 17 00:00:00 2001 From: Anna-Maria Gleixner Date: Wed, 13 Jul 2016 17:17:07 +0000 Subject: ARC/time: Convert to hotplug state machine Install the callbacks via the state machine. 
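Because cpuhp_setup_state() also invokes arc_timer_starting_cpu() for the boot CPU right away, the registration in the hunk below is done last, after request_percpu_irq(): the starting callback enables the per-CPU timer interrupt, so the IRQ needs to have been requested by then, and the previously open-coded boot-CPU configuration can be dropped. Reduced sketch of that ordering; the foo_* names are placeholders and the snippet is a presumed shape rather than the exact ARC code:

    /* request the per-CPU IRQ first ... */
    ret = request_percpu_irq(foo_irq, foo_irq_handler, "foo-timer", foo_evt);
    if (ret)
            return ret;

    /*
     * ... then install the state; this also configures the boot CPU,
     * because the starting callback is run for already-online CPUs.
     */
    ret = cpuhp_setup_state(CPUHP_AP_FOO_TIMER_STARTING,
                            "AP_FOO_TIMER_STARTING",
                            foo_timer_starting_cpu, foo_timer_dying_cpu);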
Signed-off-by: Anna-Maria Gleixner Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Vineet Gupta Cc: linux-snps-arc@lists.infradead.org Cc: rt@linutronix.de Link: http://lkml.kernel.org/r/20160713153338.391826254@linutronix.de Signed-off-by: Ingo Molnar --- arch/arc/kernel/time.c | 48 +++++++++++++++++----------------------------- include/linux/cpuhotplug.h | 1 + 2 files changed, 19 insertions(+), 30 deletions(-) (limited to 'include/linux') diff --git a/arch/arc/kernel/time.c b/arch/arc/kernel/time.c index 98f22d2eb563..f927b8dc6edd 100644 --- a/arch/arc/kernel/time.c +++ b/arch/arc/kernel/time.c @@ -296,30 +296,23 @@ static irqreturn_t timer_irq_handler(int irq, void *dev_id) return IRQ_HANDLED; } -static int arc_timer_cpu_notify(struct notifier_block *self, - unsigned long action, void *hcpu) + +static int arc_timer_starting_cpu(unsigned int cpu) { struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device); evt->cpumask = cpumask_of(smp_processor_id()); - switch (action & ~CPU_TASKS_FROZEN) { - case CPU_STARTING: - clockevents_config_and_register(evt, arc_timer_freq, - 0, ULONG_MAX); - enable_percpu_irq(arc_timer_irq, 0); - break; - case CPU_DYING: - disable_percpu_irq(arc_timer_irq); - break; - } - - return NOTIFY_OK; + clockevents_config_and_register(evt, arc_timer_freq, 0, ARC_TIMER_MAX); + enable_percpu_irq(arc_timer_irq, 0); + return 0; } -static struct notifier_block arc_timer_cpu_nb = { - .notifier_call = arc_timer_cpu_notify, -}; +static int arc_timer_dying_cpu(unsigned int cpu) +{ + disable_percpu_irq(arc_timer_irq); + return 0; +} /* * clockevent setup for boot CPU @@ -329,12 +322,6 @@ static int __init arc_clockevent_setup(struct device_node *node) struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device); int ret; - ret = register_cpu_notifier(&arc_timer_cpu_nb); - if (ret) { - pr_err("Failed to register cpu notifier"); - return ret; - } - arc_timer_irq = irq_of_parse_and_map(node, 0); if (arc_timer_irq <= 0) { pr_err("clockevent: missing irq"); @@ -347,11 +334,6 @@ static int __init arc_clockevent_setup(struct device_node *node) return ret; } - evt->irq = arc_timer_irq; - evt->cpumask = cpumask_of(smp_processor_id()); - clockevents_config_and_register(evt, arc_timer_freq, - 0, ARC_TIMER_MAX); - /* Needs apriori irq_set_percpu_devid() done in intc map function */ ret = request_percpu_irq(arc_timer_irq, timer_irq_handler, "Timer0 (per-cpu-tick)", evt); @@ -360,8 +342,14 @@ static int __init arc_clockevent_setup(struct device_node *node) return ret; } - enable_percpu_irq(arc_timer_irq, 0); - + ret = cpuhp_setup_state(CPUHP_AP_ARC_TIMER_STARTING, + "AP_ARC_TIMER_STARTING", + arc_timer_starting_cpu, + arc_timer_dying_cpu); + if (ret) { + pr_err("Failed to setup hotplug state"); + return ret; + } return 0; } diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 5ac59f7aafce..09ef54bcba39 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -56,6 +56,7 @@ enum cpuhp_state { CPUHP_AP_ARMADA_TIMER_STARTING, CPUHP_AP_MARCO_TIMER_STARTING, CPUHP_AP_MIPS_GIC_TIMER_STARTING, + CPUHP_AP_ARC_TIMER_STARTING, CPUHP_AP_KVM_STARTING, CPUHP_AP_KVM_ARM_VGIC_INIT_STARTING, CPUHP_AP_KVM_ARM_VGIC_STARTING, -- cgit v1.2.3-59-g8ed1b From 37b502f121adab26ccc2769c3063f0e1272be7de Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 20 Jul 2016 09:51:11 +0200 Subject: arm/perf: Fix hotplug state machine conversion Mark Rutland pointed out that this commit is incomplete: 7d88eb695a1f ("arm/perf: Convert to 
hotplug state machine") The problem is that: > We may have multiple PMUs (e.g. two in big.LITTLE systems), and > __oprofile_cpu_pmu only contains one of these. So this conversion is not > correct. > > We were relying on the notifier list implicitly containing a list of > those PMUs. It seems like we need an explicit list here. > > We keep __oprofile_cpu_pmu around for legacy 32-bit users of OProfile > (on non-hetereogeneous systems), and that's all that the variable should > be used for. Introduce arm_pmu_list to correctly handle multiple PMUs in the system. Signed-off-by: Sebastian Andrzej Siewior Acked-by: Mark Rutland Cc: Anna-Maria Gleixner Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Will Deacon Cc: linux-tip-commits@vger.kernel.org Cc: rt@linutronix.de Link: http://lkml.kernel.org/r/20160719111733.GA22911@linutronix.de Signed-off-by: Ingo Molnar --- drivers/perf/arm_pmu.c | 53 +++++++++++++++++++++++++++++++------------- include/linux/perf/arm_pmu.h | 1 + 2 files changed, 38 insertions(+), 16 deletions(-) (limited to 'include/linux') diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c index ae9fc6c6add3..f6ab4f7f75bf 100644 --- a/drivers/perf/arm_pmu.c +++ b/drivers/perf/arm_pmu.c @@ -685,6 +685,9 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler) return 0; } +static DEFINE_MUTEX(arm_pmu_mutex); +static LIST_HEAD(arm_pmu_list); + /* * PMU hardware loses all context when a CPU goes offline. * When a CPU is hotplugged back in, since some hardware registers are @@ -693,12 +696,17 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler) */ static int arm_perf_starting_cpu(unsigned int cpu) { - if (!__oprofile_cpu_pmu) - return 0; - if (!cpumask_test_cpu(cpu, &__oprofile_cpu_pmu->supported_cpus)) - return 0; - if (__oprofile_cpu_pmu->reset) - __oprofile_cpu_pmu->reset(__oprofile_cpu_pmu); + struct arm_pmu *pmu; + + mutex_lock(&arm_pmu_mutex); + list_for_each_entry(pmu, &arm_pmu_list, entry) { + + if (!cpumask_test_cpu(cpu, &pmu->supported_cpus)) + continue; + if (pmu->reset) + pmu->reset(pmu); + } + mutex_unlock(&arm_pmu_mutex); return 0; } @@ -810,11 +818,9 @@ static int cpu_pmu_init(struct arm_pmu *cpu_pmu) if (!cpu_hw_events) return -ENOMEM; - err = cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ARM_STARTING, - "AP_PERF_ARM_STARTING", - arm_perf_starting_cpu, NULL); - if (err) - goto out_hw_events; + mutex_lock(&arm_pmu_mutex); + list_add_tail(&cpu_pmu->entry, &arm_pmu_list); + mutex_unlock(&arm_pmu_mutex); err = cpu_pm_pmu_register(cpu_pmu); if (err) @@ -850,8 +856,9 @@ static int cpu_pmu_init(struct arm_pmu *cpu_pmu) return 0; out_unregister: - cpuhp_remove_state_nocalls(CPUHP_AP_PERF_ARM_STARTING); -out_hw_events: + mutex_lock(&arm_pmu_mutex); + list_del(&cpu_pmu->entry); + mutex_unlock(&arm_pmu_mutex); free_percpu(cpu_hw_events); return err; } @@ -859,7 +866,9 @@ out_hw_events: static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu) { cpu_pm_pmu_unregister(cpu_pmu); - cpuhp_remove_state_nocalls(CPUHP_AP_PERF_ARM_STARTING); + mutex_lock(&arm_pmu_mutex); + list_del(&cpu_pmu->entry); + mutex_unlock(&arm_pmu_mutex); free_percpu(cpu_pmu->hw_events); } @@ -1019,8 +1028,6 @@ int arm_pmu_device_probe(struct platform_device *pdev, if (ret) goto out_destroy; - WARN(__oprofile_cpu_pmu, "%s(): missing PMU strucure for CPU-hotplug\n", - __func__); if (!__oprofile_cpu_pmu) __oprofile_cpu_pmu = pmu; @@ -1038,3 +1045,17 @@ out_free: kfree(pmu); return ret; } + +static int arm_pmu_hp_init(void) +{ + int ret; + + ret = 
cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ARM_STARTING, + "AP_PERF_ARM_STARTING", + arm_perf_starting_cpu, NULL); + if (ret) + pr_err("CPU hotplug notifier for ARM PMU could not be registered: %d\n", + ret); + return ret; +} +subsys_initcall(arm_pmu_hp_init); diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h index e6ed34eb2b2b..e18843809eec 100644 --- a/include/linux/perf/arm_pmu.h +++ b/include/linux/perf/arm_pmu.h @@ -109,6 +109,7 @@ struct arm_pmu { DECLARE_BITMAP(pmceid_bitmap, ARMV8_PMUV3_MAX_COMMON_EVENTS); struct platform_device *plat_device; struct pmu_hw_events __percpu *hw_events; + struct list_head entry; struct notifier_block cpu_pm_nb; }; -- cgit v1.2.3-59-g8ed1b From bdab88e006504cd83fac98705814485cbe3ef5b4 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Mon, 18 Jul 2016 16:07:28 +0200 Subject: powerpc/numa: Convert to hotplug state machine Install the callbacks via the state machine. On the boot cpu the callback is invoked manually because cpuhp is not up yet and everything must be preinitialized before additional CPUs are up. Signed-off-by: Sebastian Andrzej Siewior Cc: Nikunj A Dadhania Cc: Peter Zijlstra Cc: Benjamin Herrenschmidt Cc: Bharata B Rao Cc: Raghavendra K T Cc: Linus Torvalds Cc: Christophe Jaillet Cc: Anton Blanchard Cc: Michael Ellerman Cc: Paul Mackerras Cc: Andrew Morton Cc: linuxppc-dev@lists.ozlabs.org Cc: rt@linutronix.de Link: http://lkml.kernel.org/r/20160718140727.GA13132@linutronix.de Signed-off-by: Thomas Gleixner --- arch/powerpc/mm/numa.c | 48 ++++++++++++++++++---------------------------- include/linux/cpuhotplug.h | 1 + 2 files changed, 20 insertions(+), 29 deletions(-) (limited to 'include/linux') diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index 669a15e7fa76..6dc07ddbfd04 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c @@ -581,30 +581,22 @@ static void verify_cpu_node_mapping(int cpu, int node) } } -static int cpu_numa_callback(struct notifier_block *nfb, unsigned long action, - void *hcpu) +/* Must run before sched domains notifier. */ +static int ppc_numa_cpu_prepare(unsigned int cpu) { - unsigned long lcpu = (unsigned long)hcpu; - int ret = NOTIFY_DONE, nid; + int nid; - switch (action) { - case CPU_UP_PREPARE: - case CPU_UP_PREPARE_FROZEN: - nid = numa_setup_cpu(lcpu); - verify_cpu_node_mapping((int)lcpu, nid); - ret = NOTIFY_OK; - break; + nid = numa_setup_cpu(cpu); + verify_cpu_node_mapping(cpu, nid); + return 0; +} + +static int ppc_numa_cpu_dead(unsigned int cpu) +{ #ifdef CONFIG_HOTPLUG_CPU - case CPU_DEAD: - case CPU_DEAD_FROZEN: - case CPU_UP_CANCELED: - case CPU_UP_CANCELED_FROZEN: - unmap_cpu_from_node(lcpu); - ret = NOTIFY_OK; - break; + unmap_cpu_from_node(cpu); #endif - } - return ret; + return 0; } /* @@ -913,11 +905,6 @@ static void __init dump_numa_memory_topology(void) } } -static struct notifier_block ppc64_numa_nb = { - .notifier_call = cpu_numa_callback, - .priority = 1 /* Must run before sched domains notifier. */ -}; - /* Initialize NODE_DATA for a node on the local memory */ static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn) { @@ -985,15 +972,18 @@ void __init initmem_init(void) setup_node_to_cpumask_map(); reset_numa_cpu_lookup_table(); - register_cpu_notifier(&ppc64_numa_nb); + /* * We need the numa_cpu_lookup_table to be accurate for all CPUs, * even before we online them, so that we can use cpu_to_{node,mem} * early in boot, cf. smp_prepare_cpus(). 
+ * _nocalls() + manual invocation is used because cpuhp is not yet + * initialized for the boot CPU. */ - for_each_present_cpu(cpu) { - numa_setup_cpu((unsigned long)cpu); - } + cpuhp_setup_state_nocalls(CPUHP_POWER_NUMA_PREPARE, "POWER_NUMA_PREPARE", + ppc_numa_cpu_prepare, ppc_numa_cpu_dead); + for_each_present_cpu(cpu) + numa_setup_cpu(cpu); } static int __init early_numa(char *p) diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 09ef54bcba39..5015f463d314 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -15,6 +15,7 @@ enum cpuhp_state { CPUHP_X86_HPET_DEAD, CPUHP_X86_APB_DEAD, CPUHP_WORKQUEUE_PREP, + CPUHP_POWER_NUMA_PREPARE, CPUHP_HRTIMERS_PREPARE, CPUHP_PROFILE_PREPARE, CPUHP_X2APIC_PREPARE, -- cgit v1.2.3-59-g8ed1b From cd894f149732a06ba4b2ccdc4cb86edf7ff68620 Mon Sep 17 00:00:00 2001 From: Sebastian Andrzej Siewior Date: Wed, 20 Jul 2016 17:24:55 +0200 Subject: leds/trigger/cpu: Move from CPU_STARTING to ONLINE level There is no need for the LED trigger to be called *that* early in the hotplug process (with interrupts disabled), as explained by Jacek Anaszewski [0]. Therefore this patch moves it to the ONLINE/PREPARE_DOWN level, using dynamic registration for the state id. [0] https://lkml.kernel.org/r/578C92BC.2070603@samsung.com Signed-off-by: Sebastian Andrzej Siewior Acked-by: Jacek Anaszewski Acked-by: Linus Walleij Cc: Peter Zijlstra Cc: Paul Gortmaker Cc: Richard Purdie Cc: rt@linutronix.de Cc: Linus Torvalds Cc: linux-leds@vger.kernel.org Link: http://lkml.kernel.org/r/1469028295-14702-1-git-send-email-bigeasy@linutronix.de Signed-off-by: Thomas Gleixner --- drivers/leds/trigger/ledtrig-cpu.c | 16 ++++++++-------- include/linux/cpuhotplug.h | 1 - 2 files changed, 8 insertions(+), 9 deletions(-) (limited to 'include/linux') diff --git a/drivers/leds/trigger/ledtrig-cpu.c b/drivers/leds/trigger/ledtrig-cpu.c index 4a6a182d0a88..22f0634dd3fa 100644 --- a/drivers/leds/trigger/ledtrig-cpu.c +++ b/drivers/leds/trigger/ledtrig-cpu.c @@ -92,13 +92,13 @@ static struct syscore_ops ledtrig_cpu_syscore_ops = { .resume = ledtrig_cpu_syscore_resume, }; -static int ledtrig_starting_cpu(unsigned int cpu) +static int ledtrig_online_cpu(unsigned int cpu) { ledtrig_cpu(CPU_LED_START); return 0; } -static int ledtrig_dying_cpu(unsigned int cpu) +static int ledtrig_prepare_down_cpu(unsigned int cpu) { ledtrig_cpu(CPU_LED_STOP); return 0; @@ -107,6 +107,7 @@ static int ledtrig_dying_cpu(unsigned int cpu) static int __init ledtrig_cpu_init(void) { int cpu; + int ret; /* Supports up to 9999 cpu cores */ BUILD_BUG_ON(CONFIG_NR_CPUS > 9999); @@ -126,12 +127,11 @@ static int __init ledtrig_cpu_init(void) register_syscore_ops(&ledtrig_cpu_syscore_ops); - /* - * FIXME: Why needs this to happen in the interrupt disabled - * low level bringup phase of a cpu?
- */ - cpuhp_setup_state(CPUHP_AP_LEDTRIG_STARTING, "AP_LEDTRIG_STARTING", - ledtrig_starting_cpu, ledtrig_dying_cpu); + ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "AP_LEDTRIG_STARTING", + ledtrig_online_cpu, ledtrig_prepare_down_cpu); + if (ret < 0) + pr_err("CPU hotplug notifier for ledtrig-cpu could not be registered: %d\n", + ret); pr_info("ledtrig-cpu: registered to indicate activity on CPUs\n"); diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 5015f463d314..6d405db6fb12 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -66,7 +66,6 @@ enum cpuhp_state { CPUHP_AP_ARM_CORESIGHT_STARTING, CPUHP_AP_ARM_CORESIGHT4_STARTING, CPUHP_AP_ARM64_ISNDEP_STARTING, - CPUHP_AP_LEDTRIG_STARTING, CPUHP_AP_SMPCFD_DYING, CPUHP_AP_X86_TBOOT_DYING, CPUHP_AP_NOTIFY_STARTING, -- cgit v1.2.3-59-g8ed1b From 4fae16dffb812f0e0d98a0b2b0856ca48ca63e6c Mon Sep 17 00:00:00 2001 From: Richard Cochran Date: Wed, 27 Jul 2016 11:08:18 +0200 Subject: timers/core: Correct callback order during CPU hot plug On the tear-down path, the dead CPU callback for the timers was misplaced within the 'cpuhp_state' enumeration. There is a hidden dependency between the timers and block multiqueue. The timers callback must happen before the block multiqueue callback otherwise a RCU stall occurs. Move the timers callback to the proper place in the state machine. Reported-and-tested-by: Jon Hunter Reported-by: kbuild test robot Fixes: 24f73b99716a ("timers/core: Convert to hotplug state machine") Signed-off-by: Richard Cochran Cc: Peter Zijlstra Cc: Sebastian Andrzej Siewior Cc: Rasmus Villemoes Cc: John Stultz Cc: rt@linutronix.de Cc: Oleg Nesterov Cc: Linus Torvalds Link: http://lkml.kernel.org/r/1469610498-25914-1-git-send-email-rcochran@linutronix.de Signed-off-by: Thomas Gleixner Signed-off-by: Ingo Molnar --- include/linux/cpuhotplug.h | 2 +- kernel/cpu.c | 15 ++++++++++----- 2 files changed, 11 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 6d405db6fb12..242bf530edfc 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -20,9 +20,9 @@ enum cpuhp_state { CPUHP_PROFILE_PREPARE, CPUHP_X2APIC_PREPARE, CPUHP_SMPCFD_PREPARE, - CPUHP_TIMERS_DEAD, CPUHP_RCUTREE_PREP, CPUHP_NOTIFY_PREPARE, + CPUHP_TIMERS_DEAD, CPUHP_BRINGUP_CPU, CPUHP_AP_IDLE_DEAD, CPUHP_AP_OFFLINE, diff --git a/kernel/cpu.c b/kernel/cpu.c index f24f45915b54..341bf80f80bd 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -1200,11 +1200,6 @@ static struct cpuhp_step cpuhp_bp_states[] = { .startup = smpcfd_prepare_cpu, .teardown = smpcfd_dead_cpu, }, - [CPUHP_TIMERS_DEAD] = { - .name = "timers dead", - .startup = NULL, - .teardown = timers_dead_cpu, - }, [CPUHP_RCUTREE_PREP] = { .name = "RCU-tree prepare", .startup = rcutree_prepare_cpu, @@ -1221,6 +1216,16 @@ static struct cpuhp_step cpuhp_bp_states[] = { .skip_onerr = true, .cant_stop = true, }, + /* + * On the tear-down path, timers_dead_cpu() must be invoked + * before blk_mq_queue_reinit_notify() from notify_dead(), + * otherwise a RCU stall occurs. + */ + [CPUHP_TIMERS_DEAD] = { + .name = "timers dead", + .startup = NULL, + .teardown = timers_dead_cpu, + }, /* Kicks the plugged cpu into life */ [CPUHP_BRINGUP_CPU] = { .name = "cpu:bringup", -- cgit v1.2.3-59-g8ed1b
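The ledtrig-cpu change above is the one conversion in this log that uses the dynamically allocated ONLINE level instead of a fixed entry in enum cpuhp_state. For callbacks with no ordering requirement against other hotplug states, CPUHP_AP_ONLINE_DYN avoids touching cpuhotplug.h at all: cpuhp_setup_state() picks a free dynamic state, runs the online callback on every CPU that is already up, and returns the allocated state as a positive value, which is why the patch checks ret < 0 rather than ret. The sketch below illustrates that pattern in a self-contained form; it is not taken from any commit in this log, and the foo_* names are made up.

#include <linux/cpuhotplug.h>
#include <linux/kernel.h>
#include <linux/module.h>

static enum cpuhp_state foo_hp_state;	/* remembered so the state can be removed */

/* Runs on the incoming CPU once it is fully online, interrupts enabled. */
static int foo_online_cpu(unsigned int cpu)
{
	pr_info("foo: cpu %u is online\n", cpu);
	return 0;
}

/* Runs before the CPU is taken down; undo what foo_online_cpu() did. */
static int foo_prepare_down_cpu(unsigned int cpu)
{
	pr_info("foo: cpu %u is going down\n", cpu);
	return 0;
}

static int __init foo_init(void)
{
	int ret;

	/*
	 * CPUHP_AP_ONLINE_DYN: the core allocates a free dynamic state,
	 * invokes foo_online_cpu() on all already-online CPUs and returns
	 * the state number (positive) on success, a negative errno on error.
	 */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "foo:online",
				foo_online_cpu, foo_prepare_down_cpu);
	if (ret < 0)
		return ret;
	foo_hp_state = ret;
	return 0;
}

static void __exit foo_exit(void)
{
	/* Runs foo_prepare_down_cpu() on all online CPUs, then frees the state. */
	cpuhp_remove_state(foo_hp_state);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");

Keeping the returned state around only matters if the state is ever removed again; a built-in user like ledtrig-cpu can ignore it and merely report the failure, as the patch does.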
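The powerpc/numa conversion shows the boot-time variant of the same API: when the callbacks are installed from early architecture setup code, the hotplug core cannot yet invoke them for the boot CPU, so the _nocalls() form is used and the prepare work is done by hand for all present CPUs. A minimal sketch of that pattern follows; it assumes a hypothetical CPUHP_FOO_PREPARE entry added to the PREPARE section of enum cpuhp_state and a hypothetical foo_early_init() called from early setup code, neither of which exists in the patches above.

#include <linux/cpuhotplug.h>
#include <linux/cpumask.h>
#include <linux/init.h>

/* Assumed: CPUHP_FOO_PREPARE added before CPUHP_BRINGUP_CPU in cpuhotplug.h. */

static int foo_cpu_prepare(unsigned int cpu)
{
	/* Allocate/initialize per-CPU data for @cpu; may sleep. */
	return 0;
}

static int foo_cpu_dead(unsigned int cpu)
{
	/* Tear down what foo_cpu_prepare() set up once @cpu is dead. */
	return 0;
}

void __init foo_early_init(void)
{
	int cpu;

	/*
	 * _nocalls(): only install the callbacks. At this point the state
	 * machine cannot run them for the boot CPU, so cover all present
	 * CPUs manually, mirroring what initmem_init() does above for
	 * numa_setup_cpu().
	 */
	cpuhp_setup_state_nocalls(CPUHP_FOO_PREPARE, "FOO_PREPARE",
				  foo_cpu_prepare, foo_cpu_dead);
	for_each_present_cpu(cpu)
		foo_cpu_prepare(cpu);
}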