author     Nadav Amit <namit@vmware.com>   2021-02-20 15:17:12 -0800
committer  Ingo Molnar <mingo@kernel.org>  2021-03-06 12:59:10 +0100
commit     a5aa5ce300597224ec76dacc8e63ba3ad7a18bbd (patch)
tree       6478eaabc9418b223d639f961fe7b63dabc0e3f5 /kernel/smp.c
parent     x86/mm/tlb: Remove unnecessary uses of the inline keyword (diff)
smp: Inline on_each_cpu_cond() and on_each_cpu()
Simplify the code and avoid having an additional function on the stack
by inlining on_each_cpu_cond() and on_each_cpu().

Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Nadav Amit <namit@vmware.com>
[ Minor edits. ]
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lore.kernel.org/r/20210220231712.2475218-10-namit@vmware.com
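This view is limited to kernel/smp.c, so it only shows the out-of-line
definitions being removed; the header-side counterpart of the commit is not
shown. A minimal sketch of what the inlined wrappers plausibly look like in
include/linux/smp.h, assuming each helper simply forwards to
on_each_cpu_cond_mask() (whose five-argument signature is visible in the
deleted on_each_cpu_cond() below):

/*
 * Sketch only; reconstructed from the commit description, not copied
 * from the (unshown) include/linux/smp.h hunk of this patch.
 */
static inline void on_each_cpu(smp_call_func_t func, void *info, int wait)
{
	/* No condition function: run @func on every online CPU. */
	on_each_cpu_cond_mask(NULL, func, info, wait, cpu_online_mask);
}

static inline void on_each_cpu_mask(const struct cpumask *mask,
				    smp_call_func_t func, void *info,
				    bool wait)
{
	/* Same, but restricted to the online CPUs in @mask. */
	on_each_cpu_cond_mask(NULL, func, info, wait, mask);
}

static inline void on_each_cpu_cond(smp_cond_func_t cond_func,
				    smp_call_func_t func, void *info,
				    bool wait)
{
	/* Only CPUs for which @cond_func returns true are targeted. */
	on_each_cpu_cond_mask(cond_func, func, info, wait, cpu_online_mask);
}

Because the wrappers become static inline, the EXPORT_SYMBOL() entries for
them can be dropped along with the definitions, as the diff below shows.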
Diffstat (limited to 'kernel/smp.c')
-rw-r--r--  kernel/smp.c  56
1 file changed, 0 insertions(+), 56 deletions(-)
diff --git a/kernel/smp.c b/kernel/smp.c
index c8a5a1facc1a..b6375d775e93 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -848,55 +848,6 @@ void __init smp_init(void)
}
/*
- * Call a function on all processors. May be used during early boot while
- * early_boot_irqs_disabled is set. Use local_irq_save/restore() instead
- * of local_irq_disable/enable().
- */
-void on_each_cpu(smp_call_func_t func, void *info, int wait)
-{
- unsigned long flags;
-
- preempt_disable();
- smp_call_function(func, info, wait);
- local_irq_save(flags);
- func(info);
- local_irq_restore(flags);
- preempt_enable();
-}
-EXPORT_SYMBOL(on_each_cpu);
-
-/**
- * on_each_cpu_mask(): Run a function on processors specified by
- * cpumask, which may include the local processor.
- * @mask: The set of cpus to run on (only runs on online subset).
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @wait: If true, wait (atomically) until function has completed
- * on other CPUs.
- *
- * If @wait is true, then returns once @func has returned.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler. The
- * exception is that it may be used during early boot while
- * early_boot_irqs_disabled is set.
- */
-void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
- void *info, bool wait)
-{
- unsigned int scf_flags;
-
- scf_flags = SCF_RUN_LOCAL;
- if (wait)
- scf_flags |= SCF_WAIT;
-
- preempt_disable();
- smp_call_function_many_cond(mask, func, info, scf_flags, NULL);
- preempt_enable();
-}
-EXPORT_SYMBOL(on_each_cpu_mask);
-
-/*
* on_each_cpu_cond(): Call a function on each processor for which
* the supplied function cond_func returns true, optionally waiting
* for all the required CPUs to finish. This may include the local
@@ -932,13 +883,6 @@ void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
}
EXPORT_SYMBOL(on_each_cpu_cond_mask);
-void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
- void *info, bool wait)
-{
- on_each_cpu_cond_mask(cond_func, func, info, wait, cpu_online_mask);
-}
-EXPORT_SYMBOL(on_each_cpu_cond);
-
static void do_nothing(void *unused)
{
}
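Call sites are unaffected by the move: the API and semantics are unchanged,
only the implementation shifts from exported functions to inline wrappers.
For illustration, a typical caller still looks like this; flush_local() and
trigger_flush() are hypothetical names made up for the example:

/* Assumes #include <linux/smp.h>. */
static void flush_local(void *info)
{
	/* Per-CPU work; runs with interrupts disabled on each target CPU. */
}

static void trigger_flush(void)
{
	/* Run flush_local() on every online CPU and wait for completion. */
	on_each_cpu(flush_local, NULL, 1);
}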