 kernel/softirq.c | 98 ++++++++++++++++++++++++++++++++++++++++++--------
 1 file changed, 83 insertions(+), 15 deletions(-)
diff --git a/kernel/softirq.c b/kernel/softirq.c
index d082e7840f88..4dae6ac2e83f 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -280,17 +280,24 @@ static inline void invoke_softirq(void)
wakeup_softirqd();
}
+#define SCHED_SOFTIRQ_MASK BIT(SCHED_SOFTIRQ)
+
/*
* flush_smp_call_function_queue() can raise a soft interrupt in a function
- * call. On RT kernels this is undesired and the only known functionality
- * in the block layer which does this is disabled on RT. If soft interrupts
- * get raised which haven't been raised before the flush, warn so it can be
+ * call. On RT kernels this is undesired, and the only known sources are
+ * the block layer, which is disabled on RT, and the scheduler's idle
+ * load balancing. If soft interrupts get raised which haven't been
+ * raised before the flush, warn if it is not SCHED_SOFTIRQ so it can be
* investigated.
*/
void do_softirq_post_smp_call_flush(unsigned int was_pending)
{
- if (WARN_ON_ONCE(was_pending != local_softirq_pending()))
+ unsigned int is_pending = local_softirq_pending();
+
+ if (unlikely(was_pending != is_pending)) {
+ WARN_ON_ONCE(was_pending != (is_pending & ~SCHED_SOFTIRQ_MASK));
invoke_softirq();
+ }
}
#else /* CONFIG_PREEMPT_RT */
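To make the new check concrete, a worked example (the pending values below are illustrative, not from the patch):

	/* was_pending was sampled before flush_smp_call_function_queue(). */
	was_pending = BIT(TIMER_SOFTIRQ);

	/* Case 1: the flushed call raised SCHED_SOFTIRQ (idle load balancing). */
	is_pending  = BIT(TIMER_SOFTIRQ) | BIT(SCHED_SOFTIRQ);
	/* is_pending & ~SCHED_SOFTIRQ_MASK == was_pending -> no warning,
	 * but was_pending != is_pending, so invoke_softirq() still runs. */

	/* Case 2: the flushed call raised BLOCK_SOFTIRQ instead. */
	is_pending  = BIT(TIMER_SOFTIRQ) | BIT(BLOCK_SOFTIRQ);
	/* is_pending & ~SCHED_SOFTIRQ_MASK != was_pending -> WARN_ON_ONCE()
	 * fires, and invoke_softirq() handles the stray softirq as before. */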
@@ -624,6 +631,24 @@ static inline void tick_irq_exit(void)
#endif
}
+#ifdef CONFIG_IRQ_FORCED_THREADING
+DEFINE_PER_CPU(struct task_struct *, ktimerd);
+DEFINE_PER_CPU(unsigned long, pending_timer_softirq);
+
+static void wake_timersd(void)
+{
+ struct task_struct *tsk = __this_cpu_read(ktimerd);
+
+ if (tsk)
+ wake_up_process(tsk);
+}
+
+#else
+
+static inline void wake_timersd(void) { }
+
+#endif
+
static inline void __irq_exit_rcu(void)
{
#ifndef __ARCH_IRQ_EXIT_IRQS_DISABLED
@@ -636,6 +661,10 @@ static inline void __irq_exit_rcu(void)
if (!in_interrupt() && local_softirq_pending())
invoke_softirq();
+ if (IS_ENABLED(CONFIG_IRQ_FORCED_THREADING) && force_irqthreads() &&
+ local_timers_pending_force_th() && !(in_nmi() | in_hardirq()))
+ wake_timersd();
+
tick_irq_exit();
}
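The wake-up added above only fires once a timer softirq has been diverted into pending_timer_softirq; the call sites that divert TIMER_SOFTIRQ/HRTIMER_SOFTIRQ are outside this file. A minimal sketch of the shape such a raise path could take, assuming a hypothetical helper name (raise_timer_softirq_sketch is not part of this patch):

/* Hedged sketch, not from this patch: divert the timer softirqs to
 * ktimerd when interrupts are force-threaded, otherwise raise them
 * as ordinary softirqs from hard interrupt context. */
static inline void raise_timer_softirq_sketch(unsigned int nr)
{
	lockdep_assert_in_irq();

	if (force_irqthreads())
		raise_ktimers_thread(nr);	/* defer to ktimers/%u, woken at irq exit */
	else
		__raise_softirq_irqoff(nr);	/* usual inline/ksoftirqd path */
}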
@@ -748,10 +777,8 @@ EXPORT_SYMBOL(__tasklet_hi_schedule);
static bool tasklet_clear_sched(struct tasklet_struct *t)
{
- if (test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) {
- wake_up_var(&t->state);
+ if (test_and_clear_wake_up_bit(TASKLET_STATE_SCHED, &t->state))
return true;
- }
WARN_ONCE(1, "tasklet SCHED state not set: %s %pS\n",
t->use_callback ? "callback" : "func",
@@ -871,8 +898,7 @@ void tasklet_kill(struct tasklet_struct *t)
if (in_interrupt())
pr_notice("Attempt to kill tasklet from interrupt\n");
- while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
- wait_var_event(&t->state, !test_bit(TASKLET_STATE_SCHED, &t->state));
+ wait_on_bit_lock(&t->state, TASKLET_STATE_SCHED, TASK_UNINTERRUPTIBLE);
tasklet_unlock_wait(t);
tasklet_clear_sched(t);
@@ -882,16 +908,13 @@ EXPORT_SYMBOL(tasklet_kill);
#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
void tasklet_unlock(struct tasklet_struct *t)
{
- smp_mb__before_atomic();
- clear_bit(TASKLET_STATE_RUN, &t->state);
- smp_mb__after_atomic();
- wake_up_var(&t->state);
+ clear_and_wake_up_bit(TASKLET_STATE_RUN, &t->state);
}
EXPORT_SYMBOL_GPL(tasklet_unlock);
void tasklet_unlock_wait(struct tasklet_struct *t)
{
- wait_var_event(&t->state, !test_bit(TASKLET_STATE_RUN, &t->state));
+ wait_on_bit(&t->state, TASKLET_STATE_RUN, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL_GPL(tasklet_unlock_wait);
#endif
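All three tasklet conversions above follow the same pattern: the open-coded wait_var_event()/wake_up_var() pairs on t->state become the dedicated wait_bit helpers. A small self-contained sketch on a made-up flags word (my_state and MY_STATE_RUN are hypothetical, for illustration only):

#include <linux/wait_bit.h>
#include <linux/sched.h>

#define MY_STATE_RUN	0

static unsigned long my_state;

static void my_acquire(void)
{
	/* Sleep until the bit is clear, then set it -- as tasklet_kill()
	 * now does for TASKLET_STATE_SCHED. */
	wait_on_bit_lock(&my_state, MY_STATE_RUN, TASK_UNINTERRUPTIBLE);
}

static void my_release(void)
{
	/* Clear the bit and wake any waiter -- as tasklet_unlock() now does. */
	clear_and_wake_up_bit(MY_STATE_RUN, &my_state);
}

static void my_wait_idle(void)
{
	/* Sleep until somebody clears the bit -- as tasklet_unlock_wait() now does. */
	wait_on_bit(&my_state, MY_STATE_RUN, TASK_UNINTERRUPTIBLE);
}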
@@ -971,12 +994,57 @@ static struct smp_hotplug_thread softirq_threads = {
.thread_comm = "ksoftirqd/%u",
};
+#ifdef CONFIG_IRQ_FORCED_THREADING
+static void ktimerd_setup(unsigned int cpu)
+{
+ /* Above SCHED_NORMAL to handle timers before regular tasks. */
+ sched_set_fifo_low(current);
+}
+
+static int ktimerd_should_run(unsigned int cpu)
+{
+ return local_timers_pending_force_th();
+}
+
+void raise_ktimers_thread(unsigned int nr)
+{
+ trace_softirq_raise(nr);
+ __this_cpu_or(pending_timer_softirq, BIT(nr));
+}
+
+static void run_ktimerd(unsigned int cpu)
+{
+ unsigned int timer_si;
+
+ ksoftirqd_run_begin();
+
+ timer_si = local_timers_pending_force_th();
+ __this_cpu_write(pending_timer_softirq, 0);
+ or_softirq_pending(timer_si);
+
+ __do_softirq();
+
+ ksoftirqd_run_end();
+}
+
+static struct smp_hotplug_thread timer_thread = {
+ .store = &ktimerd,
+ .setup = ktimerd_setup,
+ .thread_should_run = ktimerd_should_run,
+ .thread_fn = run_ktimerd,
+ .thread_comm = "ktimers/%u",
+};
+#endif
+
static __init int spawn_ksoftirqd(void)
{
cpuhp_setup_state_nocalls(CPUHP_SOFTIRQ_DEAD, "softirq:dead", NULL,
takeover_tasklets);
BUG_ON(smpboot_register_percpu_thread(&softirq_threads));
-
+#ifdef CONFIG_IRQ_FORCED_THREADING
+ if (force_irqthreads())
+ BUG_ON(smpboot_register_percpu_thread(&timer_thread));
+#endif
return 0;
}
early_initcall(spawn_ksoftirqd);