-rw-r--r--  include/linux/hrtimer.h  16
-rw-r--r--  kernel/time/hrtimer.c    95
2 files changed, 105 insertions(+), 6 deletions(-)
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 7d0d0a36a8f4..5df4bcff96d5 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -192,6 +192,10 @@ enum hrtimer_base_type {
* @nr_retries: Total number of hrtimer interrupt retries
* @nr_hangs: Total number of hrtimer interrupt hangs
* @max_hang_time: Maximum time spent in hrtimer_interrupt
+ * @softirq_expiry_lock: Lock which is taken while softirq based hrtimers
+ *			 are expired
+ * @timer_waiters:	 A hrtimer_cancel() invocation waits for the timer
+ *			 callback to finish.
* @expires_next: absolute time of the next event, is required for remote
* hrtimer enqueue; it is the total first expiry time (hard
* and soft hrtimer are taken into account)
@@ -219,6 +223,10 @@ struct hrtimer_cpu_base {
unsigned short nr_hangs;
unsigned int max_hang_time;
#endif
+#ifdef CONFIG_PREEMPT_RT
+ spinlock_t softirq_expiry_lock;
+ atomic_t timer_waiters;
+#endif
ktime_t expires_next;
struct hrtimer *next_timer;
ktime_t softirq_expires_next;
@@ -350,6 +358,14 @@ extern void hrtimers_resume(void);
DECLARE_PER_CPU(struct tick_device, tick_cpu_device);
+#ifdef CONFIG_PREEMPT_RT
+void hrtimer_cancel_wait_running(const struct hrtimer *timer);
+#else
+static inline void hrtimer_cancel_wait_running(struct hrtimer *timer)
+{
+ cpu_relax();
+}
+#endif
/* Exported timer functions: */
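The header half establishes the calling convention: on PREEMPT_RT, hrtimer_cancel_wait_running() blocks until the running callback has finished; on all other configurations it degrades to a plain cpu_relax(), so one retry loop serves both cases. A minimal sketch of that loop, using only functions from this header; example_cancel_sync() is a hypothetical caller, and the loop is the same shape the hrtimer_cancel() rework in the hrtimer.c half below adopts:

#include <linux/hrtimer.h>

/* Hypothetical caller; mirrors the reworked hrtimer_cancel() below. */
static void example_cancel_sync(struct hrtimer *timer)
{
        int ret;

        do {
                /* Returns -1 while the timer callback is executing. */
                ret = hrtimer_try_to_cancel(timer);
                if (ret < 0)
                        hrtimer_cancel_wait_running(timer);
        } while (ret < 0);
}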
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index c101f88ae8aa..499122752649 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -1162,6 +1162,82 @@ int hrtimer_try_to_cancel(struct hrtimer *timer)
}
EXPORT_SYMBOL_GPL(hrtimer_try_to_cancel);
+#ifdef CONFIG_PREEMPT_RT
+static void hrtimer_cpu_base_init_expiry_lock(struct hrtimer_cpu_base *base)
+{
+ spin_lock_init(&base->softirq_expiry_lock);
+}
+
+static void hrtimer_cpu_base_lock_expiry(struct hrtimer_cpu_base *base)
+{
+ spin_lock(&base->softirq_expiry_lock);
+}
+
+static void hrtimer_cpu_base_unlock_expiry(struct hrtimer_cpu_base *base)
+{
+ spin_unlock(&base->softirq_expiry_lock);
+}
+
+/*
+ * The counterpart to hrtimer_cancel_wait_running().
+ *
+ * If there is a waiter for cpu_base->softirq_expiry_lock, then it was
+ * waiting for the timer callback to finish. Drop the lock and reacquire
+ * it. That allows the waiter to acquire the lock and make progress.
+ */
+static void hrtimer_sync_wait_running(struct hrtimer_cpu_base *cpu_base,
+ unsigned long flags)
+{
+ if (atomic_read(&cpu_base->timer_waiters)) {
+ raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
+ spin_unlock(&cpu_base->softirq_expiry_lock);
+ spin_lock(&cpu_base->softirq_expiry_lock);
+ raw_spin_lock_irq(&cpu_base->lock);
+ }
+}
+
+/*
+ * This function is called on PREEMPT_RT kernels when the fast path
+ * deletion of a timer failed because the timer callback function was
+ * running.
+ *
+ * This prevents priority inversion if the softirq thread on a remote CPU
+ * got preempted, and it prevents a livelock when the task which tries to
+ * delete a timer preempted the softirq thread running the timer callback
+ * function.
+ */
+void hrtimer_cancel_wait_running(const struct hrtimer *timer)
+{
+ struct hrtimer_clock_base *base = timer->base;
+
+ if (!timer->is_soft || !base || !base->cpu_base) {
+ cpu_relax();
+ return;
+ }
+
+ /*
+ * Mark the base as contended and grab the expiry lock, which is
+ * held by the softirq across the timer callback. Drop the lock
+ * immediately so the softirq can expire the next timer. In theory
+ * the timer could already be running again, but that is very unlikely
+ * and would just cause another wait loop.
+ */
+ atomic_inc(&base->cpu_base->timer_waiters);
+ spin_lock_bh(&base->cpu_base->softirq_expiry_lock);
+ atomic_dec(&base->cpu_base->timer_waiters);
+ spin_unlock_bh(&base->cpu_base->softirq_expiry_lock);
+}
+#else
+static inline void
+hrtimer_cpu_base_init_expiry_lock(struct hrtimer_cpu_base *base) { }
+static inline void
+hrtimer_cpu_base_lock_expiry(struct hrtimer_cpu_base *base) { }
+static inline void
+hrtimer_cpu_base_unlock_expiry(struct hrtimer_cpu_base *base) { }
+static inline void hrtimer_sync_wait_running(struct hrtimer_cpu_base *base,
+ unsigned long flags) { }
+#endif
+
/**
* hrtimer_cancel - cancel a timer and wait for the handler to finish.
* @timer: the timer to be cancelled
@@ -1172,13 +1248,15 @@ EXPORT_SYMBOL_GPL(hrtimer_try_to_cancel);
*/
int hrtimer_cancel(struct hrtimer *timer)
{
- for (;;) {
- int ret = hrtimer_try_to_cancel(timer);
+ int ret;
- if (ret >= 0)
- return ret;
- cpu_relax();
- }
+ do {
+ ret = hrtimer_try_to_cancel(timer);
+
+ if (ret < 0)
+ hrtimer_cancel_wait_running(timer);
+ } while (ret < 0);
+ return ret;
}
EXPORT_SYMBOL_GPL(hrtimer_cancel);
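On !PREEMPT_RT kernels the rewritten loop is behaviorally identical to the one it replaces: hrtimer_cancel_wait_running() is the static inline cpu_relax() from the header hunk above, so after inlining the function reduces to the familiar try/relax/retry spin. A sketch of that reduction (not verbatim compiler output):

int hrtimer_cancel(struct hrtimer *timer)
{
        int ret;

        do {
                ret = hrtimer_try_to_cancel(timer);
                if (ret < 0)
                        cpu_relax();    /* inlined hrtimer_cancel_wait_running() */
        } while (ret < 0);
        return ret;
}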
@@ -1475,6 +1553,8 @@ static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now,
break;
__run_hrtimer(cpu_base, base, timer, &basenow, flags);
+ if (active_mask == HRTIMER_ACTIVE_SOFT)
+ hrtimer_sync_wait_running(cpu_base, flags);
}
}
}
@@ -1485,6 +1565,7 @@ static __latent_entropy void hrtimer_run_softirq(struct softirq_action *h)
unsigned long flags;
ktime_t now;
+ hrtimer_cpu_base_lock_expiry(cpu_base);
raw_spin_lock_irqsave(&cpu_base->lock, flags);
now = hrtimer_update_base(cpu_base);
@@ -1494,6 +1575,7 @@ static __latent_entropy void hrtimer_run_softirq(struct softirq_action *h)
hrtimer_update_softirq_timer(cpu_base, true);
raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
+ hrtimer_cpu_base_unlock_expiry(cpu_base);
}
#ifdef CONFIG_HIGH_RES_TIMERS
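Note the lock order these hunks establish: softirq_expiry_lock is always the outer lock and cpu_base->lock the inner one. hrtimer_run_softirq() acquires them in that order, and hrtimer_sync_wait_running() releases innermost-first before reacquiring both, which keeps the handoff deadlock-free. Condensed from the hunks above:

/* expiry path (hrtimer_run_softirq) */
spin_lock(&cpu_base->softirq_expiry_lock);              /* outer */
raw_spin_lock_irqsave(&cpu_base->lock, flags);          /* inner */
/* ... expire soft timers, calling hrtimer_sync_wait_running() ... */
raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
spin_unlock(&cpu_base->softirq_expiry_lock);

/* handoff (hrtimer_sync_wait_running): inner lock dropped first */
raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
spin_unlock(&cpu_base->softirq_expiry_lock);
spin_lock(&cpu_base->softirq_expiry_lock);
raw_spin_lock_irq(&cpu_base->lock);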
@@ -1897,6 +1979,7 @@ int hrtimers_prepare_cpu(unsigned int cpu)
cpu_base->softirq_next_timer = NULL;
cpu_base->expires_next = KTIME_MAX;
cpu_base->softirq_expires_next = KTIME_MAX;
+ hrtimer_cpu_base_init_expiry_lock(cpu_base);
return 0;
}
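Taken together, the pieces form a small handshake: the softirq holds softirq_expiry_lock across a batch of soft timer callbacks, a canceling task registers itself in timer_waiters and blocks on that lock, and hrtimer_sync_wait_running() briefly drops the lock between callbacks so the waiter can get in. The following standalone userspace sketch models the same handshake, with pthreads standing in for the RT-aware spinlock; every name in it is invented for the demo, and a plain pthread mutex only blocks, without the priority inheritance a real PREEMPT_RT lock would apply. Build with: cc -pthread demo.c

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_mutex_t expiry_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_int timer_waiters;

/* Cancel side: the shape of hrtimer_cancel_wait_running(). */
static void cancel_wait_running(void)
{
        atomic_fetch_add(&timer_waiters, 1);
        pthread_mutex_lock(&expiry_lock);       /* wait for a callback boundary */
        atomic_fetch_sub(&timer_waiters, 1);
        pthread_mutex_unlock(&expiry_lock);
}

/* Expiry side: the shape of hrtimer_sync_wait_running(). */
static void sync_wait_running(void)
{
        if (atomic_load(&timer_waiters)) {
                pthread_mutex_unlock(&expiry_lock);     /* let the waiter in */
                pthread_mutex_lock(&expiry_lock);
        }
}

/* Stands in for hrtimer_run_softirq(): lock held across the whole batch. */
static void *expiry_thread(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&expiry_lock);
        for (int i = 0; i < 100000; i++) {
                /* ... one soft timer callback would run here ... */
                sync_wait_running();
        }
        pthread_mutex_unlock(&expiry_lock);
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, expiry_thread, NULL);
        cancel_wait_running();  /* returns at the next callback boundary */
        puts("canceler made progress while callbacks keep running");
        pthread_join(t, NULL);
        return 0;
}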