From 77dcc6233e0def71e104d728ab5a39c2fca51127 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 1 Mar 2019 23:48:19 +0100 Subject: mac80211_hwsim: Replace hrtimer tasklet with softirq hrtimer Switch the timer to HRTIMER_MODE_REL_SOFT, which executed the timer callback in softirq context and remove the hrtimer_tasklet. Signed-off-by: Thomas Gleixner Signed-off-by: Anna-Maria Gleixner Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: Thomas Gleixner Reviewed-by: Johannes Berg Acked-by: David S. Miller Cc: netdev@vger.kernel.org Cc: linux-wireless@vger.kernel.org Cc: Kalle Valo Link: https://lkml.kernel.org/r/20190301224821.29843-2-bigeasy@linutronix.de --- drivers/net/wireless/mac80211_hwsim.c | 46 ++++++++++++++++------------------- 1 file changed, 21 insertions(+), 25 deletions(-) diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 0838af04d681..2a407a71bcd1 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -521,7 +521,7 @@ struct mac80211_hwsim_data { unsigned int rx_filter; bool started, idle, scanning; struct mutex mutex; - struct tasklet_hrtimer beacon_timer; + struct hrtimer beacon_timer; enum ps_mode { PS_DISABLED, PS_ENABLED, PS_AUTO_POLL, PS_MANUAL_POLL } ps; @@ -1460,7 +1460,7 @@ static void mac80211_hwsim_stop(struct ieee80211_hw *hw) { struct mac80211_hwsim_data *data = hw->priv; data->started = false; - tasklet_hrtimer_cancel(&data->beacon_timer); + hrtimer_cancel(&data->beacon_timer); wiphy_dbg(hw->wiphy, "%s\n", __func__); } @@ -1583,14 +1583,12 @@ static enum hrtimer_restart mac80211_hwsim_beacon(struct hrtimer *timer) { struct mac80211_hwsim_data *data = - container_of(timer, struct mac80211_hwsim_data, - beacon_timer.timer); + container_of(timer, struct mac80211_hwsim_data, beacon_timer); struct ieee80211_hw *hw = data->hw; u64 bcn_int = data->beacon_int; - ktime_t next_bcn; if (!data->started) - goto out; + return HRTIMER_NORESTART; ieee80211_iterate_active_interfaces_atomic( hw, IEEE80211_IFACE_ITER_NORMAL, @@ -1601,12 +1599,9 @@ mac80211_hwsim_beacon(struct hrtimer *timer) bcn_int -= data->bcn_delta; data->bcn_delta = 0; } - - next_bcn = ktime_add(hrtimer_get_expires(timer), - ns_to_ktime(bcn_int * 1000)); - tasklet_hrtimer_start(&data->beacon_timer, next_bcn, HRTIMER_MODE_ABS); -out: - return HRTIMER_NORESTART; + hrtimer_forward(&data->beacon_timer, hrtimer_get_expires(timer), + ns_to_ktime(bcn_int * NSEC_PER_USEC)); + return HRTIMER_RESTART; } static const char * const hwsim_chanwidths[] = { @@ -1680,15 +1675,15 @@ static int mac80211_hwsim_config(struct ieee80211_hw *hw, u32 changed) mutex_unlock(&data->mutex); if (!data->started || !data->beacon_int) - tasklet_hrtimer_cancel(&data->beacon_timer); - else if (!hrtimer_is_queued(&data->beacon_timer.timer)) { + hrtimer_cancel(&data->beacon_timer); + else if (!hrtimer_is_queued(&data->beacon_timer)) { u64 tsf = mac80211_hwsim_get_tsf(hw, NULL); u32 bcn_int = data->beacon_int; u64 until_tbtt = bcn_int - do_div(tsf, bcn_int); - tasklet_hrtimer_start(&data->beacon_timer, - ns_to_ktime(until_tbtt * 1000), - HRTIMER_MODE_REL); + hrtimer_start(&data->beacon_timer, + ns_to_ktime(until_tbtt * NSEC_PER_USEC), + HRTIMER_MODE_REL_SOFT); } return 0; @@ -1751,7 +1746,7 @@ static void mac80211_hwsim_bss_info_changed(struct ieee80211_hw *hw, info->enable_beacon, info->beacon_int); vp->bcn_en = info->enable_beacon; if (data->started && - !hrtimer_is_queued(&data->beacon_timer.timer) && + !hrtimer_is_queued(&data->beacon_timer) && 
info->enable_beacon) { u64 tsf, until_tbtt; u32 bcn_int; @@ -1759,9 +1754,10 @@ static void mac80211_hwsim_bss_info_changed(struct ieee80211_hw *hw, tsf = mac80211_hwsim_get_tsf(hw, vif); bcn_int = data->beacon_int; until_tbtt = bcn_int - do_div(tsf, bcn_int); - tasklet_hrtimer_start(&data->beacon_timer, - ns_to_ktime(until_tbtt * 1000), - HRTIMER_MODE_REL); + + hrtimer_start(&data->beacon_timer, + ns_to_ktime(until_tbtt * NSEC_PER_USEC), + HRTIMER_MODE_REL_SOFT); } else if (!info->enable_beacon) { unsigned int count = 0; ieee80211_iterate_active_interfaces_atomic( @@ -1770,7 +1766,7 @@ static void mac80211_hwsim_bss_info_changed(struct ieee80211_hw *hw, wiphy_dbg(hw->wiphy, " beaconing vifs remaining: %u", count); if (count == 0) { - tasklet_hrtimer_cancel(&data->beacon_timer); + hrtimer_cancel(&data->beacon_timer); data->beacon_int = 0; } } @@ -2922,9 +2918,9 @@ static int mac80211_hwsim_new_radio(struct genl_info *info, wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST); - tasklet_hrtimer_init(&data->beacon_timer, - mac80211_hwsim_beacon, - CLOCK_MONOTONIC, HRTIMER_MODE_ABS); + hrtimer_init(&data->beacon_timer, CLOCK_MONOTONIC, + HRTIMER_MODE_ABS_SOFT); + data->beacon_timer.function = mac80211_hwsim_beacon; err = ieee80211_register_hw(hw); if (err < 0) { -- cgit v1.2.3-59-g8ed1b From 671422b2205b5f2c49948b686306b207a5975dd9 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 1 Mar 2019 23:48:20 +0100 Subject: xfrm: Replace hrtimer tasklet with softirq hrtimer Switch the timer to HRTIMER_MODE_SOFT, which executed the timer callback in softirq context and remove the hrtimer_tasklet. Signed-off-by: Thomas Gleixner Signed-off-by: Anna-Maria Gleixner Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: Thomas Gleixner Acked-by: David S. 
Miller Cc: netdev@vger.kernel.org Cc: Steffen Klassert Cc: Herbert Xu Link: https://lkml.kernel.org/r/20190301224821.29843-3-bigeasy@linutronix.de --- include/net/xfrm.h | 2 +- net/xfrm/xfrm_state.c | 30 ++++++++++++++++++------------ 2 files changed, 19 insertions(+), 13 deletions(-) diff --git a/include/net/xfrm.h b/include/net/xfrm.h index 85386becbaea..bbcf0b650b81 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -219,7 +219,7 @@ struct xfrm_state { struct xfrm_stats stats; struct xfrm_lifetime_cur curlft; - struct tasklet_hrtimer mtimer; + struct hrtimer mtimer; struct xfrm_state_offload xso; diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index 1bb971f46fc6..586b4d656abd 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -434,7 +434,7 @@ EXPORT_SYMBOL(xfrm_state_free); static void ___xfrm_state_destroy(struct xfrm_state *x) { - tasklet_hrtimer_cancel(&x->mtimer); + hrtimer_cancel(&x->mtimer); del_timer_sync(&x->rtimer); kfree(x->aead); kfree(x->aalg); @@ -479,8 +479,8 @@ static void xfrm_state_gc_task(struct work_struct *work) static enum hrtimer_restart xfrm_timer_handler(struct hrtimer *me) { - struct tasklet_hrtimer *thr = container_of(me, struct tasklet_hrtimer, timer); - struct xfrm_state *x = container_of(thr, struct xfrm_state, mtimer); + struct xfrm_state *x = container_of(me, struct xfrm_state, mtimer); + enum hrtimer_restart ret = HRTIMER_NORESTART; time64_t now = ktime_get_real_seconds(); time64_t next = TIME64_MAX; int warn = 0; @@ -544,7 +544,8 @@ static enum hrtimer_restart xfrm_timer_handler(struct hrtimer *me) km_state_expired(x, 0, 0); resched: if (next != TIME64_MAX) { - tasklet_hrtimer_start(&x->mtimer, ktime_set(next, 0), HRTIMER_MODE_REL); + hrtimer_forward_now(&x->mtimer, ktime_set(next, 0)); + ret = HRTIMER_RESTART; } goto out; @@ -561,7 +562,7 @@ expired: out: spin_unlock(&x->lock); - return HRTIMER_NORESTART; + return ret; } static void xfrm_replay_timer_handler(struct timer_list *t); @@ -580,8 +581,8 @@ struct xfrm_state *xfrm_state_alloc(struct net *net) INIT_HLIST_NODE(&x->bydst); INIT_HLIST_NODE(&x->bysrc); INIT_HLIST_NODE(&x->byspi); - tasklet_hrtimer_init(&x->mtimer, xfrm_timer_handler, - CLOCK_BOOTTIME, HRTIMER_MODE_ABS); + hrtimer_init(&x->mtimer, CLOCK_BOOTTIME, HRTIMER_MODE_ABS_SOFT); + x->mtimer.function = xfrm_timer_handler; timer_setup(&x->rtimer, xfrm_replay_timer_handler, 0); x->curlft.add_time = ktime_get_real_seconds(); x->lft.soft_byte_limit = XFRM_INF; @@ -1047,7 +1048,9 @@ found: hlist_add_head_rcu(&x->byspi, net->xfrm.state_byspi + h); } x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires; - tasklet_hrtimer_start(&x->mtimer, ktime_set(net->xfrm.sysctl_acq_expires, 0), HRTIMER_MODE_REL); + hrtimer_start(&x->mtimer, + ktime_set(net->xfrm.sysctl_acq_expires, 0), + HRTIMER_MODE_REL_SOFT); net->xfrm.state_num++; xfrm_hash_grow_check(net, x->bydst.next != NULL); spin_unlock_bh(&net->xfrm.xfrm_state_lock); @@ -1159,7 +1162,7 @@ static void __xfrm_state_insert(struct xfrm_state *x) hlist_add_head_rcu(&x->byspi, net->xfrm.state_byspi + h); } - tasklet_hrtimer_start(&x->mtimer, ktime_set(1, 0), HRTIMER_MODE_REL); + hrtimer_start(&x->mtimer, ktime_set(1, 0), HRTIMER_MODE_REL_SOFT); if (x->replay_maxage) mod_timer(&x->rtimer, jiffies + x->replay_maxage); @@ -1266,7 +1269,9 @@ static struct xfrm_state *__find_acq_core(struct net *net, x->mark.m = m->m; x->lft.hard_add_expires_seconds = net->xfrm.sysctl_acq_expires; xfrm_state_hold(x); - tasklet_hrtimer_start(&x->mtimer, 
ktime_set(net->xfrm.sysctl_acq_expires, 0), HRTIMER_MODE_REL); + hrtimer_start(&x->mtimer, + ktime_set(net->xfrm.sysctl_acq_expires, 0), + HRTIMER_MODE_REL_SOFT); list_add(&x->km.all, &net->xfrm.state_all); hlist_add_head_rcu(&x->bydst, net->xfrm.state_bydst + h); h = xfrm_src_hash(net, daddr, saddr, family); @@ -1571,7 +1576,8 @@ out: memcpy(&x1->lft, &x->lft, sizeof(x1->lft)); x1->km.dying = 0; - tasklet_hrtimer_start(&x1->mtimer, ktime_set(1, 0), HRTIMER_MODE_REL); + hrtimer_start(&x1->mtimer, ktime_set(1, 0), + HRTIMER_MODE_REL_SOFT); if (x1->curlft.use_time) xfrm_state_check_expire(x1); @@ -1610,7 +1616,7 @@ int xfrm_state_check_expire(struct xfrm_state *x) if (x->curlft.bytes >= x->lft.hard_byte_limit || x->curlft.packets >= x->lft.hard_packet_limit) { x->km.state = XFRM_STATE_EXPIRED; - tasklet_hrtimer_start(&x->mtimer, 0, HRTIMER_MODE_REL); + hrtimer_start(&x->mtimer, 0, HRTIMER_MODE_REL_SOFT); return -EINVAL; } -- cgit v1.2.3-59-g8ed1b From d7dcf26ff0ffd7b56fe2b09ed7f1867589f3cdf1 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 1 Mar 2019 23:48:21 +0100 Subject: softirq: Remove tasklet_hrtimer There are no more users of this interface. Remove it. Signed-off-by: Thomas Gleixner Signed-off-by: Anna-Maria Gleixner Signed-off-by: Sebastian Andrzej Siewior Signed-off-by: Thomas Gleixner Acked-by: David S. Miller Cc: netdev@vger.kernel.org Link: https://lkml.kernel.org/r/20190301224821.29843-4-bigeasy@linutronix.de --- include/linux/interrupt.h | 25 ----------------------- kernel/softirq.c | 51 ----------------------------------------------- 2 files changed, 76 deletions(-) diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 690b238a44d5..c7eef32e7739 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -668,31 +668,6 @@ extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu); extern void tasklet_init(struct tasklet_struct *t, void (*func)(unsigned long), unsigned long data); -struct tasklet_hrtimer { - struct hrtimer timer; - struct tasklet_struct tasklet; - enum hrtimer_restart (*function)(struct hrtimer *); -}; - -extern void -tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer, - enum hrtimer_restart (*function)(struct hrtimer *), - clockid_t which_clock, enum hrtimer_mode mode); - -static inline -void tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time, - const enum hrtimer_mode mode) -{ - hrtimer_start(&ttimer->timer, time, mode); -} - -static inline -void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer) -{ - hrtimer_cancel(&ttimer->timer); - tasklet_kill(&ttimer->tasklet); -} - /* * Autoprobing for irqs: * diff --git a/kernel/softirq.c b/kernel/softirq.c index 10277429ed84..2c3382378d94 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c @@ -573,57 +573,6 @@ void tasklet_kill(struct tasklet_struct *t) } EXPORT_SYMBOL(tasklet_kill); -/* - * tasklet_hrtimer - */ - -/* - * The trampoline is called when the hrtimer expires. It schedules a tasklet - * to run __tasklet_hrtimer_trampoline() which in turn will call the intended - * hrtimer callback, but from softirq context. 
- */ -static enum hrtimer_restart __hrtimer_tasklet_trampoline(struct hrtimer *timer) -{ - struct tasklet_hrtimer *ttimer = - container_of(timer, struct tasklet_hrtimer, timer); - - tasklet_hi_schedule(&ttimer->tasklet); - return HRTIMER_NORESTART; -} - -/* - * Helper function which calls the hrtimer callback from - * tasklet/softirq context - */ -static void __tasklet_hrtimer_trampoline(unsigned long data) -{ - struct tasklet_hrtimer *ttimer = (void *)data; - enum hrtimer_restart restart; - - restart = ttimer->function(&ttimer->timer); - if (restart != HRTIMER_NORESTART) - hrtimer_restart(&ttimer->timer); -} - -/** - * tasklet_hrtimer_init - Init a tasklet/hrtimer combo for softirq callbacks - * @ttimer: tasklet_hrtimer which is initialized - * @function: hrtimer callback function which gets called from softirq context - * @which_clock: clock id (CLOCK_MONOTONIC/CLOCK_REALTIME) - * @mode: hrtimer mode (HRTIMER_MODE_ABS/HRTIMER_MODE_REL) - */ -void tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer, - enum hrtimer_restart (*function)(struct hrtimer *), - clockid_t which_clock, enum hrtimer_mode mode) -{ - hrtimer_init(&ttimer->timer, which_clock, mode); - ttimer->timer.function = __hrtimer_tasklet_trampoline; - tasklet_init(&ttimer->tasklet, __tasklet_hrtimer_trampoline, - (unsigned long)ttimer); - ttimer->function = function; -} -EXPORT_SYMBOL_GPL(tasklet_hrtimer_init); - void __init softirq_init(void) { int cpu; -- cgit v1.2.3-59-g8ed1b From e1e41b6ce5f9c1a80bf4f2404ec5ab11c6c5a2ad Mon Sep 17 00:00:00 2001 From: Rasmus Villemoes Date: Mon, 18 Mar 2019 20:55:56 +0100 Subject: timekeeping: Consistently use unsigned int for seqcount snapshot The timekeeping code uses a random mix of "unsigned long" and "unsigned int" for the seqcount snapshots (ratio 14:12). Since the seqlock.h API is entirely based on unsigned int, use that throughout. 
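For context, these snapshots feed the lockless read loops in the timekeeping code. A condensed sketch of that read side (simplified from the readers touched below; tk_core and tk_xtime() are the helpers in kernel/time/timekeeping.c) shows why unsigned int is the natural type: read_seqcount_begin() hands back a 32-bit sequence value and read_seqcount_retry() takes the same.

/* Sketch only, not verbatim kernel code. */
static void ktime_read_sketch(struct timespec64 *ts)
{
	unsigned int seq;	/* snapshot of the sequence counter */

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		*ts = tk_xtime(&tk_core.timekeeper);	/* sample protected data */
	} while (read_seqcount_retry(&tk_core.seq, seq));
}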
Signed-off-by: Rasmus Villemoes Signed-off-by: Thomas Gleixner Cc: Frederic Weisbecker Cc: John Stultz Cc: Stephen Boyd Link: https://lkml.kernel.org/r/20190318195557.20773-1-linux@rasmusvillemoes.dk --- kernel/time/jiffies.c | 2 +- kernel/time/sched_clock.c | 4 ++-- kernel/time/tick-common.c | 2 +- kernel/time/tick-sched.c | 3 ++- kernel/time/timekeeping.c | 18 +++++++++--------- 5 files changed, 15 insertions(+), 14 deletions(-) diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c index dc1b6f1929f9..95f8f3304c19 100644 --- a/kernel/time/jiffies.c +++ b/kernel/time/jiffies.c @@ -63,7 +63,7 @@ __cacheline_aligned_in_smp DEFINE_SEQLOCK(jiffies_lock); #if (BITS_PER_LONG < 64) u64 get_jiffies_64(void) { - unsigned long seq; + unsigned int seq; u64 ret; do { diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c index 094b82ca95e5..16b80c2b4fe8 100644 --- a/kernel/time/sched_clock.c +++ b/kernel/time/sched_clock.c @@ -94,7 +94,7 @@ static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift) unsigned long long notrace sched_clock(void) { u64 cyc, res; - unsigned long seq; + unsigned int seq; struct clock_read_data *rd; do { @@ -267,7 +267,7 @@ void __init generic_sched_clock_init(void) */ static u64 notrace suspended_sched_clock_read(void) { - unsigned long seq = raw_read_seqcount(&cd.seq); + unsigned int seq = raw_read_seqcount(&cd.seq); return cd.read_data[seq & 1].epoch_cyc; } diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c index 529143b4c8d2..561641b2153f 100644 --- a/kernel/time/tick-common.c +++ b/kernel/time/tick-common.c @@ -149,7 +149,7 @@ void tick_setup_periodic(struct clock_event_device *dev, int broadcast) !tick_broadcast_oneshot_active()) { clockevents_switch_state(dev, CLOCK_EVT_STATE_PERIODIC); } else { - unsigned long seq; + unsigned int seq; ktime_t next; do { diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index 6fa52cd6df0b..b50f6f22c88e 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -645,7 +645,8 @@ static inline bool local_timer_softirq_pending(void) static ktime_t tick_nohz_next_event(struct tick_sched *ts, int cpu) { u64 basemono, next_tick, next_tmr, next_rcu, delta, expires; - unsigned long seq, basejiff; + unsigned long basejiff; + unsigned int seq; /* Read jiffies and the time when jiffies were updated last */ do { diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index f986e1918d12..540145da33da 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -720,7 +720,7 @@ static void timekeeping_forward_now(struct timekeeper *tk) void ktime_get_real_ts64(struct timespec64 *ts) { struct timekeeper *tk = &tk_core.timekeeper; - unsigned long seq; + unsigned int seq; u64 nsecs; WARN_ON(timekeeping_suspended); @@ -829,7 +829,7 @@ EXPORT_SYMBOL_GPL(ktime_get_coarse_with_offset); ktime_t ktime_mono_to_any(ktime_t tmono, enum tk_offsets offs) { ktime_t *offset = offsets[offs]; - unsigned long seq; + unsigned int seq; ktime_t tconv; do { @@ -960,7 +960,7 @@ time64_t __ktime_get_real_seconds(void) void ktime_get_snapshot(struct system_time_snapshot *systime_snapshot) { struct timekeeper *tk = &tk_core.timekeeper; - unsigned long seq; + unsigned int seq; ktime_t base_raw; ktime_t base_real; u64 nsec_raw; @@ -1122,7 +1122,7 @@ int get_device_system_crosststamp(int (*get_time_fn) ktime_t base_real, base_raw; u64 nsec_real, nsec_raw; u8 cs_was_changed_seq; - unsigned long seq; + unsigned int seq; bool do_interp; int ret; @@ -1409,7 +1409,7 @@ int 
timekeeping_notify(struct clocksource *clock) void ktime_get_raw_ts64(struct timespec64 *ts) { struct timekeeper *tk = &tk_core.timekeeper; - unsigned long seq; + unsigned int seq; u64 nsecs; do { @@ -1431,7 +1431,7 @@ EXPORT_SYMBOL(ktime_get_raw_ts64); int timekeeping_valid_for_hres(void) { struct timekeeper *tk = &tk_core.timekeeper; - unsigned long seq; + unsigned int seq; int ret; do { @@ -1450,7 +1450,7 @@ int timekeeping_valid_for_hres(void) u64 timekeeping_max_deferment(void) { struct timekeeper *tk = &tk_core.timekeeper; - unsigned long seq; + unsigned int seq; u64 ret; do { @@ -2150,7 +2150,7 @@ EXPORT_SYMBOL_GPL(getboottime64); void ktime_get_coarse_real_ts64(struct timespec64 *ts) { struct timekeeper *tk = &tk_core.timekeeper; - unsigned long seq; + unsigned int seq; do { seq = read_seqcount_begin(&tk_core.seq); @@ -2164,7 +2164,7 @@ void ktime_get_coarse_ts64(struct timespec64 *ts) { struct timekeeper *tk = &tk_core.timekeeper; struct timespec64 now, mono; - unsigned long seq; + unsigned int seq; do { seq = read_seqcount_begin(&tk_core.seq); -- cgit v1.2.3-59-g8ed1b From 1b72d43237980eab9b6ae6bb8181e51c840377e6 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 21 Mar 2019 16:39:20 +0100 Subject: tick: Remove outgoing CPU from broadcast masks Valentin reported that unplugging a CPU occasionally results in a warning in the tick broadcast code which is triggered when an offline CPU is in the broadcast mask. This happens because the outgoing CPU is not removing itself from the broadcast masks, especially not from the broadcast_force_mask. The removal happens on the control CPU after the outgoing CPU is dead. It's a long standing issue, but the warning is harmless. Rework the hotplug mechanism so that the outgoing CPU removes itself from the broadcast masks after disabling interrupts and removing itself from the online mask. 
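The resulting teardown order on the outgoing CPU, condensed from the hunks below into one call path (a sketch, not verbatim kernel code):

/* take_cpu_down() - runs on the dying CPU with interrupts disabled and
 * the CPU already cleared from the online mask by the hotplug core: */
	tick_handover_do_timer();	/* give up the do_timer duty        */
	tick_offline_cpu(cpu);		/* new: drop out of broadcast masks */
	stop_machine_park(cpu);

/* tick_offline_cpu() serializes against the clockevents core and clears
 * tick_broadcast_mask, tick_broadcast_on and the oneshot/force masks: */
void tick_offline_cpu(unsigned int cpu)
{
	raw_spin_lock(&clockevents_lock);
	tick_broadcast_offline(cpu);
	raw_spin_unlock(&clockevents_lock);
}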
Reported-by: Valentin Schneider Signed-off-by: Thomas Gleixner Tested-by: Valentin Schneider Cc: Frederic Weisbecker Link: https://lkml.kernel.org/r/alpine.DEB.2.21.1903211540180.1784@nanos.tec.linutronix.de --- include/linux/tick.h | 6 ++++++ kernel/cpu.c | 2 ++ kernel/time/clockevents.c | 18 ++++++++++++++++-- kernel/time/tick-broadcast.c | 40 +++++++++++++++++++--------------------- kernel/time/tick-internal.h | 10 ++++++---- 5 files changed, 49 insertions(+), 27 deletions(-) diff --git a/include/linux/tick.h b/include/linux/tick.h index 55388ab45fd4..76acb48acdb7 100644 --- a/include/linux/tick.h +++ b/include/linux/tick.h @@ -68,6 +68,12 @@ extern void tick_broadcast_control(enum tick_broadcast_mode mode); static inline void tick_broadcast_control(enum tick_broadcast_mode mode) { } #endif /* BROADCAST */ +#if defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) && defined(CONFIG_HOTPLUG_CPU) +extern void tick_offline_cpu(unsigned int cpu); +#else +static inline void tick_offline_cpu(unsigned int cpu) { } +#endif + #ifdef CONFIG_GENERIC_CLOCKEVENTS extern int tick_broadcast_oneshot_control(enum tick_broadcast_state state); #else diff --git a/kernel/cpu.c b/kernel/cpu.c index 025f419d16f6..f69ba38573c2 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -844,6 +844,8 @@ static int take_cpu_down(void *_param) /* Give up timekeeping duties */ tick_handover_do_timer(); + /* Remove CPU from timer broadcasting */ + tick_offline_cpu(cpu); /* Park the stopper thread */ stop_machine_park(cpu); return 0; diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c index 5e77662dd2d9..f5490222e134 100644 --- a/kernel/time/clockevents.c +++ b/kernel/time/clockevents.c @@ -611,6 +611,22 @@ void clockevents_resume(void) } #ifdef CONFIG_HOTPLUG_CPU + +# ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST +/** + * tick_offline_cpu - Take CPU out of the broadcast mechanism + * @cpu: The outgoing CPU + * + * Called on the outgoing CPU after it took itself offline. 
+ */ +void tick_offline_cpu(unsigned int cpu) +{ + raw_spin_lock(&clockevents_lock); + tick_broadcast_offline(cpu); + raw_spin_unlock(&clockevents_lock); +} +# endif + /** * tick_cleanup_dead_cpu - Cleanup the tick and clockevents of a dead cpu */ @@ -621,8 +637,6 @@ void tick_cleanup_dead_cpu(int cpu) raw_spin_lock_irqsave(&clockevents_lock, flags); - tick_shutdown_broadcast_oneshot(cpu); - tick_shutdown_broadcast(cpu); tick_shutdown(cpu); /* * Unregister the clock event devices which were diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c index ee834d4fb814..0283523de045 100644 --- a/kernel/time/tick-broadcast.c +++ b/kernel/time/tick-broadcast.c @@ -36,10 +36,12 @@ static __cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(tick_broadcast_lock); static void tick_broadcast_setup_oneshot(struct clock_event_device *bc); static void tick_broadcast_clear_oneshot(int cpu); static void tick_resume_broadcast_oneshot(struct clock_event_device *bc); +static void tick_broadcast_oneshot_offline(unsigned int cpu); #else static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc) { BUG(); } static inline void tick_broadcast_clear_oneshot(int cpu) { } static inline void tick_resume_broadcast_oneshot(struct clock_event_device *bc) { } +static inline void tick_broadcast_oneshot_offline(unsigned int cpu) { } #endif /* @@ -433,27 +435,29 @@ void tick_set_periodic_handler(struct clock_event_device *dev, int broadcast) } #ifdef CONFIG_HOTPLUG_CPU -/* - * Remove a CPU from broadcasting - */ -void tick_shutdown_broadcast(unsigned int cpu) +static void tick_shutdown_broadcast(void) { - struct clock_event_device *bc; - unsigned long flags; - - raw_spin_lock_irqsave(&tick_broadcast_lock, flags); - - bc = tick_broadcast_device.evtdev; - cpumask_clear_cpu(cpu, tick_broadcast_mask); - cpumask_clear_cpu(cpu, tick_broadcast_on); + struct clock_event_device *bc = tick_broadcast_device.evtdev; if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC) { if (bc && cpumask_empty(tick_broadcast_mask)) clockevents_shutdown(bc); } +} - raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags); +/* + * Remove a CPU from broadcasting + */ +void tick_broadcast_offline(unsigned int cpu) +{ + raw_spin_lock(&tick_broadcast_lock); + cpumask_clear_cpu(cpu, tick_broadcast_mask); + cpumask_clear_cpu(cpu, tick_broadcast_on); + tick_broadcast_oneshot_offline(cpu); + tick_shutdown_broadcast(); + raw_spin_unlock(&tick_broadcast_lock); } + #endif void tick_suspend_broadcast(void) @@ -950,14 +954,10 @@ void hotplug_cpu__broadcast_tick_pull(int deadcpu) } /* - * Remove a dead CPU from broadcasting + * Remove a dying CPU from broadcasting */ -void tick_shutdown_broadcast_oneshot(unsigned int cpu) +static void tick_broadcast_oneshot_offline(unsigned int cpu) { - unsigned long flags; - - raw_spin_lock_irqsave(&tick_broadcast_lock, flags); - /* * Clear the broadcast masks for the dead cpu, but do not stop * the broadcast device! 
@@ -965,8 +965,6 @@ void tick_shutdown_broadcast_oneshot(unsigned int cpu) cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask); cpumask_clear_cpu(cpu, tick_broadcast_pending_mask); cpumask_clear_cpu(cpu, tick_broadcast_force_mask); - - raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags); } #endif diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h index e277284c2831..7b2496136729 100644 --- a/kernel/time/tick-internal.h +++ b/kernel/time/tick-internal.h @@ -64,7 +64,6 @@ extern ssize_t sysfs_get_uname(const char *buf, char *dst, size_t cnt); extern int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu); extern void tick_install_broadcast_device(struct clock_event_device *dev); extern int tick_is_broadcast_device(struct clock_event_device *dev); -extern void tick_shutdown_broadcast(unsigned int cpu); extern void tick_suspend_broadcast(void); extern void tick_resume_broadcast(void); extern bool tick_resume_check_broadcast(void); @@ -78,7 +77,6 @@ static inline void tick_install_broadcast_device(struct clock_event_device *dev) static inline int tick_is_broadcast_device(struct clock_event_device *dev) { return 0; } static inline int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu) { return 0; } static inline void tick_do_periodic_broadcast(struct clock_event_device *d) { } -static inline void tick_shutdown_broadcast(unsigned int cpu) { } static inline void tick_suspend_broadcast(void) { } static inline void tick_resume_broadcast(void) { } static inline bool tick_resume_check_broadcast(void) { return false; } @@ -128,19 +126,23 @@ static inline int tick_check_oneshot_change(int allow_nohz) { return 0; } /* Functions related to oneshot broadcasting */ #if defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) && defined(CONFIG_TICK_ONESHOT) extern void tick_broadcast_switch_to_oneshot(void); -extern void tick_shutdown_broadcast_oneshot(unsigned int cpu); extern int tick_broadcast_oneshot_active(void); extern void tick_check_oneshot_broadcast_this_cpu(void); bool tick_broadcast_oneshot_available(void); extern struct cpumask *tick_get_broadcast_oneshot_mask(void); #else /* !(BROADCAST && ONESHOT): */ static inline void tick_broadcast_switch_to_oneshot(void) { } -static inline void tick_shutdown_broadcast_oneshot(unsigned int cpu) { } static inline int tick_broadcast_oneshot_active(void) { return 0; } static inline void tick_check_oneshot_broadcast_this_cpu(void) { } static inline bool tick_broadcast_oneshot_available(void) { return tick_oneshot_possible(); } #endif /* !(BROADCAST && ONESHOT) */ +#if defined(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) && defined(CONFIG_HOTPLUG_CPU) +extern void tick_broadcast_offline(unsigned int cpu); +#else +static inline void tick_broadcast_offline(unsigned int cpu) { } +#endif + /* NO_HZ_FULL internal */ #ifdef CONFIG_NO_HZ_FULL extern void tick_nohz_init(void); -- cgit v1.2.3-59-g8ed1b From d6b87eaf10bd061914f6d277d7428b3285d8850e Mon Sep 17 00:00:00 2001 From: Anna-Maria Gleixner Date: Thu, 21 Mar 2019 13:09:18 +0100 Subject: tick/sched: Update tick_sched struct documentation Adapt the documentation order of struct members to the effective order of struct members and add missing descriptions. 
Signed-off-by: Anna-Maria Gleixner Signed-off-by: Thomas Gleixner Cc: fweisbec@gmail.com Cc: peterz@infradead.org Link: https://lkml.kernel.org/r/20190321120921.16463-2-anna-maria@linutronix.de --- kernel/time/tick-sched.h | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/kernel/time/tick-sched.h b/kernel/time/tick-sched.h index 6de959a854b2..4fb06527cf64 100644 --- a/kernel/time/tick-sched.h +++ b/kernel/time/tick-sched.h @@ -24,12 +24,19 @@ enum tick_nohz_mode { * struct tick_sched - sched tick emulation and no idle tick control/stats * @sched_timer: hrtimer to schedule the periodic tick in high * resolution mode + * @check_clocks: Notification mechanism about clocksource changes + * @nohz_mode: Mode - one state of tick_nohz_mode + * @inidle: Indicator that the CPU is in the tick idle mode + * @tick_stopped: Indicator that the idle tick has been stopped + * @idle_active: Indicator that the CPU is actively in the tick idle mode; + * it is resetted during irq handling phases. + * @do_timer_lst: CPU was the last one doing do_timer before going idle + * @got_idle_tick: Tick timer function has run with @inidle set * @last_tick: Store the last tick expiry time when the tick * timer is modified for nohz sleeps. This is necessary * to resume the tick timer operation in the timeline * when the CPU returns from nohz sleep. * @next_tick: Next tick to be fired when in dynticks mode. - * @tick_stopped: Indicator that the idle tick has been stopped * @idle_jiffies: jiffies at the entry to idle for idle time accounting * @idle_calls: Total number of idle calls * @idle_sleeps: Number of idle calls, where the sched tick was stopped @@ -40,8 +47,8 @@ enum tick_nohz_mode { * @iowait_sleeptime: Sum of the time slept in idle with sched tick stopped, with IO outstanding * @timer_expires: Anticipated timer expiration time (in case sched tick is stopped) * @timer_expires_base: Base time clock monotonic for @timer_expires - * @do_timer_lst: CPU was the last one doing do_timer before going idle - * @got_idle_tick: Tick timer function has run with @inidle set + * @next_timer: Expiry time of next expiring timer for debugging purpose only + * @tick_dep_mask: Tick dependency mask - is set, if someone needs the tick */ struct tick_sched { struct hrtimer sched_timer; -- cgit v1.2.3-59-g8ed1b From dc1e7dc5ac6254ba0502323381a7ec847e408f1d Mon Sep 17 00:00:00 2001 From: Anna-Maria Gleixner Date: Thu, 21 Mar 2019 13:09:19 +0100 Subject: timer: Move trace point to get proper index When placing the timer_start trace point before the timer wheel bucket index is calculated, the index information in the trace point is useless. It is not possible to simply move the debug_activate() call after the index calculation, because debug_object_activate() needs to be called before touching the object. Therefore split debug_activate() and move the trace point into enqueue_timer() after the new index has been calculated. The debug_object_activate() call remains at the original place. 
Signed-off-by: Anna-Maria Gleixner Signed-off-by: Thomas Gleixner Cc: fweisbec@gmail.com Cc: peterz@infradead.org Cc: Steven Rostedt Link: https://lkml.kernel.org/r/20190321120921.16463-3-anna-maria@linutronix.de --- kernel/time/timer.c | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/kernel/time/timer.c b/kernel/time/timer.c index 2fce056f8a49..8d7918ae4d0c 100644 --- a/kernel/time/timer.c +++ b/kernel/time/timer.c @@ -536,6 +536,8 @@ static void enqueue_timer(struct timer_base *base, struct timer_list *timer, hlist_add_head(&timer->entry, base->vectors + idx); __set_bit(idx, base->pending_map); timer_set_idx(timer, idx); + + trace_timer_start(timer, timer->expires, timer->flags); } static void @@ -757,13 +759,6 @@ static inline void debug_init(struct timer_list *timer) trace_timer_init(timer); } -static inline void -debug_activate(struct timer_list *timer, unsigned long expires) -{ - debug_timer_activate(timer); - trace_timer_start(timer, expires, timer->flags); -} - static inline void debug_deactivate(struct timer_list *timer) { debug_timer_deactivate(timer); @@ -1037,7 +1032,7 @@ __mod_timer(struct timer_list *timer, unsigned long expires, unsigned int option } } - debug_activate(timer, expires); + debug_timer_activate(timer); timer->expires = expires; /* @@ -1171,7 +1166,7 @@ void add_timer_on(struct timer_list *timer, int cpu) } forward_timer_base(base); - debug_activate(timer, timer->expires); + debug_timer_activate(timer); internal_add_timer(base, timer); raw_spin_unlock_irqrestore(&base->lock, flags); } -- cgit v1.2.3-59-g8ed1b From 6849cbb0f9a8dbc1ba56e9abc6955613103e01e3 Mon Sep 17 00:00:00 2001 From: Anna-Maria Gleixner Date: Thu, 21 Mar 2019 13:09:20 +0100 Subject: timer/trace: Replace deprecated vsprintf pointer extension %pf by %ps Since commit 04b8eb7a4ccd ("symbol lookup: introduce dereference_symbol_descriptor()") %pf is deprecated, because %ps is smart enough to handle function pointer dereference on platforms where such a dereference is required. While at it add proper line breaks to stay in the 80 character limit. 
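For readers unfamiliar with the two specifiers: %ps resolves a plain function pointer to its symbol name and, since dereference_symbol_descriptor(), also copes with function-descriptor ABIs (ia64, ppc64 ELFv1, parisc), which is all %pf ever added. A hypothetical printk illustration, not part of the patch:

#include <linux/printk.h>
#include <linux/timer.h>

static void demo_timer_fn(struct timer_list *t) { }

static void print_demo(void)
{
	pr_info("fn=%ps\n", demo_timer_fn);	/* "fn=demo_timer_fn"          */
	pr_info("fn=%pS\n", demo_timer_fn);	/* symbol+offset/size form     */
	pr_info("fn=%p\n",  demo_timer_fn);	/* hashed address, no symbol   */
}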
Signed-off-by: Anna-Maria Gleixner Signed-off-by: Thomas Gleixner Cc: fweisbec@gmail.com Cc: peterz@infradead.org Cc: Steven Rostedt Link: https://lkml.kernel.org/r/20190321120921.16463-4-anna-maria@linutronix.de --- include/trace/events/timer.h | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h index a57e4ee989d6..da975d69c453 100644 --- a/include/trace/events/timer.h +++ b/include/trace/events/timer.h @@ -73,7 +73,7 @@ TRACE_EVENT(timer_start, __entry->flags = flags; ), - TP_printk("timer=%p function=%pf expires=%lu [timeout=%ld] cpu=%u idx=%u flags=%s", + TP_printk("timer=%p function=%ps expires=%lu [timeout=%ld] cpu=%u idx=%u flags=%s", __entry->timer, __entry->function, __entry->expires, (long)__entry->expires - __entry->now, __entry->flags & TIMER_CPUMASK, @@ -105,7 +105,8 @@ TRACE_EVENT(timer_expire_entry, __entry->function = timer->function; ), - TP_printk("timer=%p function=%pf now=%lu", __entry->timer, __entry->function,__entry->now) + TP_printk("timer=%p function=%ps now=%lu", + __entry->timer, __entry->function, __entry->now) ); /** @@ -210,7 +211,7 @@ TRACE_EVENT(hrtimer_start, __entry->mode = mode; ), - TP_printk("hrtimer=%p function=%pf expires=%llu softexpires=%llu " + TP_printk("hrtimer=%p function=%ps expires=%llu softexpires=%llu " "mode=%s", __entry->hrtimer, __entry->function, (unsigned long long) __entry->expires, (unsigned long long) __entry->softexpires, @@ -243,7 +244,8 @@ TRACE_EVENT(hrtimer_expire_entry, __entry->function = hrtimer->function; ), - TP_printk("hrtimer=%p function=%pf now=%llu", __entry->hrtimer, __entry->function, + TP_printk("hrtimer=%p function=%ps now=%llu", + __entry->hrtimer, __entry->function, (unsigned long long) __entry->now) ); -- cgit v1.2.3-59-g8ed1b From f28d3d5346e97e60c81f933ac89ccf015430e5cf Mon Sep 17 00:00:00 2001 From: Anna-Maria Gleixner Date: Thu, 21 Mar 2019 13:09:21 +0100 Subject: timer/trace: Improve timer tracing Timers are added to the timer wheel off by one. This is required in case a timer is queued directly before incrementing jiffies to prevent early timer expiry. When reading a timer trace and relying only on the expiry time of the timer in the timer_start trace point and on the now in the timer_expiry_entry trace point, it seems that the timer fires late. With the current timer_expiry_entry trace point information only now=jiffies is printed but not the value of base->clk. This makes it impossible to draw a conclusion to the index of base->clk and makes it impossible to examine timer problems without additional trace points. Therefore add the base->clk value to the timer_expire_entry trace point, to be able to calculate the index the timer base is located at during collecting expired timers. 
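With both values in the event, the gap between the wheel clock and jiffies becomes visible straight from the trace. A hypothetical line of output, following the TP_printk() format in the hunk below (pointer hash and numbers invented for illustration):

  <idle>-0     [002] ..s.   163.988594: timer_expire_entry: timer=00000000f28f4d16 function=process_timeout now=4294953698 baseclk=4294953697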
Signed-off-by: Anna-Maria Gleixner Signed-off-by: Thomas Gleixner Cc: fweisbec@gmail.com Cc: peterz@infradead.org Cc: Steven Rostedt Link: https://lkml.kernel.org/r/20190321120921.16463-5-anna-maria@linutronix.de --- include/trace/events/timer.h | 11 +++++++---- kernel/time/timer.c | 17 +++++++++++++---- 2 files changed, 20 insertions(+), 8 deletions(-) diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h index da975d69c453..b7a904825e7d 100644 --- a/include/trace/events/timer.h +++ b/include/trace/events/timer.h @@ -89,24 +89,27 @@ TRACE_EVENT(timer_start, */ TRACE_EVENT(timer_expire_entry, - TP_PROTO(struct timer_list *timer), + TP_PROTO(struct timer_list *timer, unsigned long baseclk), - TP_ARGS(timer), + TP_ARGS(timer, baseclk), TP_STRUCT__entry( __field( void *, timer ) __field( unsigned long, now ) __field( void *, function) + __field( unsigned long, baseclk ) ), TP_fast_assign( __entry->timer = timer; __entry->now = jiffies; __entry->function = timer->function; + __entry->baseclk = baseclk; ), - TP_printk("timer=%p function=%ps now=%lu", - __entry->timer, __entry->function, __entry->now) + TP_printk("timer=%p function=%ps now=%lu baseclk=%lu", + __entry->timer, __entry->function, __entry->now, + __entry->baseclk) ); /** diff --git a/kernel/time/timer.c b/kernel/time/timer.c index 8d7918ae4d0c..a9b1bbc2d88d 100644 --- a/kernel/time/timer.c +++ b/kernel/time/timer.c @@ -1293,7 +1293,9 @@ int del_timer_sync(struct timer_list *timer) EXPORT_SYMBOL(del_timer_sync); #endif -static void call_timer_fn(struct timer_list *timer, void (*fn)(struct timer_list *)) +static void call_timer_fn(struct timer_list *timer, + void (*fn)(struct timer_list *), + unsigned long baseclk) { int count = preempt_count(); @@ -1316,7 +1318,7 @@ static void call_timer_fn(struct timer_list *timer, void (*fn)(struct timer_list */ lock_map_acquire(&lockdep_map); - trace_timer_expire_entry(timer); + trace_timer_expire_entry(timer, baseclk); fn(timer); trace_timer_expire_exit(timer); @@ -1337,6 +1339,13 @@ static void call_timer_fn(struct timer_list *timer, void (*fn)(struct timer_list static void expire_timers(struct timer_base *base, struct hlist_head *head) { + /* + * This value is required only for tracing. base->clk was + * incremented directly before expire_timers was called. But expiry + * is related to the old base->clk value. + */ + unsigned long baseclk = base->clk - 1; + while (!hlist_empty(head)) { struct timer_list *timer; void (*fn)(struct timer_list *); @@ -1350,11 +1359,11 @@ static void expire_timers(struct timer_base *base, struct hlist_head *head) if (timer->flags & TIMER_IRQSAFE) { raw_spin_unlock(&base->lock); - call_timer_fn(timer, fn); + call_timer_fn(timer, fn, baseclk); raw_spin_lock(&base->lock); } else { raw_spin_unlock_irq(&base->lock); - call_timer_fn(timer, fn); + call_timer_fn(timer, fn, baseclk); raw_spin_lock_irq(&base->lock); } } -- cgit v1.2.3-59-g8ed1b From 7a8e61f8478639072d402a26789055a4a4de8f77 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sat, 23 Mar 2019 11:36:19 +0100 Subject: timekeeping: Force upper bound for setting CLOCK_REALTIME Several people reported testing failures after setting CLOCK_REALTIME close to the limits of the kernel internal representation in nanoseconds, i.e. year 2262. The failures are exposed in subsequent operations, i.e. when arming timers or when the advancing CLOCK_MONOTONIC makes the calculation of CLOCK_REALTIME overflow into negative space. 
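The year 2262 mentioned above falls directly out of the width of ktime_t, and the cutoff introduced further down backs that off by an assumed maximum uptime. Rough arithmetic:

/* ktime_t: signed 64-bit nanoseconds, epoch 1970                      */
/*   KTIME_MAX  = 2^63 - 1 ns  ~= 9.22e18 ns  ~= 292 years             */
/*   1970 + 292 ~= 2262         (absolute wraparound point)            */
/*   2262 - 30  ~= 2232         (TIME_SETTOD_SEC_MAX, see below)       */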
Now people start to paper over the underlying problem by clamping calculations to the valid range, but that's just wrong because such workarounds will prevent detection of real issues as well. It is reasonable to force an upper bound for the various methods of setting CLOCK_REALTIME. Year 2262 is the absolute upper bound. Assume a maximum uptime of 30 years which is plenty enough even for esoteric embedded systems. That results in an upper bound of year 2232 for setting the time. Once that limit is reached in reality this limit is only a small part of the problem space. But until then this stops people from trying to paper over the problem at the wrong places. Reported-by: Xiongfeng Wang Reported-by: Hongbo Yao Signed-off-by: Thomas Gleixner Cc: John Stultz Cc: Stephen Boyd Cc: Miroslav Lichvar Cc: Arnd Bergmann Cc: Richard Cochran Cc: Peter Zijlstra Link: https://lkml.kernel.org/r/alpine.DEB.2.21.1903231125480.2157@nanos.tec.linutronix.de --- include/linux/time64.h | 21 +++++++++++++++++++++ kernel/time/time.c | 2 +- kernel/time/timekeeping.c | 6 +++--- 3 files changed, 25 insertions(+), 4 deletions(-) diff --git a/include/linux/time64.h b/include/linux/time64.h index f38d382ffec1..a620ee610b9f 100644 --- a/include/linux/time64.h +++ b/include/linux/time64.h @@ -33,6 +33,17 @@ struct itimerspec64 { #define KTIME_MAX ((s64)~((u64)1 << 63)) #define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC) +/* + * Limits for settimeofday(): + * + * To prevent setting the time close to the wraparound point time setting + * is limited so a reasonable uptime can be accomodated. Uptime of 30 years + * should be really sufficient, which means the cutoff is 2232. At that + * point the cutoff is just a small part of the larger problem. + */ +#define TIME_UPTIME_SEC_MAX (30LL * 365 * 24 *3600) +#define TIME_SETTOD_SEC_MAX (KTIME_SEC_MAX - TIME_UPTIME_SEC_MAX) + static inline int timespec64_equal(const struct timespec64 *a, const struct timespec64 *b) { @@ -100,6 +111,16 @@ static inline bool timespec64_valid_strict(const struct timespec64 *ts) return true; } +static inline bool timespec64_valid_settod(const struct timespec64 *ts) +{ + if (!timespec64_valid(ts)) + return false; + /* Disallow values which cause overflow issues vs. 
CLOCK_REALTIME */ + if ((unsigned long long)ts->tv_sec >= TIME_SETTOD_SEC_MAX) + return false; + return true; +} + /** * timespec64_to_ns - Convert timespec64 to nanoseconds * @ts: pointer to the timespec64 variable to be converted diff --git a/kernel/time/time.c b/kernel/time/time.c index c3f756f8534b..86656bbac232 100644 --- a/kernel/time/time.c +++ b/kernel/time/time.c @@ -171,7 +171,7 @@ int do_sys_settimeofday64(const struct timespec64 *tv, const struct timezone *tz static int firsttime = 1; int error = 0; - if (tv && !timespec64_valid(tv)) + if (tv && !timespec64_valid_settod(tv)) return -EINVAL; error = security_settime64(tv, tz); diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index 540145da33da..5716e28bfa3c 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -1221,7 +1221,7 @@ int do_settimeofday64(const struct timespec64 *ts) unsigned long flags; int ret = 0; - if (!timespec64_valid_strict(ts)) + if (!timespec64_valid_settod(ts)) return -EINVAL; raw_spin_lock_irqsave(&timekeeper_lock, flags); @@ -1278,7 +1278,7 @@ static int timekeeping_inject_offset(const struct timespec64 *ts) /* Make sure the proposed value is valid */ tmp = timespec64_add(tk_xtime(tk), *ts); if (timespec64_compare(&tk->wall_to_monotonic, ts) > 0 || - !timespec64_valid_strict(&tmp)) { + !timespec64_valid_settod(&tmp)) { ret = -EINVAL; goto error; } @@ -1527,7 +1527,7 @@ void __init timekeeping_init(void) unsigned long flags; read_persistent_wall_and_boot_offset(&wall_time, &boot_offset); - if (timespec64_valid_strict(&wall_time) && + if (timespec64_valid_settod(&wall_time) && timespec64_to_ns(&wall_time) > 0) { persistent_clock_exists = true; } else if (timespec64_to_ns(&wall_time) != 0) { -- cgit v1.2.3-59-g8ed1b From aba0954327c831f593702e3a81ef3ad4bec7a838 Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Fri, 29 Mar 2019 11:28:52 +0100 Subject: tick/broadcast: Fix warning about undefined tick_broadcast_oneshot_offline() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Randconfig builds with CONFIG_TICK_ONESHOT=y CONFIG_HOTPLUG_CPU=n trigger kernel/time/tick-broadcast.c:39:13: warning: ‘tick_broadcast_oneshot_offline’ \ declared ‘static’ but never defined [-Wunused-function] due to that function's definition missing. Move the CONFIG_HOTPLUG_CPU ifdeffery around its declaration too. 
Fixes: 1b72d4323798 ("tick: Remove outgoing CPU from broadcast masks") Signed-off-by: Borislav Petkov Acked-by: Thomas Gleixner Reviewed-by: Mukesh Ojha Cc: Valentin Schneider Cc: Frederic Weisbecker Cc: x86@kernel.org Link: https://lkml.kernel.org/r/20190329110508.6621-1-bp@alien8.de --- kernel/time/tick-broadcast.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c index 0283523de045..7541cbca695e 100644 --- a/kernel/time/tick-broadcast.c +++ b/kernel/time/tick-broadcast.c @@ -36,12 +36,16 @@ static __cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(tick_broadcast_lock); static void tick_broadcast_setup_oneshot(struct clock_event_device *bc); static void tick_broadcast_clear_oneshot(int cpu); static void tick_resume_broadcast_oneshot(struct clock_event_device *bc); +# ifdef CONFIG_HOTPLUG_CPU static void tick_broadcast_oneshot_offline(unsigned int cpu); +# endif #else static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc) { BUG(); } static inline void tick_broadcast_clear_oneshot(int cpu) { } static inline void tick_resume_broadcast_oneshot(struct clock_event_device *bc) { } +# ifdef CONFIG_HOTPLUG_CPU static inline void tick_broadcast_oneshot_offline(unsigned int cpu) { } +# endif #endif /* -- cgit v1.2.3-59-g8ed1b From 13e792a19d4e3a1c64e94197ba357685fd584ded Mon Sep 17 00:00:00 2001 From: Laurent Gauthier Date: Sat, 5 Jan 2019 00:07:45 +0100 Subject: tick: Fix typos in comments Signed-off-by: Laurent Gauthier Cc: Frederic Weisbecker Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Signed-off-by: Ingo Molnar --- kernel/time/tick-broadcast.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c index 7541cbca695e..e51778c312f1 100644 --- a/kernel/time/tick-broadcast.c +++ b/kernel/time/tick-broadcast.c @@ -809,13 +809,13 @@ int __tick_broadcast_oneshot_control(enum tick_broadcast_state state) * either the CPU handling the broadcast * interrupt or we got woken by something else. * - * We are not longer in the broadcast mask, so + * We are no longer in the broadcast mask, so * if the cpu local expiry time is already * reached, we would reprogram the cpu local * timer with an already expired event. * * This can lead to a ping-pong when we return - * to idle and therefor rearm the broadcast + * to idle and therefore rearm the broadcast * timer before the cpu local timer was able * to fire. This happens because the forced * reprogramming makes sure that the event -- cgit v1.2.3-59-g8ed1b
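Taken together, the first three patches in this series retire the tasklet_hrtimer combo in favour of plain hrtimers running in softirq context. The recurring conversion pattern, sketched in isolation (kernel context assumed; struct and function names here are illustrative, not taken from the patches):

#include <linux/hrtimer.h>
#include <linux/ktime.h>

struct foo {
	struct hrtimer timer;			/* was: struct tasklet_hrtimer */
};

static enum hrtimer_restart foo_timer_fn(struct hrtimer *t)
{
	struct foo *f = container_of(t, struct foo, timer);

	/* work that used to run in the tasklet now runs here,
	 * already in softirq context; per-instance state is in f */
	hrtimer_forward_now(t, ms_to_ktime(100));	/* re-arm periodically */
	return HRTIMER_RESTART;
}

static void foo_init(struct foo *f)
{
	hrtimer_init(&f->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_SOFT);
	f->timer.function = foo_timer_fn;
	hrtimer_start(&f->timer, ms_to_ktime(100), HRTIMER_MODE_REL_SOFT);
}

/* teardown: hrtimer_cancel(&f->timer); no tasklet_kill() needed any more */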