author    Ingo Molnar <mingo@kernel.org>  2012-04-14 13:18:27 +0200
committer Ingo Molnar <mingo@kernel.org>  2012-04-14 13:19:04 +0200
commit    6ac1ef482d7ae0c690f1640bf6eb818ff9a2d91e (patch)
tree      021cc9f6b477146fcebe6f3be4752abfa2ba18a9 /kernel/time
parent    uprobes/core: Optimize probe hits with the help of a counter (diff)
parent    Merge tag 'v3.4-rc2' into perf/core (diff)
Merge branch 'perf/core' into perf/uprobes
Merge in latest upstream (and the latest perf development tree), to
prepare for tooling changes, and also to pick up v3.4 MM changes that
the uprobes code needs to take care of.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/time')
-rw-r--r--  kernel/time/alarmtimer.c     |   8
-rw-r--r--  kernel/time/clocksource.c    |   2
-rw-r--r--  kernel/time/ntp.c            | 191
-rw-r--r--  kernel/time/tick-broadcast.c |   4
-rw-r--r--  kernel/time/tick-sched.c     |  17
-rw-r--r--  kernel/time/timekeeping.c    | 373
6 files changed, 315 insertions(+), 280 deletions(-)
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index 8a46f5d64504..8a538c55fc7b 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -96,6 +96,11 @@ static int alarmtimer_rtc_add_device(struct device *dev,
return 0;
}
+static inline void alarmtimer_rtc_timer_init(void)
+{
+ rtc_timer_init(&rtctimer, NULL, NULL);
+}
+
static struct class_interface alarmtimer_rtc_interface = {
.add_dev = &alarmtimer_rtc_add_device,
};
@@ -117,6 +122,7 @@ static inline struct rtc_device *alarmtimer_get_rtcdev(void)
#define rtcdev (NULL)
static inline int alarmtimer_rtc_interface_setup(void) { return 0; }
static inline void alarmtimer_rtc_interface_remove(void) { }
+static inline void alarmtimer_rtc_timer_init(void) { }
#endif
/**
@@ -783,6 +789,8 @@ static int __init alarmtimer_init(void)
.nsleep = alarm_timer_nsleep,
};
+ alarmtimer_rtc_timer_init();
+
posix_timers_register_clock(CLOCK_REALTIME_ALARM, &alarm_clock);
posix_timers_register_clock(CLOCK_BOOTTIME_ALARM, &alarm_clock);
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index a45ca167ab24..c9583382141a 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -500,7 +500,7 @@ static u32 clocksource_max_adjustment(struct clocksource *cs)
{
u64 ret;
/*
- * We won't try to correct for more then 11% adjustments (110,000 ppm),
+ * We won't try to correct for more than 11% adjustments (110,000 ppm),
*/
ret = (u64)cs->mult * 11;
do_div(ret,100);
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index f6117a4c7cb8..f03fd83b170b 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -22,17 +22,18 @@
* NTP timekeeping variables:
*/
+DEFINE_SPINLOCK(ntp_lock);
+
+
/* USER_HZ period (usecs): */
unsigned long tick_usec = TICK_USEC;
/* ACTHZ period (nsecs): */
unsigned long tick_nsec;
-u64 tick_length;
+static u64 tick_length;
static u64 tick_length_base;
-static struct hrtimer leap_timer;
-
#define MAX_TICKADJ 500LL /* usecs */
#define MAX_TICKADJ_SCALED \
(((MAX_TICKADJ * NSEC_PER_USEC) << NTP_SCALE_SHIFT) / NTP_INTERVAL_FREQ)
@@ -49,7 +50,7 @@ static struct hrtimer leap_timer;
static int time_state = TIME_OK;
/* clock status bits: */
-int time_status = STA_UNSYNC;
+static int time_status = STA_UNSYNC;
/* TAI offset (secs): */
static long time_tai;
@@ -133,7 +134,7 @@ static inline void pps_reset_freq_interval(void)
/**
* pps_clear - Clears the PPS state variables
*
- * Must be called while holding a write on the xtime_lock
+ * Must be called while holding a write on the ntp_lock
*/
static inline void pps_clear(void)
{
@@ -149,7 +150,7 @@ static inline void pps_clear(void)
* the last PPS signal. When it reaches 0, indicate that PPS signal is
* missing.
*
- * Must be called while holding a write on the xtime_lock
+ * Must be called while holding a write on the ntp_lock
*/
static inline void pps_dec_valid(void)
{
@@ -233,6 +234,17 @@ static inline void pps_fill_timex(struct timex *txc)
#endif /* CONFIG_NTP_PPS */
+
+/**
+ * ntp_synced - Returns 1 if the NTP status is not UNSYNC
+ *
+ */
+static inline int ntp_synced(void)
+{
+ return !(time_status & STA_UNSYNC);
+}
+
+
/*
* NTP methods:
*/
@@ -275,7 +287,7 @@ static inline s64 ntp_update_offset_fll(s64 offset64, long secs)
time_status |= STA_MODE;
- return div_s64(offset64 << (NTP_SCALE_SHIFT - SHIFT_FLL), secs);
+ return div64_long(offset64 << (NTP_SCALE_SHIFT - SHIFT_FLL), secs);
}
static void ntp_update_offset(long offset)
@@ -330,11 +342,13 @@ static void ntp_update_offset(long offset)
/**
* ntp_clear - Clears the NTP state variables
- *
- * Must be called while holding a write on the xtime_lock
*/
void ntp_clear(void)
{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ntp_lock, flags);
+
time_adjust = 0; /* stop active adjtime() */
time_status |= STA_UNSYNC;
time_maxerror = NTP_PHASE_LIMIT;
@@ -347,63 +361,81 @@ void ntp_clear(void)
/* Clear PPS state variables */
pps_clear();
+ spin_unlock_irqrestore(&ntp_lock, flags);
+
+}
+
+
+u64 ntp_tick_length(void)
+{
+ unsigned long flags;
+ s64 ret;
+
+ spin_lock_irqsave(&ntp_lock, flags);
+ ret = tick_length;
+ spin_unlock_irqrestore(&ntp_lock, flags);
+ return ret;
}
+
/*
- * Leap second processing. If in leap-insert state at the end of the
- * day, the system clock is set back one second; if in leap-delete
- * state, the system clock is set ahead one second.
+ * this routine handles the overflow of the microsecond field
+ *
+ * The tricky bits of code to handle the accurate clock support
+ * were provided by Dave Mills (Mills@UDEL.EDU) of NTP fame.
+ * They were originally developed for SUN and DEC kernels.
+ * All the kudos should go to Dave for this stuff.
+ *
+ * Also handles leap second processing, and returns leap offset
*/
-static enum hrtimer_restart ntp_leap_second(struct hrtimer *timer)
+int second_overflow(unsigned long secs)
{
- enum hrtimer_restart res = HRTIMER_NORESTART;
+ s64 delta;
+ int leap = 0;
+ unsigned long flags;
- write_seqlock(&xtime_lock);
+ spin_lock_irqsave(&ntp_lock, flags);
+ /*
+ * Leap second processing. If in leap-insert state at the end of the
+ * day, the system clock is set back one second; if in leap-delete
+ * state, the system clock is set ahead one second.
+ */
switch (time_state) {
case TIME_OK:
+ if (time_status & STA_INS)
+ time_state = TIME_INS;
+ else if (time_status & STA_DEL)
+ time_state = TIME_DEL;
break;
case TIME_INS:
- timekeeping_leap_insert(-1);
- time_state = TIME_OOP;
- printk(KERN_NOTICE
- "Clock: inserting leap second 23:59:60 UTC\n");
- hrtimer_add_expires_ns(&leap_timer, NSEC_PER_SEC);
- res = HRTIMER_RESTART;
+ if (secs % 86400 == 0) {
+ leap = -1;
+ time_state = TIME_OOP;
+ printk(KERN_NOTICE
+ "Clock: inserting leap second 23:59:60 UTC\n");
+ }
break;
case TIME_DEL:
- timekeeping_leap_insert(1);
- time_tai--;
- time_state = TIME_WAIT;
- printk(KERN_NOTICE
- "Clock: deleting leap second 23:59:59 UTC\n");
+ if ((secs + 1) % 86400 == 0) {
+ leap = 1;
+ time_tai--;
+ time_state = TIME_WAIT;
+ printk(KERN_NOTICE
+ "Clock: deleting leap second 23:59:59 UTC\n");
+ }
break;
case TIME_OOP:
time_tai++;
time_state = TIME_WAIT;
- /* fall through */
+ break;
+
case TIME_WAIT:
if (!(time_status & (STA_INS | STA_DEL)))
time_state = TIME_OK;
break;
}
- write_sequnlock(&xtime_lock);
-
- return res;
-}
-
-/*
- * this routine handles the overflow of the microsecond field
- *
- * The tricky bits of code to handle the accurate clock support
- * were provided by Dave Mills (Mills@UDEL.EDU) of NTP fame.
- * They were originally developed for SUN and DEC kernels.
- * All the kudos should go to Dave for this stuff.
- */
-void second_overflow(void)
-{
- s64 delta;
/* Bump the maxerror field */
time_maxerror += MAXFREQ / NSEC_PER_USEC;
@@ -423,30 +455,34 @@ void second_overflow(void)
pps_dec_valid();
if (!time_adjust)
- return;
+ goto out;
if (time_adjust > MAX_TICKADJ) {
time_adjust -= MAX_TICKADJ;
tick_length += MAX_TICKADJ_SCALED;
- return;
+ goto out;
}
if (time_adjust < -MAX_TICKADJ) {
time_adjust += MAX_TICKADJ;
tick_length -= MAX_TICKADJ_SCALED;
- return;
+ goto out;
}
tick_length += (s64)(time_adjust * NSEC_PER_USEC / NTP_INTERVAL_FREQ)
<< NTP_SCALE_SHIFT;
time_adjust = 0;
+
+
+
+out:
+ spin_unlock_irqrestore(&ntp_lock, flags);
+
+ return leap;
}
#ifdef CONFIG_GENERIC_CMOS_UPDATE
-/* Disable the cmos update - used by virtualization and embedded */
-int no_sync_cmos_clock __read_mostly;
-
static void sync_cmos_clock(struct work_struct *work);
static DECLARE_DELAYED_WORK(sync_cmos_work, sync_cmos_clock);
@@ -493,35 +529,13 @@ static void sync_cmos_clock(struct work_struct *work)
static void notify_cmos_timer(void)
{
- if (!no_sync_cmos_clock)
- schedule_delayed_work(&sync_cmos_work, 0);
+ schedule_delayed_work(&sync_cmos_work, 0);
}
#else
static inline void notify_cmos_timer(void) { }
#endif
-/*
- * Start the leap seconds timer:
- */
-static inline void ntp_start_leap_timer(struct timespec *ts)
-{
- long now = ts->tv_sec;
-
- if (time_status & STA_INS) {
- time_state = TIME_INS;
- now += 86400 - now % 86400;
- hrtimer_start(&leap_timer, ktime_set(now, 0), HRTIMER_MODE_ABS);
-
- return;
- }
-
- if (time_status & STA_DEL) {
- time_state = TIME_DEL;
- now += 86400 - (now + 1) % 86400;
- hrtimer_start(&leap_timer, ktime_set(now, 0), HRTIMER_MODE_ABS);
- }
-}
/*
* Propagate a new txc->status value into the NTP state:
@@ -546,22 +560,6 @@ static inline void process_adj_status(struct timex *txc, struct timespec *ts)
time_status &= STA_RONLY;
time_status |= txc->status & ~STA_RONLY;
- switch (time_state) {
- case TIME_OK:
- ntp_start_leap_timer(ts);
- break;
- case TIME_INS:
- case TIME_DEL:
- time_state = TIME_OK;
- ntp_start_leap_timer(ts);
- case TIME_WAIT:
- if (!(time_status & (STA_INS | STA_DEL)))
- time_state = TIME_OK;
- break;
- case TIME_OOP:
- hrtimer_restart(&leap_timer);
- break;
- }
}
/*
* Called with the xtime lock held, so we can access and modify
@@ -643,9 +641,6 @@ int do_adjtimex(struct timex *txc)
(txc->tick < 900000/USER_HZ ||
txc->tick > 1100000/USER_HZ))
return -EINVAL;
-
- if (txc->modes & ADJ_STATUS && time_state != TIME_OK)
- hrtimer_cancel(&leap_timer);
}
if (txc->modes & ADJ_SETOFFSET) {
@@ -663,7 +658,7 @@ int do_adjtimex(struct timex *txc)
getnstimeofday(&ts);
- write_seqlock_irq(&xtime_lock);
+ spin_lock_irq(&ntp_lock);
if (txc->modes & ADJ_ADJTIME) {
long save_adjust = time_adjust;
@@ -705,7 +700,7 @@ int do_adjtimex(struct timex *txc)
/* fill PPS status fields */
pps_fill_timex(txc);
- write_sequnlock_irq(&xtime_lock);
+ spin_unlock_irq(&ntp_lock);
txc->time.tv_sec = ts.tv_sec;
txc->time.tv_usec = ts.tv_nsec;
@@ -903,7 +898,7 @@ void hardpps(const struct timespec *phase_ts, const struct timespec *raw_ts)
pts_norm = pps_normalize_ts(*phase_ts);
- write_seqlock_irqsave(&xtime_lock, flags);
+ spin_lock_irqsave(&ntp_lock, flags);
/* clear the error bits, they will be set again if needed */
time_status &= ~(STA_PPSJITTER | STA_PPSWANDER | STA_PPSERROR);
@@ -916,7 +911,7 @@ void hardpps(const struct timespec *phase_ts, const struct timespec *raw_ts)
* just start the frequency interval */
if (unlikely(pps_fbase.tv_sec == 0)) {
pps_fbase = *raw_ts;
- write_sequnlock_irqrestore(&xtime_lock, flags);
+ spin_unlock_irqrestore(&ntp_lock, flags);
return;
}
@@ -931,7 +926,7 @@ void hardpps(const struct timespec *phase_ts, const struct timespec *raw_ts)
time_status |= STA_PPSJITTER;
/* restart the frequency calibration interval */
pps_fbase = *raw_ts;
- write_sequnlock_irqrestore(&xtime_lock, flags);
+ spin_unlock_irqrestore(&ntp_lock, flags);
pr_err("hardpps: PPSJITTER: bad pulse\n");
return;
}
@@ -948,7 +943,7 @@ void hardpps(const struct timespec *phase_ts, const struct timespec *raw_ts)
hardpps_update_phase(pts_norm.nsec);
- write_sequnlock_irqrestore(&xtime_lock, flags);
+ spin_unlock_irqrestore(&ntp_lock, flags);
}
EXPORT_SYMBOL(hardpps);
@@ -967,6 +962,4 @@ __setup("ntp_tick_adj=", ntp_tick_adj_setup);
void __init ntp_init(void)
{
ntp_clear();
- hrtimer_init(&leap_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
- leap_timer.function = ntp_leap_second;
}
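
The ntp.c changes above retire the leap-second hrtimer and the xtime_lock
dependency: leap handling moves into second_overflow(), which now takes the
current wall-clock seconds and returns a leap offset for the caller to apply,
and tick_length becomes static, readable only through the locked
ntp_tick_length() accessor. What follows is a minimal userspace sketch of
that call pattern, with a pthread spinlock standing in for ntp_lock; the
names and values are illustrative, not the kernel's API.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_spinlock_t ntp_lock;
static uint64_t tick_length = 1000000000ULL; /* illustrative value      */
static int insert_pending = 1;               /* stand-in for STA_INS    */

/* Snapshot accessor: outside code no longer touches tick_length directly. */
static uint64_t ntp_tick_length(void)
{
	uint64_t ret;

	pthread_spin_lock(&ntp_lock);
	ret = tick_length;
	pthread_spin_unlock(&ntp_lock);
	return ret;
}

/*
 * Day-boundary leap check in the style of the reworked second_overflow():
 * returns -1 to insert a leap second at midnight UTC, 0 otherwise.
 */
static int second_overflow(unsigned long secs)
{
	int leap = 0;

	pthread_spin_lock(&ntp_lock);
	if (insert_pending && secs % 86400 == 0) {
		leap = -1;              /* 23:59:60 gets inserted */
		insert_pending = 0;
	}
	pthread_spin_unlock(&ntp_lock);
	return leap;
}

int main(void)
{
	unsigned long xtime_sec = 86400; /* midnight after day one */

	pthread_spin_init(&ntp_lock, PTHREAD_PROCESS_PRIVATE);
	xtime_sec += second_overflow(xtime_sec); /* caller applies leap */
	printf("sec=%lu tick_length=%llu\n", xtime_sec,
	       (unsigned long long)ntp_tick_length());
	return 0;
}

Returning the leap offset instead of mutating the clock from an hrtimer
lets the timekeeping core apply the correction under its own lock, which is
exactly what the timekeeping.c hunks below do.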
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index fd4a7b1625a2..e883f57a3cd3 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -575,11 +575,15 @@ void tick_broadcast_switch_to_oneshot(void)
unsigned long flags;
raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
+ if (cpumask_empty(tick_get_broadcast_mask()))
+ goto end;
tick_broadcast_device.mode = TICKDEV_MODE_ONESHOT;
bc = tick_broadcast_device.evtdev;
if (bc)
tick_broadcast_setup_oneshot(bc);
+
+end:
raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
}
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 7656642e4b8e..3526038f2836 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -182,11 +182,7 @@ static void tick_nohz_stop_idle(int cpu, ktime_t now)
static ktime_t tick_nohz_start_idle(int cpu, struct tick_sched *ts)
{
- ktime_t now;
-
- now = ktime_get();
-
- update_ts_time_stats(cpu, ts, now, NULL);
+ ktime_t now = ktime_get();
ts->idle_entrytime = now;
ts->idle_active = 1;
@@ -562,20 +558,21 @@ void tick_nohz_idle_exit(void)
local_irq_disable();
- if (ts->idle_active || (ts->inidle && ts->tick_stopped))
+ WARN_ON_ONCE(!ts->inidle);
+
+ ts->inidle = 0;
+
+ if (ts->idle_active || ts->tick_stopped)
now = ktime_get();
if (ts->idle_active)
tick_nohz_stop_idle(cpu, now);
- if (!ts->inidle || !ts->tick_stopped) {
- ts->inidle = 0;
+ if (!ts->tick_stopped) {
local_irq_enable();
return;
}
- ts->inidle = 0;
-
/* Update jiffies first */
select_nohz_load_balancer(0);
tick_do_update_jiffies64(now);
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 0c6358186401..d66b21308f7c 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -25,6 +25,8 @@
struct timekeeper {
/* Current clocksource used for timekeeping. */
struct clocksource *clock;
+ /* NTP adjusted clock multiplier */
+ u32 mult;
/* The shift value of the current clocksource. */
int shift;
@@ -45,12 +47,47 @@ struct timekeeper {
/* Shift conversion between clock shifted nano seconds and
* ntp shifted nano seconds. */
int ntp_error_shift;
- /* NTP adjusted clock multiplier */
- u32 mult;
+
+ /* The current time */
+ struct timespec xtime;
+ /*
+ * wall_to_monotonic is what we need to add to xtime (or xtime corrected
+ * for sub jiffie times) to get to monotonic time. Monotonic is pegged
+ * at zero at system boot time, so wall_to_monotonic will be negative,
+ * however, we will ALWAYS keep the tv_nsec part positive so we can use
+ * the usual normalization.
+ *
+ * wall_to_monotonic is moved after resume from suspend for the
+ * monotonic time not to jump. We need to add total_sleep_time to
+ * wall_to_monotonic to get the real boot based time offset.
+ *
+ * - wall_to_monotonic is no longer the boot time, getboottime must be
+ * used instead.
+ */
+ struct timespec wall_to_monotonic;
+ /* time spent in suspend */
+ struct timespec total_sleep_time;
+ /* The raw monotonic time for the CLOCK_MONOTONIC_RAW posix clock. */
+ struct timespec raw_time;
+
+ /* Seqlock for all timekeeper values */
+ seqlock_t lock;
};
static struct timekeeper timekeeper;
+/*
+ * This read-write spinlock protects us from races in SMP while
+ * playing with xtime.
+ */
+__cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock);
+
+
+/* flag for if timekeeping is suspended */
+int __read_mostly timekeeping_suspended;
+
+
+
/**
* timekeeper_setup_internals - Set up internals to use clocksource clock.
*
@@ -135,49 +172,18 @@ static inline s64 timekeeping_get_ns_raw(void)
return clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);
}
-/*
- * This read-write spinlock protects us from races in SMP while
- * playing with xtime.
- */
-__cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock);
-
-
-/*
- * The current time
- * wall_to_monotonic is what we need to add to xtime (or xtime corrected
- * for sub jiffie times) to get to monotonic time. Monotonic is pegged
- * at zero at system boot time, so wall_to_monotonic will be negative,
- * however, we will ALWAYS keep the tv_nsec part positive so we can use
- * the usual normalization.
- *
- * wall_to_monotonic is moved after resume from suspend for the monotonic
- * time not to jump. We need to add total_sleep_time to wall_to_monotonic
- * to get the real boot based time offset.
- *
- * - wall_to_monotonic is no longer the boot time, getboottime must be
- * used instead.
- */
-static struct timespec xtime __attribute__ ((aligned (16)));
-static struct timespec wall_to_monotonic __attribute__ ((aligned (16)));
-static struct timespec total_sleep_time;
-
-/*
- * The raw monotonic time for the CLOCK_MONOTONIC_RAW posix clock.
- */
-static struct timespec raw_time;
-
-/* flag for if timekeeping is suspended */
-int __read_mostly timekeeping_suspended;
-
-/* must hold xtime_lock */
-void timekeeping_leap_insert(int leapsecond)
+/* must hold write on timekeeper.lock */
+static void timekeeping_update(bool clearntp)
{
- xtime.tv_sec += leapsecond;
- wall_to_monotonic.tv_sec -= leapsecond;
- update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
- timekeeper.mult);
+ if (clearntp) {
+ timekeeper.ntp_error = 0;
+ ntp_clear();
+ }
+ update_vsyscall(&timekeeper.xtime, &timekeeper.wall_to_monotonic,
+ timekeeper.clock, timekeeper.mult);
}
+
/**
* timekeeping_forward_now - update clock to the current time
*
@@ -202,10 +208,10 @@ static void timekeeping_forward_now(void)
/* If arch requires, add in gettimeoffset() */
nsec += arch_gettimeoffset();
- timespec_add_ns(&xtime, nsec);
+ timespec_add_ns(&timekeeper.xtime, nsec);
nsec = clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);
- timespec_add_ns(&raw_time, nsec);
+ timespec_add_ns(&timekeeper.raw_time, nsec);
}
/**
@@ -222,15 +228,15 @@ void getnstimeofday(struct timespec *ts)
WARN_ON(timekeeping_suspended);
do {
- seq = read_seqbegin(&xtime_lock);
+ seq = read_seqbegin(&timekeeper.lock);
- *ts = xtime;
+ *ts = timekeeper.xtime;
nsecs = timekeeping_get_ns();
/* If arch requires, add in gettimeoffset() */
nsecs += arch_gettimeoffset();
- } while (read_seqretry(&xtime_lock, seq));
+ } while (read_seqretry(&timekeeper.lock, seq));
timespec_add_ns(ts, nsecs);
}
@@ -245,14 +251,16 @@ ktime_t ktime_get(void)
WARN_ON(timekeeping_suspended);
do {
- seq = read_seqbegin(&xtime_lock);
- secs = xtime.tv_sec + wall_to_monotonic.tv_sec;
- nsecs = xtime.tv_nsec + wall_to_monotonic.tv_nsec;
+ seq = read_seqbegin(&timekeeper.lock);
+ secs = timekeeper.xtime.tv_sec +
+ timekeeper.wall_to_monotonic.tv_sec;
+ nsecs = timekeeper.xtime.tv_nsec +
+ timekeeper.wall_to_monotonic.tv_nsec;
nsecs += timekeeping_get_ns();
/* If arch requires, add in gettimeoffset() */
nsecs += arch_gettimeoffset();
- } while (read_seqretry(&xtime_lock, seq));
+ } while (read_seqretry(&timekeeper.lock, seq));
/*
* Use ktime_set/ktime_add_ns to create a proper ktime on
* 32-bit architectures without CONFIG_KTIME_SCALAR.
@@ -278,14 +286,14 @@ void ktime_get_ts(struct timespec *ts)
WARN_ON(timekeeping_suspended);
do {
- seq = read_seqbegin(&xtime_lock);
- *ts = xtime;
- tomono = wall_to_monotonic;
+ seq = read_seqbegin(&timekeeper.lock);
+ *ts = timekeeper.xtime;
+ tomono = timekeeper.wall_to_monotonic;
nsecs = timekeeping_get_ns();
/* If arch requires, add in gettimeoffset() */
nsecs += arch_gettimeoffset();
- } while (read_seqretry(&xtime_lock, seq));
+ } while (read_seqretry(&timekeeper.lock, seq));
set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec,
ts->tv_nsec + tomono.tv_nsec + nsecs);
@@ -313,10 +321,10 @@ void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real)
do {
u32 arch_offset;
- seq = read_seqbegin(&xtime_lock);
+ seq = read_seqbegin(&timekeeper.lock);
- *ts_raw = raw_time;
- *ts_real = xtime;
+ *ts_raw = timekeeper.raw_time;
+ *ts_real = timekeeper.xtime;
nsecs_raw = timekeeping_get_ns_raw();
nsecs_real = timekeeping_get_ns();
@@ -326,7 +334,7 @@ void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real)
nsecs_raw += arch_offset;
nsecs_real += arch_offset;
- } while (read_seqretry(&xtime_lock, seq));
+ } while (read_seqretry(&timekeeper.lock, seq));
timespec_add_ns(ts_raw, nsecs_raw);
timespec_add_ns(ts_real, nsecs_real);
@@ -365,23 +373,19 @@ int do_settimeofday(const struct timespec *tv)
if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
return -EINVAL;
- write_seqlock_irqsave(&xtime_lock, flags);
+ write_seqlock_irqsave(&timekeeper.lock, flags);
timekeeping_forward_now();
- ts_delta.tv_sec = tv->tv_sec - xtime.tv_sec;
- ts_delta.tv_nsec = tv->tv_nsec - xtime.tv_nsec;
- wall_to_monotonic = timespec_sub(wall_to_monotonic, ts_delta);
+ ts_delta.tv_sec = tv->tv_sec - timekeeper.xtime.tv_sec;
+ ts_delta.tv_nsec = tv->tv_nsec - timekeeper.xtime.tv_nsec;
+ timekeeper.wall_to_monotonic =
+ timespec_sub(timekeeper.wall_to_monotonic, ts_delta);
- xtime = *tv;
-
- timekeeper.ntp_error = 0;
- ntp_clear();
+ timekeeper.xtime = *tv;
+ timekeeping_update(true);
- update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
- timekeeper.mult);
-
- write_sequnlock_irqrestore(&xtime_lock, flags);
+ write_sequnlock_irqrestore(&timekeeper.lock, flags);
/* signal hrtimers about time change */
clock_was_set();
@@ -405,20 +409,17 @@ int timekeeping_inject_offset(struct timespec *ts)
if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
return -EINVAL;
- write_seqlock_irqsave(&xtime_lock, flags);
+ write_seqlock_irqsave(&timekeeper.lock, flags);
timekeeping_forward_now();
- xtime = timespec_add(xtime, *ts);
- wall_to_monotonic = timespec_sub(wall_to_monotonic, *ts);
-
- timekeeper.ntp_error = 0;
- ntp_clear();
+ timekeeper.xtime = timespec_add(timekeeper.xtime, *ts);
+ timekeeper.wall_to_monotonic =
+ timespec_sub(timekeeper.wall_to_monotonic, *ts);
- update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
- timekeeper.mult);
+ timekeeping_update(true);
- write_sequnlock_irqrestore(&xtime_lock, flags);
+ write_sequnlock_irqrestore(&timekeeper.lock, flags);
/* signal hrtimers about time change */
clock_was_set();
@@ -435,9 +436,12 @@ EXPORT_SYMBOL(timekeeping_inject_offset);
static int change_clocksource(void *data)
{
struct clocksource *new, *old;
+ unsigned long flags;
new = (struct clocksource *) data;
+ write_seqlock_irqsave(&timekeeper.lock, flags);
+
timekeeping_forward_now();
if (!new->enable || new->enable(new) == 0) {
old = timekeeper.clock;
@@ -445,6 +449,10 @@ static int change_clocksource(void *data)
if (old->disable)
old->disable(old);
}
+ timekeeping_update(true);
+
+ write_sequnlock_irqrestore(&timekeeper.lock, flags);
+
return 0;
}
@@ -490,11 +498,11 @@ void getrawmonotonic(struct timespec *ts)
s64 nsecs;
do {
- seq = read_seqbegin(&xtime_lock);
+ seq = read_seqbegin(&timekeeper.lock);
nsecs = timekeeping_get_ns_raw();
- *ts = raw_time;
+ *ts = timekeeper.raw_time;
- } while (read_seqretry(&xtime_lock, seq));
+ } while (read_seqretry(&timekeeper.lock, seq));
timespec_add_ns(ts, nsecs);
}
@@ -510,24 +518,30 @@ int timekeeping_valid_for_hres(void)
int ret;
do {
- seq = read_seqbegin(&xtime_lock);
+ seq = read_seqbegin(&timekeeper.lock);
ret = timekeeper.clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;
- } while (read_seqretry(&xtime_lock, seq));
+ } while (read_seqretry(&timekeeper.lock, seq));
return ret;
}
/**
* timekeeping_max_deferment - Returns max time the clocksource can be deferred
- *
- * Caller must observe xtime_lock via read_seqbegin/read_seqretry to
- * ensure that the clocksource does not change!
*/
u64 timekeeping_max_deferment(void)
{
- return timekeeper.clock->max_idle_ns;
+ unsigned long seq;
+ u64 ret;
+ do {
+ seq = read_seqbegin(&timekeeper.lock);
+
+ ret = timekeeper.clock->max_idle_ns;
+
+ } while (read_seqretry(&timekeeper.lock, seq));
+
+ return ret;
}
/**
@@ -572,28 +586,29 @@ void __init timekeeping_init(void)
read_persistent_clock(&now);
read_boot_clock(&boot);
- write_seqlock_irqsave(&xtime_lock, flags);
+ seqlock_init(&timekeeper.lock);
ntp_init();
+ write_seqlock_irqsave(&timekeeper.lock, flags);
clock = clocksource_default_clock();
if (clock->enable)
clock->enable(clock);
timekeeper_setup_internals(clock);
- xtime.tv_sec = now.tv_sec;
- xtime.tv_nsec = now.tv_nsec;
- raw_time.tv_sec = 0;
- raw_time.tv_nsec = 0;
+ timekeeper.xtime.tv_sec = now.tv_sec;
+ timekeeper.xtime.tv_nsec = now.tv_nsec;
+ timekeeper.raw_time.tv_sec = 0;
+ timekeeper.raw_time.tv_nsec = 0;
if (boot.tv_sec == 0 && boot.tv_nsec == 0) {
- boot.tv_sec = xtime.tv_sec;
- boot.tv_nsec = xtime.tv_nsec;
+ boot.tv_sec = timekeeper.xtime.tv_sec;
+ boot.tv_nsec = timekeeper.xtime.tv_nsec;
}
- set_normalized_timespec(&wall_to_monotonic,
+ set_normalized_timespec(&timekeeper.wall_to_monotonic,
-boot.tv_sec, -boot.tv_nsec);
- total_sleep_time.tv_sec = 0;
- total_sleep_time.tv_nsec = 0;
- write_sequnlock_irqrestore(&xtime_lock, flags);
+ timekeeper.total_sleep_time.tv_sec = 0;
+ timekeeper.total_sleep_time.tv_nsec = 0;
+ write_sequnlock_irqrestore(&timekeeper.lock, flags);
}
/* time in seconds when suspend began */
@@ -614,9 +629,11 @@ static void __timekeeping_inject_sleeptime(struct timespec *delta)
return;
}
- xtime = timespec_add(xtime, *delta);
- wall_to_monotonic = timespec_sub(wall_to_monotonic, *delta);
- total_sleep_time = timespec_add(total_sleep_time, *delta);
+ timekeeper.xtime = timespec_add(timekeeper.xtime, *delta);
+ timekeeper.wall_to_monotonic =
+ timespec_sub(timekeeper.wall_to_monotonic, *delta);
+ timekeeper.total_sleep_time = timespec_add(
+ timekeeper.total_sleep_time, *delta);
}
@@ -640,17 +657,15 @@ void timekeeping_inject_sleeptime(struct timespec *delta)
if (!(ts.tv_sec == 0 && ts.tv_nsec == 0))
return;
- write_seqlock_irqsave(&xtime_lock, flags);
+ write_seqlock_irqsave(&timekeeper.lock, flags);
+
timekeeping_forward_now();
__timekeeping_inject_sleeptime(delta);
- timekeeper.ntp_error = 0;
- ntp_clear();
- update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
- timekeeper.mult);
+ timekeeping_update(true);
- write_sequnlock_irqrestore(&xtime_lock, flags);
+ write_sequnlock_irqrestore(&timekeeper.lock, flags);
/* signal hrtimers about time change */
clock_was_set();
@@ -673,7 +688,7 @@ static void timekeeping_resume(void)
clocksource_resume();
- write_seqlock_irqsave(&xtime_lock, flags);
+ write_seqlock_irqsave(&timekeeper.lock, flags);
if (timespec_compare(&ts, &timekeeping_suspend_time) > 0) {
ts = timespec_sub(ts, timekeeping_suspend_time);
@@ -683,7 +698,7 @@ static void timekeeping_resume(void)
timekeeper.clock->cycle_last = timekeeper.clock->read(timekeeper.clock);
timekeeper.ntp_error = 0;
timekeeping_suspended = 0;
- write_sequnlock_irqrestore(&xtime_lock, flags);
+ write_sequnlock_irqrestore(&timekeeper.lock, flags);
touch_softlockup_watchdog();
@@ -701,7 +716,7 @@ static int timekeeping_suspend(void)
read_persistent_clock(&timekeeping_suspend_time);
- write_seqlock_irqsave(&xtime_lock, flags);
+ write_seqlock_irqsave(&timekeeper.lock, flags);
timekeeping_forward_now();
timekeeping_suspended = 1;
@@ -711,7 +726,7 @@ static int timekeeping_suspend(void)
* try to compensate so the difference in system time
* and persistent_clock time stays close to constant.
*/
- delta = timespec_sub(xtime, timekeeping_suspend_time);
+ delta = timespec_sub(timekeeper.xtime, timekeeping_suspend_time);
delta_delta = timespec_sub(delta, old_delta);
if (abs(delta_delta.tv_sec) >= 2) {
/*
@@ -724,7 +739,7 @@ static int timekeeping_suspend(void)
timekeeping_suspend_time =
timespec_add(timekeeping_suspend_time, delta_delta);
}
- write_sequnlock_irqrestore(&xtime_lock, flags);
+ write_sequnlock_irqrestore(&timekeeper.lock, flags);
clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);
clocksource_suspend();
@@ -775,7 +790,7 @@ static __always_inline int timekeeping_bigadjust(s64 error, s64 *interval,
* Now calculate the error in (1 << look_ahead) ticks, but first
* remove the single look ahead already included in the error.
*/
- tick_error = tick_length >> (timekeeper.ntp_error_shift + 1);
+ tick_error = ntp_tick_length() >> (timekeeper.ntp_error_shift + 1);
tick_error -= timekeeper.xtime_interval >> 1;
error = ((error - tick_error) >> look_ahead) + tick_error;
@@ -807,7 +822,7 @@ static void timekeeping_adjust(s64 offset)
int adj;
/*
- * The point of this is to check if the error is greater then half
+ * The point of this is to check if the error is greater than half
* an interval.
*
* First we shift it down from NTP_SHIFT to clocksource->shifted nsecs.
@@ -815,7 +830,7 @@ static void timekeeping_adjust(s64 offset)
* Note we subtract one in the shift, so that error is really error*2.
* This "saves" dividing(shifting) interval twice, but keeps the
* (error > interval) comparison as still measuring if error is
- * larger then half an interval.
+ * larger than half an interval.
*
* Note: It does not "save" on aggravation when reading the code.
*/
@@ -823,7 +838,7 @@ static void timekeeping_adjust(s64 offset)
if (error > interval) {
/*
* We now divide error by 4(via shift), which checks if
- * the error is greater then twice the interval.
+ * the error is greater than twice the interval.
* If it is greater, we need a bigadjust, if its smaller,
* we can adjust by 1.
*/
@@ -854,13 +869,15 @@ static void timekeeping_adjust(s64 offset)
} else /* No adjustment needed */
return;
- WARN_ONCE(timekeeper.clock->maxadj &&
- (timekeeper.mult + adj > timekeeper.clock->mult +
- timekeeper.clock->maxadj),
- "Adjusting %s more then 11%% (%ld vs %ld)\n",
+ if (unlikely(timekeeper.clock->maxadj &&
+ (timekeeper.mult + adj >
+ timekeeper.clock->mult + timekeeper.clock->maxadj))) {
+ printk_once(KERN_WARNING
+ "Adjusting %s more than 11%% (%ld vs %ld)\n",
timekeeper.clock->name, (long)timekeeper.mult + adj,
(long)timekeeper.clock->mult +
timekeeper.clock->maxadj);
+ }
/*
* So the following can be confusing.
*
@@ -932,7 +949,7 @@ static cycle_t logarithmic_accumulation(cycle_t offset, int shift)
u64 nsecps = (u64)NSEC_PER_SEC << timekeeper.shift;
u64 raw_nsecs;
- /* If the offset is smaller then a shifted interval, do nothing */
+ /* If the offset is smaller than a shifted interval, do nothing */
if (offset < timekeeper.cycle_interval<<shift)
return offset;
@@ -942,23 +959,25 @@ static cycle_t logarithmic_accumulation(cycle_t offset, int shift)
timekeeper.xtime_nsec += timekeeper.xtime_interval << shift;
while (timekeeper.xtime_nsec >= nsecps) {
+ int leap;
timekeeper.xtime_nsec -= nsecps;
- xtime.tv_sec++;
- second_overflow();
+ timekeeper.xtime.tv_sec++;
+ leap = second_overflow(timekeeper.xtime.tv_sec);
+ timekeeper.xtime.tv_sec += leap;
}
/* Accumulate raw time */
raw_nsecs = timekeeper.raw_interval << shift;
- raw_nsecs += raw_time.tv_nsec;
+ raw_nsecs += timekeeper.raw_time.tv_nsec;
if (raw_nsecs >= NSEC_PER_SEC) {
u64 raw_secs = raw_nsecs;
raw_nsecs = do_div(raw_secs, NSEC_PER_SEC);
- raw_time.tv_sec += raw_secs;
+ timekeeper.raw_time.tv_sec += raw_secs;
}
- raw_time.tv_nsec = raw_nsecs;
+ timekeeper.raw_time.tv_nsec = raw_nsecs;
/* Accumulate error between NTP and clock interval */
- timekeeper.ntp_error += tick_length << shift;
+ timekeeper.ntp_error += ntp_tick_length() << shift;
timekeeper.ntp_error -=
(timekeeper.xtime_interval + timekeeper.xtime_remainder) <<
(timekeeper.ntp_error_shift + shift);
@@ -970,17 +989,19 @@ static cycle_t logarithmic_accumulation(cycle_t offset, int shift)
/**
* update_wall_time - Uses the current clocksource to increment the wall time
*
- * Called from the timer interrupt, must hold a write on xtime_lock.
*/
static void update_wall_time(void)
{
struct clocksource *clock;
cycle_t offset;
int shift = 0, maxshift;
+ unsigned long flags;
+
+ write_seqlock_irqsave(&timekeeper.lock, flags);
/* Make sure we're fully resumed: */
if (unlikely(timekeeping_suspended))
- return;
+ goto out;
clock = timekeeper.clock;
@@ -989,20 +1010,21 @@ static void update_wall_time(void)
#else
offset = (clock->read(clock) - clock->cycle_last) & clock->mask;
#endif
- timekeeper.xtime_nsec = (s64)xtime.tv_nsec << timekeeper.shift;
+ timekeeper.xtime_nsec = (s64)timekeeper.xtime.tv_nsec <<
+ timekeeper.shift;
/*
* With NO_HZ we may have to accumulate many cycle_intervals
* (think "ticks") worth of time at once. To do this efficiently,
* we calculate the largest doubling multiple of cycle_intervals
- * that is smaller then the offset. We then accumulate that
+ * that is smaller than the offset. We then accumulate that
* chunk in one go, and then try to consume the next smaller
* doubled multiple.
*/
shift = ilog2(offset) - ilog2(timekeeper.cycle_interval);
shift = max(0, shift);
- /* Bound shift to one less then what overflows tick_length */
- maxshift = (8*sizeof(tick_length) - (ilog2(tick_length)+1)) - 1;
+ /* Bound shift to one less than what overflows tick_length */
+ maxshift = (64 - (ilog2(ntp_tick_length())+1)) - 1;
shift = min(shift, maxshift);
while (offset >= timekeeper.cycle_interval) {
offset = logarithmic_accumulation(offset, shift);
@@ -1040,24 +1062,30 @@ static void update_wall_time(void)
* Store full nanoseconds into xtime after rounding it up and
* add the remainder to the error difference.
*/
- xtime.tv_nsec = ((s64) timekeeper.xtime_nsec >> timekeeper.shift) + 1;
- timekeeper.xtime_nsec -= (s64) xtime.tv_nsec << timekeeper.shift;
+ timekeeper.xtime.tv_nsec = ((s64)timekeeper.xtime_nsec >>
+ timekeeper.shift) + 1;
+ timekeeper.xtime_nsec -= (s64)timekeeper.xtime.tv_nsec <<
+ timekeeper.shift;
timekeeper.ntp_error += timekeeper.xtime_nsec <<
timekeeper.ntp_error_shift;
/*
* Finally, make sure that after the rounding
- * xtime.tv_nsec isn't larger then NSEC_PER_SEC
+ * xtime.tv_nsec isn't larger than NSEC_PER_SEC
*/
- if (unlikely(xtime.tv_nsec >= NSEC_PER_SEC)) {
- xtime.tv_nsec -= NSEC_PER_SEC;
- xtime.tv_sec++;
- second_overflow();
+ if (unlikely(timekeeper.xtime.tv_nsec >= NSEC_PER_SEC)) {
+ int leap;
+ timekeeper.xtime.tv_nsec -= NSEC_PER_SEC;
+ timekeeper.xtime.tv_sec++;
+ leap = second_overflow(timekeeper.xtime.tv_sec);
+ timekeeper.xtime.tv_sec += leap;
}
- /* check to see if there is a new clocksource to use */
- update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
- timekeeper.mult);
+ timekeeping_update(false);
+
+out:
+ write_sequnlock_irqrestore(&timekeeper.lock, flags);
+
}
/**
@@ -1074,8 +1102,10 @@ static void update_wall_time(void)
void getboottime(struct timespec *ts)
{
struct timespec boottime = {
- .tv_sec = wall_to_monotonic.tv_sec + total_sleep_time.tv_sec,
- .tv_nsec = wall_to_monotonic.tv_nsec + total_sleep_time.tv_nsec
+ .tv_sec = timekeeper.wall_to_monotonic.tv_sec +
+ timekeeper.total_sleep_time.tv_sec,
+ .tv_nsec = timekeeper.wall_to_monotonic.tv_nsec +
+ timekeeper.total_sleep_time.tv_nsec
};
set_normalized_timespec(ts, -boottime.tv_sec, -boottime.tv_nsec);
@@ -1101,13 +1131,13 @@ void get_monotonic_boottime(struct timespec *ts)
WARN_ON(timekeeping_suspended);
do {
- seq = read_seqbegin(&xtime_lock);
- *ts = xtime;
- tomono = wall_to_monotonic;
- sleep = total_sleep_time;
+ seq = read_seqbegin(&timekeeper.lock);
+ *ts = timekeeper.xtime;
+ tomono = timekeeper.wall_to_monotonic;
+ sleep = timekeeper.total_sleep_time;
nsecs = timekeeping_get_ns();
- } while (read_seqretry(&xtime_lock, seq));
+ } while (read_seqretry(&timekeeper.lock, seq));
set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec + sleep.tv_sec,
ts->tv_nsec + tomono.tv_nsec + sleep.tv_nsec + nsecs);
@@ -1137,19 +1167,19 @@ EXPORT_SYMBOL_GPL(ktime_get_boottime);
*/
void monotonic_to_bootbased(struct timespec *ts)
{
- *ts = timespec_add(*ts, total_sleep_time);
+ *ts = timespec_add(*ts, timekeeper.total_sleep_time);
}
EXPORT_SYMBOL_GPL(monotonic_to_bootbased);
unsigned long get_seconds(void)
{
- return xtime.tv_sec;
+ return timekeeper.xtime.tv_sec;
}
EXPORT_SYMBOL(get_seconds);
struct timespec __current_kernel_time(void)
{
- return xtime;
+ return timekeeper.xtime;
}
struct timespec current_kernel_time(void)
@@ -1158,10 +1188,10 @@ struct timespec current_kernel_time(void)
unsigned long seq;
do {
- seq = read_seqbegin(&xtime_lock);
+ seq = read_seqbegin(&timekeeper.lock);
- now = xtime;
- } while (read_seqretry(&xtime_lock, seq));
+ now = timekeeper.xtime;
+ } while (read_seqretry(&timekeeper.lock, seq));
return now;
}
@@ -1173,11 +1203,11 @@ struct timespec get_monotonic_coarse(void)
unsigned long seq;
do {
- seq = read_seqbegin(&xtime_lock);
+ seq = read_seqbegin(&timekeeper.lock);
- now = xtime;
- mono = wall_to_monotonic;
- } while (read_seqretry(&xtime_lock, seq));
+ now = timekeeper.xtime;
+ mono = timekeeper.wall_to_monotonic;
+ } while (read_seqretry(&timekeeper.lock, seq));
set_normalized_timespec(&now, now.tv_sec + mono.tv_sec,
now.tv_nsec + mono.tv_nsec);
@@ -1209,11 +1239,11 @@ void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim,
unsigned long seq;
do {
- seq = read_seqbegin(&xtime_lock);
- *xtim = xtime;
- *wtom = wall_to_monotonic;
- *sleep = total_sleep_time;
- } while (read_seqretry(&xtime_lock, seq));
+ seq = read_seqbegin(&timekeeper.lock);
+ *xtim = timekeeper.xtime;
+ *wtom = timekeeper.wall_to_monotonic;
+ *sleep = timekeeper.total_sleep_time;
+ } while (read_seqretry(&timekeeper.lock, seq));
}
/**
@@ -1225,11 +1255,14 @@ ktime_t ktime_get_monotonic_offset(void)
struct timespec wtom;
do {
- seq = read_seqbegin(&xtime_lock);
- wtom = wall_to_monotonic;
- } while (read_seqretry(&xtime_lock, seq));
+ seq = read_seqbegin(&timekeeper.lock);
+ wtom = timekeeper.wall_to_monotonic;
+ } while (read_seqretry(&timekeeper.lock, seq));
+
return timespec_to_ktime(wtom);
}
+EXPORT_SYMBOL_GPL(ktime_get_monotonic_offset);
+
/**
* xtime_update() - advances the timekeeping infrastructure
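
The timekeeping.c side of the merge folds xtime, wall_to_monotonic,
total_sleep_time and raw_time into struct timekeeper and guards the whole
structure with its own seqlock, so every reader above switches from
xtime_lock to a timekeeper.lock retry loop. Below is a minimal userspace
sketch of that read side, mimicking read_seqbegin()/read_seqretry() with C11
atomics; the structure layout and names are illustrative only.

#include <stdatomic.h>
#include <stdio.h>
#include <time.h>

/* Illustrative stand-in for the consolidated struct timekeeper. */
static struct {
	atomic_uint seq;        /* even: quiescent, odd: writer active */
	struct timespec xtime;  /* wall time snapshot */
	struct timespec wall_to_monotonic;
} tk;

/* Wait out any writer, then return the (even) sequence we saw. */
static unsigned tk_read_seqbegin(void)
{
	unsigned seq;

	while ((seq = atomic_load_explicit(&tk.seq,
					   memory_order_acquire)) & 1)
		;               /* spin: a write is in progress */
	return seq;
}

/* Nonzero means a writer ran during the read; the caller retries. */
static int tk_read_seqretry(unsigned seq)
{
	atomic_thread_fence(memory_order_acquire);
	return atomic_load_explicit(&tk.seq, memory_order_relaxed) != seq;
}

/* Reader in the shape of the patched getnstimeofday()/ktime_get(). */
static void tk_get_xtime(struct timespec *ts)
{
	unsigned seq;

	do {
		seq = tk_read_seqbegin();
		*ts = tk.xtime; /* torn only if seq changes under us */
	} while (tk_read_seqretry(seq));
}

int main(void)
{
	struct timespec ts;

	tk.xtime.tv_sec = 1334402307; /* value is arbitrary */
	tk_get_xtime(&ts);
	printf("xtime.tv_sec=%ld\n", (long)ts.tv_sec);
	return 0;
}

A writer would bump tk.seq to an odd value before updating the fields and to
the next even value afterwards, which is what write_seqlock() and
write_sequnlock() do around the update paths in the hunks above.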