Diffstat (limited to 'arch/x86/kernel/tsc.c')
-rw-r--r--	arch/x86/kernel/tsc.c	38
1 file changed, 38 insertions, 0 deletions
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index ce8e50239332..d632934cb638 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -626,6 +626,44 @@ static void set_cyc2ns_scale(unsigned long cpu_khz, int cpu)
 	local_irq_restore(flags);
 }
 
+static unsigned long long cyc2ns_suspend;
+
+void save_sched_clock_state(void)
+{
+	if (!sched_clock_stable)
+		return;
+
+	cyc2ns_suspend = sched_clock();
+}
+
+/*
+ * Even on processors with invariant TSC, TSC gets reset in some of the
+ * ACPI system sleep states. And in some systems the BIOS seems to reinit TSC
+ * to an arbitrary value (still sync'd across cpus) during resume from such
+ * sleep states. To cope with this, recompute the cyc2ns_offset for each cpu
+ * so that sched_clock() continues from the point where it was left off during
+ * suspend.
+ */
+void restore_sched_clock_state(void)
+{
+	unsigned long long offset;
+	unsigned long flags;
+	int cpu;
+
+	if (!sched_clock_stable)
+		return;
+
+	local_irq_save(flags);
+
+	__get_cpu_var(cyc2ns_offset) = 0;
+	offset = cyc2ns_suspend - sched_clock();
+
+	for_each_possible_cpu(cpu)
+		per_cpu(cyc2ns_offset, cpu) = offset;
+
+	local_irq_restore(flags);
+}
+
 #ifdef CONFIG_CPU_FREQ
 
 /* Frequency scaling support. Adjust the TSC based timer when the cpu frequency
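To make the offset arithmetic concrete, here is a small user-space C model of the recovery step. It is only a sketch: names such as model_sched_clock(), raw_ns, offset[] and NCPUS are invented stand-ins for the scaled TSC reading and the per-cpu cyc2ns_offset variables, not kernel APIs. The point it demonstrates is why zeroing the local offset before sampling the clock, then storing cyc2ns_suspend minus that raw post-resume value into every CPU's offset, makes the clock continue from the pre-suspend reading.

/* User-space model of the cyc2ns offset recomputation (illustration only).
 * raw_ns stands in for (tsc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR, and
 * offset[] stands in for the per-cpu cyc2ns_offset variables.
 */
#include <stdio.h>

#define NCPUS 4

static unsigned long long raw_ns;           /* scaled TSC reading           */
static unsigned long long offset[NCPUS];    /* per-cpu cyc2ns_offset        */
static unsigned long long clock_suspend;    /* cyc2ns_suspend               */

/* sched_clock() as seen on one cpu: scaled TSC plus that cpu's offset */
static unsigned long long model_sched_clock(int cpu)
{
	return raw_ns + offset[cpu];
}

static void model_save_state(void)
{
	clock_suspend = model_sched_clock(0);
}

static void model_restore_state(void)
{
	unsigned long long off;
	int cpu;

	/* zero the local offset so the next read is the raw post-resume value */
	offset[0] = 0;
	off = clock_suspend - model_sched_clock(0);

	for (cpu = 0; cpu < NCPUS; cpu++)
		offset[cpu] = off;
}

int main(void)
{
	int cpu;

	raw_ns = 5000000000ULL;          /* 5 s of uptime before suspend      */
	model_save_state();

	raw_ns = 12345ULL;               /* BIOS reset the TSC during sleep   */
	model_restore_state();

	for (cpu = 0; cpu < NCPUS; cpu++)
		printf("cpu%d clock resumes at %llu ns\n",
		       cpu, model_sched_clock(cpu));
	return 0;
}

Running the model prints 5000000000 ns for every CPU, i.e. the clock picks up exactly at the saved value; time spent in the sleep state does not appear in sched_clock(). The real save/restore hooks are intended to be called from the x86 suspend/resume path, which lies outside the tsc.c-only diffstat shown above.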
