-rw-r--r--  arch/x86/include/asm/tsc.h  |  6
-rw-r--r--  arch/x86/kernel/Makefile    |  2
-rw-r--r--  arch/x86/kernel/tsc.c       |  2
-rw-r--r--  arch/x86/kernel/tsc_sync.c  | 88
4 files changed, 97 insertions(+), 1 deletion(-)
diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h
index 33b6365c22fe..1ec0595867d9 100644
--- a/arch/x86/include/asm/tsc.h
+++ b/arch/x86/include/asm/tsc.h
@@ -48,6 +48,12 @@ extern int tsc_clocksource_reliable;
extern void check_tsc_sync_source(int cpu);
extern void check_tsc_sync_target(void);
+#ifdef CONFIG_X86_TSC
+extern void tsc_store_and_check_tsc_adjust(void);
+#else
+static inline void tsc_store_and_check_tsc_adjust(void) { }
+#endif
+
extern int notsc_setup(char *);
extern void tsc_save_sched_clock_state(void);
extern void tsc_restore_sched_clock_state(void);
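
The header change uses the usual pattern of pairing the extern declaration with an empty static-inline stub, so call sites need no #ifdef of their own. A minimal illustration (the caller below is hypothetical, not part of this patch):

	#ifdef CONFIG_X86_TSC
	extern void tsc_store_and_check_tsc_adjust(void);
	#else
	static inline void tsc_store_and_check_tsc_adjust(void) { }
	#endif

	/* Hypothetical caller: the same call site compiles for both
	 * configurations; with CONFIG_X86_TSC=n the empty inline is
	 * optimized away entirely.
	 */
	static void example_boot_path(void)
	{
		tsc_store_and_check_tsc_adjust();
	}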
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 79076d75bdbf..c0ac317dd372 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -75,7 +75,7 @@ apm-y := apm_32.o
obj-$(CONFIG_APM) += apm.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_SMP) += smpboot.o
-obj-$(CONFIG_SMP) += tsc_sync.o
+obj-$(CONFIG_X86_TSC) += tsc_sync.o
obj-$(CONFIG_SMP) += setup_percpu.o
obj-$(CONFIG_X86_MPPARSE) += mpparse.o
obj-y += apic/
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 2b27c5ae9d1f..2bb8de4f3b39 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -1379,6 +1379,8 @@ void __init tsc_init(void)
if (unsynchronized_tsc())
mark_tsc_unstable("TSCs unsynchronized");
+ else
+ tsc_store_and_check_tsc_adjust();
check_system_tsc_reliable();
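
For context on why the boot CPU records this value: IA32_TSC_ADJUST is an additive offset that the CPU applies to its time-stamp counter, so a mismatched value between CPUs shows up directly as TSC skew. A standalone sketch of that arithmetic, with made-up numbers for illustration:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		/* Simplified model: observed TSC = internal counter + IA32_TSC_ADJUST */
		int64_t internal = 1000000;  /* hypothetical free-running count  */
		int64_t ref_adj  = 0;        /* TSC_ADJUST on the reference CPU  */
		int64_t new_adj  = -150;     /* firmware-left value on a new CPU */

		printf("reference CPU observes %lld\n",
		       (long long)(internal + ref_adj));
		printf("new CPU observes       %lld (skew %lld)\n",
		       (long long)(internal + new_adj),
		       (long long)(new_adj - ref_adj));

		/* The tsc_sync.c change below writes the reference CPU's adjusted
		 * value into the new CPU's MSR, so all CPUs of a package observe
		 * a consistent TSC.
		 */
		return 0;
	}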
diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c
index 40f8edd55151..bd2bd5e89d96 100644
--- a/arch/x86/kernel/tsc_sync.c
+++ b/arch/x86/kernel/tsc_sync.c
@@ -14,12 +14,95 @@
* ( The serial nature of the boot logic and the CPU hotplug lock
* protects against more than 2 CPUs entering this code. )
*/
+#include <linux/topology.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/nmi.h>
#include <asm/tsc.h>
+struct tsc_adjust {
+ s64 bootval;
+ s64 adjusted;
+};
+
+static DEFINE_PER_CPU(struct tsc_adjust, tsc_adjust);
+
+#ifndef CONFIG_SMP
+void __init tsc_store_and_check_tsc_adjust(void)
+{
+	struct tsc_adjust *cur = this_cpu_ptr(&tsc_adjust);
+ s64 bootval;
+
+ if (!boot_cpu_has(X86_FEATURE_TSC_ADJUST))
+ return;
+
+ rdmsrl(MSR_IA32_TSC_ADJUST, bootval);
+ cur->bootval = bootval;
+ cur->adjusted = bootval;
+ pr_info("TSC ADJUST: Boot CPU0: %lld\n", bootval);
+}
+
+#else /* !CONFIG_SMP */
+
+/*
+ * Store and check the TSC ADJUST MSR if available
+ */
+void tsc_store_and_check_tsc_adjust(void)
+{
+ struct tsc_adjust *ref, *cur = this_cpu_ptr(&tsc_adjust);
+ unsigned int refcpu, cpu = smp_processor_id();
+ s64 bootval;
+
+ if (!boot_cpu_has(X86_FEATURE_TSC_ADJUST))
+ return;
+
+ rdmsrl(MSR_IA32_TSC_ADJUST, bootval);
+ cur->bootval = bootval;
+
+ /*
+ * Check whether this CPU is the first in a package to come up. In
+ * this case do not check the boot value against another package
+ * because the package might have been physically hotplugged, where
+ * TSC_ADJUST is expected to be different.
+ */
+ refcpu = cpumask_any_but(topology_core_cpumask(cpu), cpu);
+
+ if (refcpu >= nr_cpu_ids) {
+ /*
+ * First online CPU in a package stores the boot value in
+ * the adjustment value. This value might change later via
+ * the sync mechanism. If that fails we still can yell
+ * about boot values not being consistent.
+ */
+ cur->adjusted = bootval;
+ pr_info_once("TSC ADJUST: Boot CPU%u: %lld\n", cpu, bootval);
+ return;
+ }
+
+ ref = per_cpu_ptr(&tsc_adjust, refcpu);
+ /*
+ * Compare the boot value and complain if it differs in the
+ * package.
+ */
+ if (bootval != ref->bootval) {
+ pr_warn("TSC ADJUST differs: Reference CPU%u: %lld CPU%u: %lld\n",
+ refcpu, ref->bootval, cpu, bootval);
+ }
+ /*
+ * The TSC_ADJUST values in a package must be the same. If the boot
+ * value on this newly upcoming CPU differs from the adjustment
+ * value of the already online CPU in this package, set it to that
+ * adjusted value.
+ */
+ if (bootval != ref->adjusted) {
+ pr_warn("TSC ADJUST synchronize: Reference CPU%u: %lld CPU%u: %lld\n",
+ refcpu, ref->adjusted, cpu, bootval);
+ cur->adjusted = ref->adjusted;
+ wrmsrl(MSR_IA32_TSC_ADJUST, ref->adjusted);
+ }
+}
+
/*
* Entry/exit counters that make sure that both CPUs
* run the measurement code at once:
@@ -202,6 +285,9 @@ void check_tsc_sync_target(void)
if (unsynchronized_tsc() || tsc_clocksource_reliable)
return;
+ /* Store and check the TSC ADJUST MSR */
+ tsc_store_and_check_tsc_adjust();
+
/*
* Register this CPU's participation and wait for the
* source CPU to start the measurement:
@@ -223,3 +309,5 @@ void check_tsc_sync_target(void)
while (atomic_read(&stop_count) != cpus)
cpu_relax();
}
+
+#endif /* CONFIG_SMP */
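
The per-CPU IA32_TSC_ADJUST value (MSR 0x3b) can also be inspected from userspace through the msr driver, e.g. with rdmsr -p <cpu> 0x3b from msr-tools, or with a small program like the sketch below. This is illustrative only: it assumes the msr module is loaded and root privileges, and the read fails (typically with EIO) on CPUs without X86_FEATURE_TSC_ADJUST.

	/* tsc_adjust_dump.c - print IA32_TSC_ADJUST for one CPU.
	 * Build: gcc -O2 -o tsc_adjust_dump tsc_adjust_dump.c
	 * Run:   ./tsc_adjust_dump <cpu>   (as root, with the msr module loaded)
	 */
	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <unistd.h>

	#define MSR_IA32_TSC_ADJUST 0x3b

	int main(int argc, char **argv)
	{
		char path[64];
		uint64_t val;
		int cpu = argc > 1 ? atoi(argv[1]) : 0;
		int fd;

		snprintf(path, sizeof(path), "/dev/cpu/%d/msr", cpu);
		fd = open(path, O_RDONLY);
		if (fd < 0) {
			perror(path);
			return 1;
		}
		/* The msr driver uses the file offset as the MSR index. */
		if (pread(fd, &val, sizeof(val), MSR_IA32_TSC_ADJUST) != sizeof(val)) {
			perror("pread");
			close(fd);
			return 1;
		}
		close(fd);

		/* The patch expects this value to match across all CPUs of a package. */
		printf("CPU%d: IA32_TSC_ADJUST = %lld\n", cpu, (long long)(int64_t)val);
		return 0;
	}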