summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authordlg <dlg@openbsd.org>2013-05-06 00:15:11 +0000
committerdlg <dlg@openbsd.org>2013-05-06 00:15:11 +0000
commit9a77559928b18e8e96f6ba5c37079e9f088306b6 (patch)
tree3e08427108efe9581406d2108cce1c8340607d38
parentInitialize channel ranges of new slots to full sub-device channels. (diff)
downloadwireguard-openbsd-9a77559928b18e8e96f6ba5c37079e9f088306b6.tar.xz
wireguard-openbsd-9a77559928b18e8e96f6ba5c37079e9f088306b6.zip
the use of modern intel performance counter msrs to measure the number of
cycles per second isn't reliable, particularly inside "virtual" machines. cpuspeed can be calculated as 0, which causes a divide by zero later on, which is bad. this goes to more effort to detect if the performance counters are in use by the hypervisor, or detecting if they gave us a cpuspeed of 0, so we can fall through to using rdtsc. ok jsg@
-rw-r--r--sys/arch/i386/include/specialreg.h12
-rw-r--r--sys/arch/i386/isa/clock.c78
2 files changed, 59 insertions, 31 deletions
diff --git a/sys/arch/i386/include/specialreg.h b/sys/arch/i386/include/specialreg.h
index 5f5ed76b9b1..13e036c36df 100644
--- a/sys/arch/i386/include/specialreg.h
+++ b/sys/arch/i386/include/specialreg.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: specialreg.h,v 1.44 2012/11/10 09:45:05 mglocker Exp $ */
+/* $OpenBSD: specialreg.h,v 1.45 2013/05/06 00:15:11 dlg Exp $ */
/* $NetBSD: specialreg.h,v 1.7 1994/10/27 04:16:26 cgd Exp $ */
/*-
@@ -340,8 +340,14 @@
#define MSR_PERF_FIXED_CTR1 0x30a /* CPU_CLK_Unhalted.Core */
#define MSR_PERF_FIXED_CTR2 0x30b /* CPU_CLK.Unhalted.Ref */
#define MSR_PERF_FIXED_CTR_CTRL 0x38d
-#define MSR_PERF_FIXED_CTR1_EN (1 << 4)
-#define MSR_PERF_FIXED_CTR2_EN (1 << 8)
+#define MSR_PERF_FIXED_CTR_FC_DIS 0x0 /* disable counter */
+#define MSR_PERF_FIXED_CTR_FC_1 0x1 /* count ring 1 */
+#define MSR_PERF_FIXED_CTR_FC_123 0x2 /* count rings 1,2,3 */
+#define MSR_PERF_FIXED_CTR_FC_ANY 0x3 /* count everything */
+#define MSR_PERF_FIXED_CTR_FC_MASK 0x3
+#define MSR_PERF_FIXED_CTR_FC(_i, _v) ((_v) << (4 * (_i)))
+#define MSR_PERF_FIXED_CTR_ANYTHR(_i) (0x4 << (4 * (_i)))
+#define MSR_PERF_FIXED_CTR_INT(_i) (0x8 << (4 * (_i)))
#define MSR_PERF_GLOBAL_CTRL 0x38f
#define MSR_PERF_GLOBAL_CTR1_EN (1ULL << 33)
#define MSR_PERF_GLOBAL_CTR2_EN (1ULL << 34)
diff --git a/sys/arch/i386/isa/clock.c b/sys/arch/i386/isa/clock.c
index 2517de57f7b..50487a73e63 100644
--- a/sys/arch/i386/isa/clock.c
+++ b/sys/arch/i386/isa/clock.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: clock.c,v 1.48 2013/04/17 18:35:47 gerhard Exp $ */
+/* $OpenBSD: clock.c,v 1.49 2013/05/06 00:15:11 dlg Exp $ */
/* $NetBSD: clock.c,v 1.39 1996/05/12 23:11:54 mycroft Exp $ */
/*-
@@ -115,6 +115,7 @@ int hexdectodec(int);
int dectohexdec(int);
int rtcintr(void *);
void rtcdrain(void *);
+int calibrate_cyclecounter_ctr(void);
u_int mc146818_read(void *, u_int);
void mc146818_write(void *, u_int, u_int);
@@ -373,37 +374,58 @@ i8254_delay(int n)
}
}
-void
-calibrate_cyclecounter(void)
+int
+calibrate_cyclecounter_ctr(void)
{
- struct cpu_info *ci;
+ struct cpu_info *ci = curcpu();
unsigned long long count, last_count, msr;
- ci = curcpu();
-
- if ((ci->ci_flags & CPUF_CONST_TSC) &&
- (cpu_perf_eax & CPUIDEAX_VERID) > 1 &&
- CPUIDEDX_NUM_FC(cpu_perf_edx) > 1) {
- msr = rdmsr(MSR_PERF_FIXED_CTR_CTRL) | MSR_PERF_FIXED_CTR1_EN;
- wrmsr(MSR_PERF_FIXED_CTR_CTRL, msr);
- msr = rdmsr(MSR_PERF_GLOBAL_CTRL) | MSR_PERF_GLOBAL_CTR1_EN;
- wrmsr(MSR_PERF_GLOBAL_CTRL, msr);
-
- last_count = rdmsr(MSR_PERF_FIXED_CTR1);
- delay(1000000);
- count = rdmsr(MSR_PERF_FIXED_CTR1);
-
- msr = rdmsr(MSR_PERF_FIXED_CTR_CTRL);
- msr &= ~MSR_PERF_FIXED_CTR1_EN;
- wrmsr(MSR_PERF_FIXED_CTR_CTRL, msr);
- msr = rdmsr(MSR_PERF_GLOBAL_CTRL);
- msr &= ~MSR_PERF_GLOBAL_CTR1_EN;
- wrmsr(MSR_PERF_GLOBAL_CTRL, msr);
- } else {
- __asm __volatile("rdtsc" : "=A" (last_count));
- delay(1000000);
- __asm __volatile("rdtsc" : "=A" (count));
+ if ((ci->ci_flags & CPUF_CONST_TSC) == 0 ||
+ (cpu_perf_eax & CPUIDEAX_VERID) <= 1 ||
+ CPUIDEDX_NUM_FC(cpu_perf_edx) <= 1)
+ return (-1);
+
+ msr = rdmsr(MSR_PERF_FIXED_CTR_CTRL);
+ if (msr & MSR_PERF_FIXED_CTR_FC(1, MSR_PERF_FIXED_CTR_FC_MASK)) {
+ /* some hypervisor is dicking us around */
+ return (-1);
}
+
+ msr |= MSR_PERF_FIXED_CTR_FC(1, MSR_PERF_FIXED_CTR_FC_1);
+ wrmsr(MSR_PERF_FIXED_CTR_CTRL, msr);
+
+ msr = rdmsr(MSR_PERF_GLOBAL_CTRL) | MSR_PERF_GLOBAL_CTR1_EN;
+ wrmsr(MSR_PERF_GLOBAL_CTRL, msr);
+
+ last_count = rdmsr(MSR_PERF_FIXED_CTR1);
+ delay(1000000);
+ count = rdmsr(MSR_PERF_FIXED_CTR1);
+
+ msr = rdmsr(MSR_PERF_FIXED_CTR_CTRL);
+ msr &= MSR_PERF_FIXED_CTR_FC(1, MSR_PERF_FIXED_CTR_FC_MASK);
+ wrmsr(MSR_PERF_FIXED_CTR_CTRL, msr);
+
+ msr = rdmsr(MSR_PERF_GLOBAL_CTRL);
+ msr &= ~MSR_PERF_GLOBAL_CTR1_EN;
+ wrmsr(MSR_PERF_GLOBAL_CTRL, msr);
+
+ cpuspeed = ((count - last_count) + 999999) / 1000000;
+
+ return (cpuspeed == 0 ? -1 : 0);
+}
+
+void
+calibrate_cyclecounter(void)
+{
+ unsigned long long count, last_count;
+
+ if (calibrate_cyclecounter_ctr() == 0)
+ return;
+
+ __asm __volatile("rdtsc" : "=A" (last_count));
+ delay(1000000);
+ __asm __volatile("rdtsc" : "=A" (count));
+
cpuspeed = ((count - last_count) + 999999) / 1000000;
}