author		Andy Lutomirski <luto@kernel.org>	2016-01-04 15:14:28 -0800
committer	Ingo Molnar <mingo@kernel.org>	2016-01-13 11:46:29 +0100
commit		78fd8c7288e0a4bba3ad1d69caf9396a6b69cb00 (patch)
tree		f3f3325a926eab379f0451264a20a3536a911eb3 /arch/x86/entry/vdso/vclock_gettime.c
parent		x86/mm: Improve switch_mm() barrier comments (diff)
x86/vdso/pvclock: Protect STABLE check with the seqcount
If the clock becomes unstable while we're reading it, we need to
bail.  We can do this by simply moving the check into the seqcount
loop.

Reported-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Andy Lutomirski <luto@kernel.org>
Cc: Alexander Graf <agraf@suse.de>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Radim Krcmar <rkrcmar@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/755dcedb17269e1d7ce12a9a713dea303835137e.1451949191.git.luto@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
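For reference, below is a minimal user-space sketch of the pattern this patch
enforces. It is not the kernel's code: the names (struct pv_time_sample,
sample_read, SAMPLE_STABLE_BIT) are hypothetical, and C11 atomics stand in for
the kernel's smp_rmb(). The point it illustrates is that an "is this data
usable at all?" flag must be sampled inside the seqcount retry loop; a check
done once before the loop can be invalidated by a concurrent update that only
the version recheck would catch.

/* Illustrative sketch only, not kernel code. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct pv_time_sample {
	atomic_uint version;		/* odd => writer mid-update */
	uint32_t flags;			/* plain field, guarded by the version protocol */
	uint64_t system_time;		/* likewise */
};

#define SAMPLE_STABLE_BIT 0x1u

/* Fills *out and returns true only if a stable, consistent sample was read. */
static bool sample_read(struct pv_time_sample *s, uint64_t *out)
{
	unsigned v;

	do {
		v = atomic_load_explicit(&s->version, memory_order_acquire);

		/*
		 * The stability check lives INSIDE the loop: if a writer
		 * clears the bit while we read, either we see the cleared
		 * bit here, or the version recheck below restarts us and
		 * we see it on the next pass.
		 */
		if (!(s->flags & SAMPLE_STABLE_BIT))
			return false;

		*out = s->system_time;

		/*
		 * Order the data reads before the version recheck,
		 * analogous to the kernel's smp_rmb().
		 */
		atomic_thread_fence(memory_order_acquire);
	} while ((v & 1) ||
		 v != atomic_load_explicit(&s->version, memory_order_relaxed));

	return true;
}

int main(void)
{
	struct pv_time_sample s = {
		.version = 2,		/* even => no writer active */
		.flags = SAMPLE_STABLE_BIT,
		.system_time = 123456789,
	};
	uint64_t t;

	if (sample_read(&s, &t))
		printf("stable sample: %llu\n", (unsigned long long)t);
	else
		printf("clock unstable, caller must fall back\n");
	return 0;
}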
Diffstat
-rw-r--r--	arch/x86/entry/vdso/vclock_gettime.c	12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/arch/x86/entry/vdso/vclock_gettime.c b/arch/x86/entry/vdso/vclock_gettime.c
index 8602f06c759f..1a50e09c945b 100644
--- a/arch/x86/entry/vdso/vclock_gettime.c
+++ b/arch/x86/entry/vdso/vclock_gettime.c
@@ -126,23 +126,23 @@ static notrace cycle_t vread_pvclock(int *mode)
 	 *
 	 * On Xen, we don't appear to have that guarantee, but Xen still
 	 * supplies a valid seqlock using the version field.
-
+	 *
 	 * We only do pvclock vdso timing at all if
 	 * PVCLOCK_TSC_STABLE_BIT is set, and we interpret that bit to
 	 * mean that all vCPUs have matching pvti and that the TSC is
 	 * synced, so we can just look at vCPU 0's pvti.
 	 */
-	if (unlikely(!(pvti->flags & PVCLOCK_TSC_STABLE_BIT))) {
-		*mode = VCLOCK_NONE;
-		return 0;
-	}
-
 	do {
 		version = pvti->version;
 
 		smp_rmb();
 
+		if (unlikely(!(pvti->flags & PVCLOCK_TSC_STABLE_BIT))) {
+			*mode = VCLOCK_NONE;
+			return 0;
+		}
+
 		tsc = rdtsc_ordered();
 		pvti_tsc_to_system_mul = pvti->tsc_to_system_mul;
 		pvti_tsc_shift = pvti->tsc_shift;