author     Linus Torvalds <torvalds@linux-foundation.org>  2015-02-14 10:54:28 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2015-02-14 10:54:28 -0800
commit     c833e17e276bd5d5f174aa924c4f102754ebc2be (patch)
tree       90fb3d7c9b6ab66fa5d651675f054d5a7aa8a01a /arch/x86
parent     Merge git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6 (diff)
parent     kernel: Fix sparse warning for ACCESS_ONCE (diff)
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/borntraeger/linux
Pull ACCESS_ONCE() rule tightening from Christian Borntraeger:
 "Tighten rules for ACCESS_ONCE

  This series tightens the rules for ACCESS_ONCE to only work on scalar
  types.  It also contains the necessary fixups as indicated by build bots
  of linux-next.

  Now everything is in place to prevent new non-scalar users of ACCESS_ONCE
  and we can continue to convert code to READ_ONCE/WRITE_ONCE"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/borntraeger/linux:
  kernel: Fix sparse warning for ACCESS_ONCE
  next: sh: Fix compile error
  kernel: tighten rules for ACCESS ONCE
  mm/gup: Replace ACCESS_ONCE with READ_ONCE
  x86/spinlock: Leftover conversion ACCESS_ONCE->READ_ONCE
  x86/xen/p2m: Replace ACCESS_ONCE with READ_ONCE
  ppc/hugetlbfs: Replace ACCESS_ONCE with READ_ONCE
  ppc/kvm: Replace ACCESS_ONCE with READ_ONCE
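To make the rule concrete, here is a minimal userspace sketch, not the kernel's exact macro bodies: a scalar-only ACCESS_ONCE built on a volatile pointer cast, next to a READ_ONCE that also copes with small aggregates such as a ticket lock's head/tail pair. The struct raw_tickets type, the barrier-plus-memcpy fallback, and the macro details are simplifications for illustration; GCC extensions (typeof, statement expressions) are assumed.

#include <stdio.h>
#include <string.h>

/* Scalar-only: casting 0 to typeof(x) fails to compile for struct and
 * union operands, which is how new non-scalar users are caught at build
 * time (a simplification of the kernel's tightened macro). */
#define ACCESS_ONCE(x) \
        (*({ typeof(x) __dummy = (typeof(x))0; (void)__dummy; \
             (volatile typeof(x) *)&(x); }))

/* Copies the value out between compiler barriers; the kernel's helper
 * additionally special-cases 1/2/4/8-byte reads as single volatile loads. */
#define READ_ONCE(x) \
        ({ union { typeof(x) __val; char __c[sizeof(x)]; } __u; \
           __asm__ __volatile__("" ::: "memory"); \
           memcpy(__u.__c, (const void *)&(x), sizeof(x)); \
           __asm__ __volatile__("" ::: "memory"); \
           __u.__val; })

struct raw_tickets { unsigned short head, tail; };

int main(void)
{
        int counter = 42;
        struct raw_tickets tickets = { .head = 1, .tail = 3 };
        struct raw_tickets snap;

        printf("scalar via ACCESS_ONCE: %d\n", ACCESS_ONCE(counter));

        /* ACCESS_ONCE(tickets) would no longer compile under the tightened
         * rule; READ_ONCE snapshots the whole pair in one annotated access. */
        snap = READ_ONCE(tickets);
        printf("struct via READ_ONCE: head=%u tail=%u\n", snap.head, snap.tail);
        return 0;
}

The compile-time failure for aggregate operands is what keeps new non-scalar ACCESS_ONCE users from creeping in while the remaining ones, including the two arch/x86 sites below, are converted.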
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/include/asm/spinlock.h | 4 ++--
-rw-r--r--  arch/x86/xen/p2m.c              | 2 +-
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index 625660f8a2fc..7050d864f520 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -183,10 +183,10 @@ static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
 
 static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
 {
-        __ticket_t head = ACCESS_ONCE(lock->tickets.head);
+        __ticket_t head = READ_ONCE(lock->tickets.head);
 
         for (;;) {
-                struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);
+                struct __raw_tickets tmp = READ_ONCE(lock->tickets);
                 /*
                  * We need to check "unlocked" in a loop, tmp.head == head
                  * can be false positive because of overflow.
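
The spinlock hunk is easier to follow outside diff context. The following is a rough, self-contained userspace rendering of the same unlock-wait pattern, assuming the simplified READ_ONCE sketch from above (repeated so the snippet compiles on its own) and a hypothetical ticket_lock type; the kernel additionally masks a paravirt slowpath flag via __tickets_equal(), which this sketch leaves out.

#include <string.h>

#define READ_ONCE(x) \
        ({ union { typeof(x) __val; char __c[sizeof(x)]; } __u; \
           __asm__ __volatile__("" ::: "memory"); \
           memcpy(__u.__c, (const void *)&(x), sizeof(x)); \
           __asm__ __volatile__("" ::: "memory"); \
           __u.__val; })

struct raw_tickets { unsigned short head, tail; };
struct ticket_lock { struct raw_tickets tickets; };

/* Sample head once, then re-read the whole head/tail pair every iteration.
 * Stop when the lock is free (head == tail) or when head has moved past the
 * value first sampled, i.e. the holder being waited on has unlocked.  As the
 * kernel comment notes, tmp.head == head alone can be a false positive once
 * the ticket counter overflows, so the "unlocked" check stays in a loop. */
static void ticket_unlock_wait(struct ticket_lock *lock)
{
        unsigned short head = READ_ONCE(lock->tickets.head);

        for (;;) {
                struct raw_tickets tmp = READ_ONCE(lock->tickets);

                if (tmp.head == tmp.tail || tmp.head != head)
                        break;
                /* the kernel calls cpu_relax() here; plain busy-spin in the sketch */
        }
}

int main(void)
{
        struct ticket_lock lock = { .tickets = { .head = 2, .tail = 2 } };

        ticket_unlock_wait(&lock);      /* returns immediately: lock is free */
        return 0;
}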
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index f18fd1d411f6..740ae3026a14 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -550,7 +550,7 @@ static bool alloc_p2m(unsigned long pfn)
                 mid_mfn = NULL;
         }
 
-        p2m_pfn = pte_pfn(ACCESS_ONCE(*ptep));
+        p2m_pfn = pte_pfn(READ_ONCE(*ptep));
         if (p2m_pfn == PFN_DOWN(__pa(p2m_identity)) ||
             p2m_pfn == PFN_DOWN(__pa(p2m_missing))) {
                 /* p2m leaf page is missing */
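
The p2m hunk is the same rule applied to a page-table entry: pte_t wraps the raw entry in a struct, so the scalar-only ACCESS_ONCE can no longer be applied to *ptep. Below is a hypothetical, self-contained sketch with made-up stand-ins (pte_t reduced to a bare 64-bit field, pte_pfn_once, the mask constant) rather than the kernel's real definitions, showing the point of the single snapshot: both PFN comparisons in alloc_p2m() then test one read of the entry instead of two independently reloaded copies.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in: on x86-64 the hardware page-table entry keeps the
 * page frame number in bits 12..51; the kernel wraps the raw word in pte_t. */
typedef struct { uint64_t pte; } pte_t;

#define PTE_PFN_MASK    0x000ffffffffff000ULL
#define PAGE_SHIFT      12

static uint64_t pte_pfn_once(const pte_t *ptep)
{
        /* One volatile read of the entry; every comparison made against the
         * returned value tests the same snapshot of *ptep. */
        uint64_t val = *(const volatile uint64_t *)&ptep->pte;

        return (val & PTE_PFN_MASK) >> PAGE_SHIFT;
}

int main(void)
{
        pte_t entry = { .pte = (0x1234ULL << PAGE_SHIFT) | 0x63 };

        printf("pfn = 0x%llx\n", (unsigned long long)pte_pfn_once(&entry));
        return 0;
}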