-rw-r--r--  Documentation/atomic_ops.txt          |  4
-rw-r--r--  Documentation/locking/lockstat.txt    |  2
-rw-r--r--  Documentation/memory-barriers.txt     | 17
-rw-r--r--  arch/alpha/include/asm/atomic.h       |  8
-rw-r--r--  arch/arc/include/asm/atomic.h         |  8
-rw-r--r--  arch/arm/include/asm/atomic.h         |  4
-rw-r--r--  arch/arm64/include/asm/atomic.h       |  2
-rw-r--r--  arch/avr32/include/asm/atomic.h       |  4
-rw-r--r--  arch/frv/include/asm/atomic.h         |  4
-rw-r--r--  arch/h8300/include/asm/atomic.h       |  4
-rw-r--r--  arch/hexagon/include/asm/atomic.h     |  2
-rw-r--r--  arch/ia64/include/asm/atomic.h        |  8
-rw-r--r--  arch/m32r/include/asm/atomic.h        |  4
-rw-r--r--  arch/m68k/include/asm/atomic.h        |  4
-rw-r--r--  arch/metag/include/asm/atomic_lnkget.h |  2
-rw-r--r--  arch/metag/include/asm/atomic_lock1.h |  2
-rw-r--r--  arch/mips/include/asm/atomic.h        |  8
-rw-r--r--  arch/mn10300/include/asm/atomic.h     |  4
-rw-r--r--  arch/parisc/include/asm/atomic.h      |  2
-rw-r--r--  arch/sh/include/asm/atomic.h          |  4
-rw-r--r--  arch/sparc/include/asm/atomic_64.h    |  8
-rw-r--r--  arch/tile/include/asm/atomic.h        |  2
-rw-r--r--  arch/tile/include/asm/atomic_64.h     |  6
-rw-r--r--  arch/x86/include/asm/atomic.h         |  4
-rw-r--r--  arch/x86/include/asm/atomic64_64.h    |  4
-rw-r--r--  arch/xtensa/include/asm/atomic.h      |  4
-rw-r--r--  drivers/net/ethernet/sfc/mcdi.c       |  2
-rw-r--r--  drivers/phy/phy-rcar-gen2.c           |  3
-rw-r--r--  drivers/staging/speakup/selection.c   |  2
-rw-r--r--  include/asm-generic/atomic-long.h     | 27
-rw-r--r--  include/asm-generic/atomic.h          |  4
-rw-r--r--  include/asm-generic/qrwlock_types.h   |  4
-rw-r--r--  include/linux/atomic.h                | 21
-rw-r--r--  kernel/futex.c                        | 13
-rw-r--r--  kernel/locking/osq_lock.c             | 11
-rw-r--r--  kernel/locking/qrwlock.c              |  8
-rw-r--r--  kernel/locking/qspinlock_paravirt.h   |  6
37 files changed, 132 insertions(+), 94 deletions(-)
diff --git a/Documentation/atomic_ops.txt b/Documentation/atomic_ops.txt
index b19fc34efdb1..c9d1cacb4395 100644
--- a/Documentation/atomic_ops.txt
+++ b/Documentation/atomic_ops.txt
@@ -542,6 +542,10 @@ The routines xchg() and cmpxchg() must provide the same exact
memory-barrier semantics as the atomic and bit operations returning
values.
+Note: If you want to use xchg(), cmpxchg() or one of their variants,
+include linux/atomic.h rather than asm/cmpxchg.h, unless the code is
+in arch/* and can take care of itself.
+
Spinlocks and rwlocks have memory barrier expectations as well.
The rule to follow is simple:
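A quick sketch of the preferred include for code outside arch/* (the
variable and function names below are hypothetical, for illustration
only):

#include <linux/types.h>
#include <linux/atomic.h>	/* rather than <asm/cmpxchg.h> */

static unsigned long owner;	/* hypothetical shared word */

static bool try_claim(unsigned long me)
{
	/* cmpxchg() arrives via linux/atomic.h and provides the full
	 * memory-barrier semantics described above. */
	return cmpxchg(&owner, 0UL, me) == 0UL;
}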
diff --git a/Documentation/locking/lockstat.txt b/Documentation/locking/lockstat.txt
index 568bbbacee91..5786ad2cd5e6 100644
--- a/Documentation/locking/lockstat.txt
+++ b/Documentation/locking/lockstat.txt
@@ -12,7 +12,7 @@ Because things like lock contention can severely impact performance.
- HOW
Lockdep already has hooks in the lock functions and maps lock instances to
-lock classes. We build on that (see Documentation/lokcing/lockdep-design.txt).
+lock classes. We build on that (see Documentation/locking/lockdep-design.txt).
The graph below shows the relation between the lock functions and the various
hooks therein.
diff --git a/Documentation/memory-barriers.txt b/Documentation/memory-barriers.txt
index 2ba8461b0631..41ffd7e9cdcf 100644
--- a/Documentation/memory-barriers.txt
+++ b/Documentation/memory-barriers.txt
@@ -637,7 +637,8 @@ as follows:
b = p; /* BUG: Compiler and CPU can both reorder!!! */
Finally, the READ_ONCE_CTRL() includes an smp_read_barrier_depends()
-that DEC Alpha needs in order to respect control depedencies.
+that DEC Alpha needs in order to respect control dependencies. Alternatively,
+use one of atomic{,64}_read_ctrl().
So don't leave out the READ_ONCE_CTRL().
@@ -796,9 +797,9 @@ site: https://www.cl.cam.ac.uk/~pes20/ppcmem/index.html.
In summary:
- (*) Control dependencies must be headed by READ_ONCE_CTRL().
- Or, as a much less preferable alternative, interpose
- smp_read_barrier_depends() between a READ_ONCE() and the
+ (*) Control dependencies must be headed by READ_ONCE_CTRL() or
+ atomic{,64}_read_ctrl(). Or, as a much less preferable alternative,
+ interpose smp_read_barrier_depends() between a READ_ONCE() and the
control-dependent write.
(*) Control dependencies can order prior loads against later stores.
@@ -820,10 +821,10 @@ In summary:
and WRITE_ONCE() can help to preserve the needed conditional.
(*) Control dependencies require that the compiler avoid reordering the
- dependency into nonexistence. Careful use of READ_ONCE_CTRL()
- or smp_read_barrier_depends() can help to preserve your control
- dependency. Please see the Compiler Barrier section for more
- information.
+ dependency into nonexistence. Careful use of READ_ONCE_CTRL(),
+ atomic{,64}_read_ctrl() or smp_read_barrier_depends() can help to
+ preserve your control dependency. Please see the Compiler Barrier
+ section for more information.
(*) Control dependencies pair normally with other types of barriers.
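A hedged sketch of a control dependency headed by atomic_read_ctrl();
the names flag and data are hypothetical:

static atomic_t flag = ATOMIC_INIT(0);
static int data;

static void consumer(void)
{
	/* atomic_read_ctrl() includes the smp_read_barrier_depends()
	 * that DEC Alpha needs, so the dependent store below cannot
	 * be reordered before the load. */
	if (atomic_read_ctrl(&flag))
		WRITE_ONCE(data, 1);
}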
diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
index e8c956098424..572b228c44c7 100644
--- a/arch/alpha/include/asm/atomic.h
+++ b/arch/alpha/include/asm/atomic.h
@@ -17,11 +17,11 @@
#define ATOMIC_INIT(i) { (i) }
#define ATOMIC64_INIT(i) { (i) }
-#define atomic_read(v) ACCESS_ONCE((v)->counter)
-#define atomic64_read(v) ACCESS_ONCE((v)->counter)
+#define atomic_read(v) READ_ONCE((v)->counter)
+#define atomic64_read(v) READ_ONCE((v)->counter)
-#define atomic_set(v,i) ((v)->counter = (i))
-#define atomic64_set(v,i) ((v)->counter = (i))
+#define atomic_set(v,i) WRITE_ONCE((v)->counter, (i))
+#define atomic64_set(v,i) WRITE_ONCE((v)->counter, (i))
/*
* To get proper branch prediction for the main line, we must branch
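The same mechanical conversion repeats across the architectures below.
The motivation, as a minimal sketch (hypothetical functions, not part
of the patch): a plain access leaves the compiler free to tear, fuse
or re-issue it, while READ_ONCE()/WRITE_ONCE() force exactly one
volatile access of the full width:

static void plain_stores(atomic_t *v)
{
	/* The compiler may legally collapse these into a single
	 * store of 2, so a concurrent reader never observes 1. */
	v->counter = 1;
	v->counter = 2;
}

static void once_stores(atomic_t *v)
{
	/* Both stores are emitted, each as one full-width access
	 * that cannot be torn or elided. */
	WRITE_ONCE(v->counter, 1);
	WRITE_ONCE(v->counter, 2);
}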
diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
index c3ecda023e3a..7730d302cadb 100644
--- a/arch/arc/include/asm/atomic.h
+++ b/arch/arc/include/asm/atomic.h
@@ -17,11 +17,11 @@
#include <asm/barrier.h>
#include <asm/smp.h>
-#define atomic_read(v) ((v)->counter)
+#define atomic_read(v) READ_ONCE((v)->counter)
#ifdef CONFIG_ARC_HAS_LLSC
-#define atomic_set(v, i) (((v)->counter) = (i))
+#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
#ifdef CONFIG_ARC_STAR_9000923308
@@ -107,7 +107,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
#ifndef CONFIG_SMP
/* violating atomic_xxx API locking protocol in UP for optimization sake */
-#define atomic_set(v, i) (((v)->counter) = (i))
+#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
#else
@@ -125,7 +125,7 @@ static inline void atomic_set(atomic_t *v, int i)
unsigned long flags;
atomic_ops_lock(flags);
- v->counter = i;
+ WRITE_ONCE(v->counter, i);
atomic_ops_unlock(flags);
}
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index fe3ef397f5a4..2bf80afb7841 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -27,8 +27,8 @@
* strex/ldrex monitor on some implementations. The reason we can use it for
* atomic_set() is the clrex or dummy strex done on every exception return.
*/
-#define atomic_read(v) ACCESS_ONCE((v)->counter)
-#define atomic_set(v,i) (((v)->counter) = (i))
+#define atomic_read(v) READ_ONCE((v)->counter)
+#define atomic_set(v,i) WRITE_ONCE(((v)->counter), (i))
#if __LINUX_ARM_ARCH__ >= 6
diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
index 35a67783cfa0..1e247ac2601a 100644
--- a/arch/arm64/include/asm/atomic.h
+++ b/arch/arm64/include/asm/atomic.h
@@ -54,7 +54,7 @@
#define ATOMIC_INIT(i) { (i) }
#define atomic_read(v) READ_ONCE((v)->counter)
-#define atomic_set(v, i) (((v)->counter) = (i))
+#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
#define atomic_xchg(v, new) xchg(&((v)->counter), (new))
#define atomic_cmpxchg(v, old, new) cmpxchg(&((v)->counter), (old), (new))
diff --git a/arch/avr32/include/asm/atomic.h b/arch/avr32/include/asm/atomic.h
index 97c9bdf83409..d74fd8ce980a 100644
--- a/arch/avr32/include/asm/atomic.h
+++ b/arch/avr32/include/asm/atomic.h
@@ -19,8 +19,8 @@
#define ATOMIC_INIT(i) { (i) }
-#define atomic_read(v) ACCESS_ONCE((v)->counter)
-#define atomic_set(v, i) (((v)->counter) = i)
+#define atomic_read(v) READ_ONCE((v)->counter)
+#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
#define ATOMIC_OP_RETURN(op, asm_op, asm_con) \
static inline int __atomic_##op##_return(int i, atomic_t *v) \
diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
index 0da689def4cc..64f02d451aa8 100644
--- a/arch/frv/include/asm/atomic.h
+++ b/arch/frv/include/asm/atomic.h
@@ -32,8 +32,8 @@
*/
#define ATOMIC_INIT(i) { (i) }
-#define atomic_read(v) ACCESS_ONCE((v)->counter)
-#define atomic_set(v, i) (((v)->counter) = (i))
+#define atomic_read(v) READ_ONCE((v)->counter)
+#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
static inline int atomic_inc_return(atomic_t *v)
{
diff --git a/arch/h8300/include/asm/atomic.h b/arch/h8300/include/asm/atomic.h
index 702ee539f87d..4435a445ae7e 100644
--- a/arch/h8300/include/asm/atomic.h
+++ b/arch/h8300/include/asm/atomic.h
@@ -11,8 +11,8 @@
#define ATOMIC_INIT(i) { (i) }
-#define atomic_read(v) ACCESS_ONCE((v)->counter)
-#define atomic_set(v, i) (((v)->counter) = i)
+#define atomic_read(v) READ_ONCE((v)->counter)
+#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
#include <linux/kernel.h>
diff --git a/arch/hexagon/include/asm/atomic.h b/arch/hexagon/include/asm/atomic.h
index 811d61f6422d..55696c4100d4 100644
--- a/arch/hexagon/include/asm/atomic.h
+++ b/arch/hexagon/include/asm/atomic.h
@@ -48,7 +48,7 @@ static inline void atomic_set(atomic_t *v, int new)
*
* Assumes all word reads on our architecture are atomic.
*/
-#define atomic_read(v) ((v)->counter)
+#define atomic_read(v) READ_ONCE((v)->counter)
/**
* atomic_xchg - atomic
diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
index be4beeb77d57..8dfb5f6f6c35 100644
--- a/arch/ia64/include/asm/atomic.h
+++ b/arch/ia64/include/asm/atomic.h
@@ -21,11 +21,11 @@
#define ATOMIC_INIT(i) { (i) }
#define ATOMIC64_INIT(i) { (i) }
-#define atomic_read(v) ACCESS_ONCE((v)->counter)
-#define atomic64_read(v) ACCESS_ONCE((v)->counter)
+#define atomic_read(v) READ_ONCE((v)->counter)
+#define atomic64_read(v) READ_ONCE((v)->counter)
-#define atomic_set(v,i) (((v)->counter) = (i))
-#define atomic64_set(v,i) (((v)->counter) = (i))
+#define atomic_set(v,i) WRITE_ONCE(((v)->counter), (i))
+#define atomic64_set(v,i) WRITE_ONCE(((v)->counter), (i))
#define ATOMIC_OP(op, c_op) \
static __inline__ int \
diff --git a/arch/m32r/include/asm/atomic.h b/arch/m32r/include/asm/atomic.h
index 025e2a170493..ea35160d632b 100644
--- a/arch/m32r/include/asm/atomic.h
+++ b/arch/m32r/include/asm/atomic.h
@@ -28,7 +28,7 @@
*
* Atomically reads the value of @v.
*/
-#define atomic_read(v) ACCESS_ONCE((v)->counter)
+#define atomic_read(v) READ_ONCE((v)->counter)
/**
* atomic_set - set atomic variable
@@ -37,7 +37,7 @@
*
* Atomically sets the value of @v to @i.
*/
-#define atomic_set(v,i) (((v)->counter) = (i))
+#define atomic_set(v,i) WRITE_ONCE(((v)->counter), (i))
#ifdef CONFIG_CHIP_M32700_TS1
#define __ATOMIC_CLOBBER , "r4"
diff --git a/arch/m68k/include/asm/atomic.h b/arch/m68k/include/asm/atomic.h
index 039fac120cc0..4858178260f9 100644
--- a/arch/m68k/include/asm/atomic.h
+++ b/arch/m68k/include/asm/atomic.h
@@ -17,8 +17,8 @@
#define ATOMIC_INIT(i) { (i) }
-#define atomic_read(v) ACCESS_ONCE((v)->counter)
-#define atomic_set(v, i) (((v)->counter) = i)
+#define atomic_read(v) READ_ONCE((v)->counter)
+#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
/*
* The ColdFire parts cannot do some immediate to memory operations,
diff --git a/arch/metag/include/asm/atomic_lnkget.h b/arch/metag/include/asm/atomic_lnkget.h
index 21c4c268b86c..a62581815624 100644
--- a/arch/metag/include/asm/atomic_lnkget.h
+++ b/arch/metag/include/asm/atomic_lnkget.h
@@ -3,7 +3,7 @@
#define ATOMIC_INIT(i) { (i) }
-#define atomic_set(v, i) ((v)->counter = (i))
+#define atomic_set(v, i) WRITE_ONCE((v)->counter, (i))
#include <linux/compiler.h>
diff --git a/arch/metag/include/asm/atomic_lock1.h b/arch/metag/include/asm/atomic_lock1.h
index f8efe380fe8b..0295d9b8d5bf 100644
--- a/arch/metag/include/asm/atomic_lock1.h
+++ b/arch/metag/include/asm/atomic_lock1.h
@@ -10,7 +10,7 @@
static inline int atomic_read(const atomic_t *v)
{
- return (v)->counter;
+ return READ_ONCE((v)->counter);
}
/*
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index 4c42fd9af777..f82d3af07931 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -30,7 +30,7 @@
*
* Atomically reads the value of @v.
*/
-#define atomic_read(v) ACCESS_ONCE((v)->counter)
+#define atomic_read(v) READ_ONCE((v)->counter)
/*
* atomic_set - set atomic variable
@@ -39,7 +39,7 @@
*
* Atomically sets the value of @v to @i.
*/
-#define atomic_set(v, i) ((v)->counter = (i))
+#define atomic_set(v, i) WRITE_ONCE((v)->counter, (i))
#define ATOMIC_OP(op, c_op, asm_op) \
static __inline__ void atomic_##op(int i, atomic_t * v) \
@@ -315,14 +315,14 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
* @v: pointer of type atomic64_t
*
*/
-#define atomic64_read(v) ACCESS_ONCE((v)->counter)
+#define atomic64_read(v) READ_ONCE((v)->counter)
/*
* atomic64_set - set atomic variable
* @v: pointer of type atomic64_t
* @i: required value
*/
-#define atomic64_set(v, i) ((v)->counter = (i))
+#define atomic64_set(v, i) WRITE_ONCE((v)->counter, (i))
#define ATOMIC64_OP(op, c_op, asm_op) \
static __inline__ void atomic64_##op(long i, atomic64_t * v) \
diff --git a/arch/mn10300/include/asm/atomic.h b/arch/mn10300/include/asm/atomic.h
index 375e59140c9c..ce318d5ab23b 100644
--- a/arch/mn10300/include/asm/atomic.h
+++ b/arch/mn10300/include/asm/atomic.h
@@ -34,7 +34,7 @@
*
* Atomically reads the value of @v. Note that the guaranteed
*/
-#define atomic_read(v) (ACCESS_ONCE((v)->counter))
+#define atomic_read(v) READ_ONCE((v)->counter)
/**
* atomic_set - set atomic variable
@@ -43,7 +43,7 @@
*
* Atomically sets the value of @v to @i. Note that the guaranteed
*/
-#define atomic_set(v, i) (((v)->counter) = (i))
+#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
#define ATOMIC_OP(op) \
static inline void atomic_##op(int i, atomic_t *v) \
diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
index 2536965d00ea..1d109990a022 100644
--- a/arch/parisc/include/asm/atomic.h
+++ b/arch/parisc/include/asm/atomic.h
@@ -67,7 +67,7 @@ static __inline__ void atomic_set(atomic_t *v, int i)
static __inline__ int atomic_read(const atomic_t *v)
{
- return ACCESS_ONCE((v)->counter);
+ return READ_ONCE((v)->counter);
}
/* exported interface */
diff --git a/arch/sh/include/asm/atomic.h b/arch/sh/include/asm/atomic.h
index 05b9f74ce2d5..c399e1c55685 100644
--- a/arch/sh/include/asm/atomic.h
+++ b/arch/sh/include/asm/atomic.h
@@ -14,8 +14,8 @@
#define ATOMIC_INIT(i) { (i) }
-#define atomic_read(v) ACCESS_ONCE((v)->counter)
-#define atomic_set(v,i) ((v)->counter = (i))
+#define atomic_read(v) READ_ONCE((v)->counter)
+#define atomic_set(v,i) WRITE_ONCE((v)->counter, (i))
#if defined(CONFIG_GUSA_RB)
#include <asm/atomic-grb.h>
diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
index 917084ace49d..f2fbf9e16faf 100644
--- a/arch/sparc/include/asm/atomic_64.h
+++ b/arch/sparc/include/asm/atomic_64.h
@@ -14,11 +14,11 @@
#define ATOMIC_INIT(i) { (i) }
#define ATOMIC64_INIT(i) { (i) }
-#define atomic_read(v) ACCESS_ONCE((v)->counter)
-#define atomic64_read(v) ACCESS_ONCE((v)->counter)
+#define atomic_read(v) READ_ONCE((v)->counter)
+#define atomic64_read(v) READ_ONCE((v)->counter)
-#define atomic_set(v, i) (((v)->counter) = i)
-#define atomic64_set(v, i) (((v)->counter) = i)
+#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
+#define atomic64_set(v, i) WRITE_ONCE(((v)->counter), (i))
#define ATOMIC_OP(op) \
void atomic_##op(int, atomic_t *); \
diff --git a/arch/tile/include/asm/atomic.h b/arch/tile/include/asm/atomic.h
index 709798460763..9fc0107a9c5e 100644
--- a/arch/tile/include/asm/atomic.h
+++ b/arch/tile/include/asm/atomic.h
@@ -34,7 +34,7 @@
*/
static inline int atomic_read(const atomic_t *v)
{
- return ACCESS_ONCE(v->counter);
+ return READ_ONCE(v->counter);
}
/**
diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
index 096a56d6ead4..51cabc26e387 100644
--- a/arch/tile/include/asm/atomic_64.h
+++ b/arch/tile/include/asm/atomic_64.h
@@ -24,7 +24,7 @@
/* First, the 32-bit atomic ops that are "real" on our 64-bit platform. */
-#define atomic_set(v, i) ((v)->counter = (i))
+#define atomic_set(v, i) WRITE_ONCE((v)->counter, (i))
/*
* The smp_mb() operations throughout are to support the fact that
@@ -82,8 +82,8 @@ static inline void atomic_xor(int i, atomic_t *v)
#define ATOMIC64_INIT(i) { (i) }
-#define atomic64_read(v) ((v)->counter)
-#define atomic64_set(v, i) ((v)->counter = (i))
+#define atomic64_read(v) READ_ONCE((v)->counter)
+#define atomic64_set(v, i) WRITE_ONCE((v)->counter, (i))
static inline void atomic64_add(long i, atomic64_t *v)
{
diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
index fb52aa644aab..ae5fb83e6d91 100644
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -24,7 +24,7 @@
*/
static __always_inline int atomic_read(const atomic_t *v)
{
- return ACCESS_ONCE((v)->counter);
+ return READ_ONCE((v)->counter);
}
/**
@@ -36,7 +36,7 @@ static __always_inline int atomic_read(const atomic_t *v)
*/
static __always_inline void atomic_set(atomic_t *v, int i)
{
- v->counter = i;
+ WRITE_ONCE(v->counter, i);
}
/**
diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
index 50e33eff58de..037351022f54 100644
--- a/arch/x86/include/asm/atomic64_64.h
+++ b/arch/x86/include/asm/atomic64_64.h
@@ -18,7 +18,7 @@
*/
static inline long atomic64_read(const atomic64_t *v)
{
- return ACCESS_ONCE((v)->counter);
+ return READ_ONCE((v)->counter);
}
/**
@@ -30,7 +30,7 @@ static inline long atomic64_read(const atomic64_t *v)
*/
static inline void atomic64_set(atomic64_t *v, long i)
{
- v->counter = i;
+ WRITE_ONCE(v->counter, i);
}
/**
diff --git a/arch/xtensa/include/asm/atomic.h b/arch/xtensa/include/asm/atomic.h
index 93795d047303..fd8017ce298a 100644
--- a/arch/xtensa/include/asm/atomic.h
+++ b/arch/xtensa/include/asm/atomic.h
@@ -47,7 +47,7 @@
*
* Atomically reads the value of @v.
*/
-#define atomic_read(v) ACCESS_ONCE((v)->counter)
+#define atomic_read(v) READ_ONCE((v)->counter)
/**
* atomic_set - set atomic variable
@@ -56,7 +56,7 @@
*
* Atomically sets the value of @v to @i.
*/
-#define atomic_set(v,i) ((v)->counter = (i))
+#define atomic_set(v,i) WRITE_ONCE((v)->counter, (i))
#if XCHAL_HAVE_S32C1I
#define ATOMIC_OP(op) \
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index 98d172b04f71..a9b9460de0d6 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -9,7 +9,7 @@
#include <linux/delay.h>
#include <linux/moduleparam.h>
-#include <asm/cmpxchg.h>
+#include <linux/atomic.h>
#include "net_driver.h"
#include "nic.h"
#include "io.h"
diff --git a/drivers/phy/phy-rcar-gen2.c b/drivers/phy/phy-rcar-gen2.c
index 6e0d9fa8e1d1..c7a05996d5c1 100644
--- a/drivers/phy/phy-rcar-gen2.c
+++ b/drivers/phy/phy-rcar-gen2.c
@@ -17,8 +17,7 @@
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
-
-#include <asm/cmpxchg.h>
+#include <linux/atomic.h>
#define USBHS_LPSTS 0x02
#define USBHS_UGCTRL 0x80
diff --git a/drivers/staging/speakup/selection.c b/drivers/staging/speakup/selection.c
index 98af3b1f2d2a..aa5ab6c80ed4 100644
--- a/drivers/staging/speakup/selection.c
+++ b/drivers/staging/speakup/selection.c
@@ -7,7 +7,7 @@
#include <linux/workqueue.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
-#include <asm/cmpxchg.h>
+#include <linux/atomic.h>
#include "speakup.h"
diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
index a94cbebbc33d..8942cdc676a7 100644
--- a/include/asm-generic/atomic-long.h
+++ b/include/asm-generic/atomic-long.h
@@ -35,7 +35,7 @@ typedef atomic_t atomic_long_t;
#endif
#define ATOMIC_LONG_READ_OP(mo) \
-static inline long atomic_long_read##mo(atomic_long_t *l) \
+static inline long atomic_long_read##mo(const atomic_long_t *l) \
{ \
ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \
\
@@ -43,6 +43,7 @@ static inline long atomic_long_read##mo(atomic_long_t *l) \
}
ATOMIC_LONG_READ_OP()
ATOMIC_LONG_READ_OP(_acquire)
+ATOMIC_LONG_READ_OP(_ctrl)
#undef ATOMIC_LONG_READ_OP
@@ -112,19 +113,23 @@ static inline void atomic_long_dec(atomic_long_t *l)
ATOMIC_LONG_PFX(_dec)(v);
}
-static inline void atomic_long_add(long i, atomic_long_t *l)
-{
- ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
-
- ATOMIC_LONG_PFX(_add)(i, v);
+#define ATOMIC_LONG_OP(op) \
+static inline void \
+atomic_long_##op(long i, atomic_long_t *l) \
+{ \
+ ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l; \
+ \
+ ATOMIC_LONG_PFX(_##op)(i, v); \
}
-static inline void atomic_long_sub(long i, atomic_long_t *l)
-{
- ATOMIC_LONG_PFX(_t) *v = (ATOMIC_LONG_PFX(_t) *)l;
+ATOMIC_LONG_OP(add)
+ATOMIC_LONG_OP(sub)
+ATOMIC_LONG_OP(and)
+ATOMIC_LONG_OP(or)
+ATOMIC_LONG_OP(xor)
+ATOMIC_LONG_OP(andnot)
- ATOMIC_LONG_PFX(_sub)(i, v);
-}
+#undef ATOMIC_LONG_OP
static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
{
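For reference, on a 64BIT kernel ATOMIC_LONG_PFX(x) expands to
atomic64##x, so ATOMIC_LONG_OP(and) above generates roughly:

static inline void atomic_long_and(long i, atomic_long_t *l)
{
	atomic64_t *v = (atomic64_t *)l;

	atomic64_and(i, v);
}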
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
index d4d7e337fdcb..74f1a3704d7a 100644
--- a/include/asm-generic/atomic.h
+++ b/include/asm-generic/atomic.h
@@ -127,7 +127,7 @@ ATOMIC_OP(xor, ^)
* Atomically reads the value of @v.
*/
#ifndef atomic_read
-#define atomic_read(v) ACCESS_ONCE((v)->counter)
+#define atomic_read(v) READ_ONCE((v)->counter)
#endif
/**
@@ -137,7 +137,7 @@ ATOMIC_OP(xor, ^)
*
* Atomically sets the value of @v to @i.
*/
-#define atomic_set(v, i) (((v)->counter) = (i))
+#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
#include <linux/irqflags.h>
diff --git a/include/asm-generic/qrwlock_types.h b/include/asm-generic/qrwlock_types.h
index 4d76f24df518..0abc6b6062fb 100644
--- a/include/asm-generic/qrwlock_types.h
+++ b/include/asm-generic/qrwlock_types.h
@@ -10,12 +10,12 @@
typedef struct qrwlock {
atomic_t cnts;
- arch_spinlock_t lock;
+ arch_spinlock_t wait_lock;
} arch_rwlock_t;
#define __ARCH_RW_LOCK_UNLOCKED { \
.cnts = ATOMIC_INIT(0), \
- .lock = __ARCH_SPIN_LOCK_UNLOCKED, \
+ .wait_lock = __ARCH_SPIN_LOCK_UNLOCKED, \
}
#endif /* __ASM_GENERIC_QRWLOCK_TYPES_H */
diff --git a/include/linux/atomic.h b/include/linux/atomic.h
index 00a5763e850e..e326469bb9d6 100644
--- a/include/linux/atomic.h
+++ b/include/linux/atomic.h
@@ -4,6 +4,15 @@
#include <asm/atomic.h>
#include <asm/barrier.h>
+#ifndef atomic_read_ctrl
+static inline int atomic_read_ctrl(const atomic_t *v)
+{
+ int val = atomic_read(v);
+ smp_read_barrier_depends(); /* Enforce control dependency. */
+ return val;
+}
+#endif
+
/*
* Relaxed variants of xchg, cmpxchg and some atomic operations.
*
@@ -451,11 +460,19 @@ static inline int atomic_dec_if_positive(atomic_t *v)
}
#endif
-#include <asm-generic/atomic-long.h>
#ifdef CONFIG_GENERIC_ATOMIC64
#include <asm-generic/atomic64.h>
#endif
+#ifndef atomic64_read_ctrl
+static inline long long atomic64_read_ctrl(const atomic64_t *v)
+{
+ long long val = atomic64_read(v);
+ smp_read_barrier_depends(); /* Enforce control dependency. */
+ return val;
+}
+#endif
+
#ifndef atomic64_andnot
static inline void atomic64_andnot(long long i, atomic64_t *v)
{
@@ -463,4 +480,6 @@ static inline void atomic64_andnot(long long i, atomic64_t *v)
}
#endif
+#include <asm-generic/atomic-long.h>
+
#endif /* _LINUX_ATOMIC_H */
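Moving the asm-generic/atomic-long.h include below the fallback
definitions lets the generated long wrappers resolve against them; a
usage sketch (assuming the ATOMIC_LONG_INIT() initializer from
atomic-long.h):

static atomic_long_t mask = ATOMIC_LONG_INIT(-1L);

static void clear_bit0(void)
{
	/* Expands to atomic{,64}_andnot(), including the fallback
	 * defined just above on architectures without a native
	 * andnot. */
	atomic_long_andnot(0x1, &mask);
}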
diff --git a/kernel/futex.c b/kernel/futex.c
index 6e443efc65f4..dfc86e93c31d 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -255,9 +255,18 @@ struct futex_hash_bucket {
struct plist_head chain;
} ____cacheline_aligned_in_smp;
-static unsigned long __read_mostly futex_hashsize;
+/*
+ * The base of the bucket array and its size are always used together
+ * (after initialization only in hash_futex()), so ensure that they
+ * reside in the same cacheline.
+ */
+static struct {
+ struct futex_hash_bucket *queues;
+ unsigned long hashsize;
+} __futex_data __read_mostly __aligned(2*sizeof(long));
+#define futex_queues (__futex_data.queues)
+#define futex_hashsize (__futex_data.hashsize)
-static struct futex_hash_bucket *futex_queues;
/*
* Fault injections for futexes.
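The layout pays off because every lookup touches both fields back to
back; a paraphrased sketch of the access pattern in hash_futex() (the
helper name and the elided hash computation are hypothetical):

static struct futex_hash_bucket *bucket_of(u32 hash)
{
	/* futex_queues and futex_hashsize now share a cache line,
	 * so this indexing costs a single line fill. */
	return &futex_queues[hash & (futex_hashsize - 1)];
}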
diff --git a/kernel/locking/osq_lock.c b/kernel/locking/osq_lock.c
index dc85ee23a26f..d092a0c9c2d4 100644
--- a/kernel/locking/osq_lock.c
+++ b/kernel/locking/osq_lock.c
@@ -50,7 +50,7 @@ osq_wait_next(struct optimistic_spin_queue *lock,
for (;;) {
if (atomic_read(&lock->tail) == curr &&
- atomic_cmpxchg(&lock->tail, curr, old) == curr) {
+ atomic_cmpxchg_acquire(&lock->tail, curr, old) == curr) {
/*
* We were the last queued, we moved @lock back. @prev
* will now observe @lock and will complete its
@@ -92,7 +92,11 @@ bool osq_lock(struct optimistic_spin_queue *lock)
node->next = NULL;
node->cpu = curr;
- old = atomic_xchg(&lock->tail, curr);
+ /*
+ * ACQUIRE semantics, pairs with the corresponding RELEASE
+ * in the uncontended unlock() fastpath.
+ */
+ old = atomic_xchg_acquire(&lock->tail, curr);
if (old == OSQ_UNLOCKED_VAL)
return true;
@@ -184,7 +188,8 @@ void osq_unlock(struct optimistic_spin_queue *lock)
/*
* Fast path for the uncontended case.
*/
- if (likely(atomic_cmpxchg(&lock->tail, curr, OSQ_UNLOCKED_VAL) == curr))
+ if (likely(atomic_cmpxchg_release(&lock->tail, curr,
+ OSQ_UNLOCKED_VAL) == curr))
return;
/*
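The ACQUIRE/RELEASE pairing gives the usual lock-style guarantee; a
minimal producer/consumer sketch with hypothetical names:

static atomic_t latch = ATOMIC_INIT(0);
static int payload;		/* hypothetical protected data */

static void publish(void)
{
	payload = 42;
	/* RELEASE: the store to payload cannot move past this. */
	atomic_cmpxchg_release(&latch, 0, 1);
}

static void consume(void)
{
	/* ACQUIRE: the read of payload cannot move before this. */
	if (atomic_cmpxchg_acquire(&latch, 1, 0) == 1)
		BUG_ON(READ_ONCE(payload) != 42);
}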
diff --git a/kernel/locking/qrwlock.c b/kernel/locking/qrwlock.c
index f17a3e3b3550..fec082338668 100644
--- a/kernel/locking/qrwlock.c
+++ b/kernel/locking/qrwlock.c
@@ -86,7 +86,7 @@ void queued_read_lock_slowpath(struct qrwlock *lock, u32 cnts)
/*
* Put the reader into the wait queue
*/
- arch_spin_lock(&lock->lock);
+ arch_spin_lock(&lock->wait_lock);
/*
* The ACQUIRE semantics of the following spinning code ensure
@@ -99,7 +99,7 @@ void queued_read_lock_slowpath(struct qrwlock *lock, u32 cnts)
/*
* Signal the next one in queue to become queue head
*/
- arch_spin_unlock(&lock->lock);
+ arch_spin_unlock(&lock->wait_lock);
}
EXPORT_SYMBOL(queued_read_lock_slowpath);
@@ -112,7 +112,7 @@ void queued_write_lock_slowpath(struct qrwlock *lock)
u32 cnts;
/* Put the writer into the wait queue */
- arch_spin_lock(&lock->lock);
+ arch_spin_lock(&lock->wait_lock);
/* Try to acquire the lock directly if no reader is present */
if (!atomic_read(&lock->cnts) &&
@@ -144,6 +144,6 @@ void queued_write_lock_slowpath(struct qrwlock *lock)
cpu_relax_lowlatency();
}
unlock:
- arch_spin_unlock(&lock->lock);
+ arch_spin_unlock(&lock->wait_lock);
}
EXPORT_SYMBOL(queued_write_lock_slowpath);
diff --git a/kernel/locking/qspinlock_paravirt.h b/kernel/locking/qspinlock_paravirt.h
index c8e6e9a596f5..f0450ff4829b 100644
--- a/kernel/locking/qspinlock_paravirt.h
+++ b/kernel/locking/qspinlock_paravirt.h
@@ -267,7 +267,6 @@ static void pv_wait_head(struct qspinlock *lock, struct mcs_spinlock *node)
}
if (!lp) { /* ONCE */
- WRITE_ONCE(pn->state, vcpu_hashed);
lp = pv_hash(lock, pn);
/*
@@ -275,11 +274,9 @@ static void pv_wait_head(struct qspinlock *lock, struct mcs_spinlock *node)
* when we observe _Q_SLOW_VAL in __pv_queued_spin_unlock()
* we'll be sure to be able to observe our hash entry.
*
- * [S] pn->state
* [S] <hash> [Rmw] l->locked == _Q_SLOW_VAL
* MB RMB
* [RmW] l->locked = _Q_SLOW_VAL [L] <unhash>
- * [L] pn->state
*
* Matches the smp_rmb() in __pv_queued_spin_unlock().
*/
@@ -364,8 +361,7 @@ __visible void __pv_queued_spin_unlock(struct qspinlock *lock)
* vCPU is harmless other than the additional latency in completing
* the unlock.
*/
- if (READ_ONCE(node->state) == vcpu_hashed)
- pv_kick(node->cpu);
+ pv_kick(node->cpu);
}
/*
* Include the architecture specific callee-save thunk of the