author     Tejun Heo <tj@kernel.org>    2015-01-06 10:26:10 -0500
committer  Tejun Heo <tj@kernel.org>    2015-01-06 10:26:10 -0500
commit     6810e4a394f9d781050107529b8d1465c00b7b13 (patch)
tree       bff56a1838d3c0d2e7d2e89a109ba3b739b9f666 /include/linux/percpu-refcount.h
parent     Linux 3.19-rc1 (diff)
percpu_ref: remove unnecessary ACCESS_ONCE() in percpu_ref_tryget_live()
__ref_is_percpu() needs the implied ACCESS_ONCE() in lockless_dereference() on @ref->percpu_count_ptr because the value is tested for !__PERCPU_REF_ATOMIC, which may be set asynchronously, and then used as a pointer. If the compiler generates a separate fetch when using it as a pointer, __PERCPU_REF_ATOMIC may be set in between, contaminating the pointer value.

percpu_ref_tryget_live() also uses ACCESS_ONCE() to test __PERCPU_REF_DEAD; however, there's no reason for this. I just copied the ACCESS_ONCE() usage blindly from __ref_is_percpu(). All it does is confuse people trying to understand what's going on.

This patch removes the unnecessary ACCESS_ONCE() usage from percpu_ref_tryget_live() and adds a comment explaining why __ref_is_percpu() needs it.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Kent Overstreet <kmo@daterainc.com>
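To make the hazard concrete, here is a minimal standalone C sketch of the double-fetch problem (illustrative only, not the kernel implementation; the flag value and the volatile cast standing in for ACCESS_ONCE()/lockless_dereference() are simplifying assumptions):

#include <stdbool.h>

#define __PERCPU_REF_ATOMIC    1UL    /* assumed: flag packed into the low bit */

/* Shared word: a pointer with flag bits packed into its low bits.
 * Another CPU may set __PERCPU_REF_ATOMIC in it at any time. */
static unsigned long percpu_count_ptr;

/* Broken: two plain loads.  The compiler may fetch the word once for
 * the flag test and again for the pointer use; if the flag is set in
 * between, the returned "pointer" is contaminated by the flag bit. */
static bool broken_is_percpu(unsigned long **countp)
{
        if (percpu_count_ptr & __PERCPU_REF_ATOMIC)     /* fetch #1 */
                return false;
        *countp = (unsigned long *)percpu_count_ptr;    /* fetch #2 */
        return true;
}

/* Fixed: one forced load, so the flag test and the pointer use see the
 * same snapshot.  In the kernel, lockless_dereference() additionally
 * supplies the data dependency barrier paired with smp_store_release(). */
static bool fixed_is_percpu(unsigned long **countp)
{
        unsigned long v = *(volatile unsigned long *)&percpu_count_ptr;

        if (v & __PERCPU_REF_ATOMIC)
                return false;
        *countp = (unsigned long *)v;
        return true;
}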
Diffstat (limited to 'include/linux/percpu-refcount.h')
-rw-r--r--    include/linux/percpu-refcount.h    20
1 file changed, 17 insertions(+), 3 deletions(-)
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index b4337646388b..6a7a670366ab 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -128,8 +128,22 @@ static inline void percpu_ref_kill(struct percpu_ref *ref)
 static inline bool __ref_is_percpu(struct percpu_ref *ref,
                                          unsigned long __percpu **percpu_countp)
 {
-        /* paired with smp_store_release() in percpu_ref_reinit() */
-        unsigned long percpu_ptr = lockless_dereference(ref->percpu_count_ptr);
+        unsigned long percpu_ptr;
+
+        /*
+         * The value of @ref->percpu_count_ptr is tested for
+         * !__PERCPU_REF_ATOMIC, which may be set asynchronously, and then
+         * used as a pointer.  If the compiler generates a separate fetch
+         * when using it as a pointer, __PERCPU_REF_ATOMIC may be set in
+         * between contaminating the pointer value, meaning that
+         * ACCESS_ONCE() is required when fetching it.
+         *
+         * Also, we need a data dependency barrier to be paired with
+         * smp_store_release() in __percpu_ref_switch_to_percpu().
+         *
+         * Use lockless deref which contains both.
+         */
+        percpu_ptr = lockless_dereference(ref->percpu_count_ptr);
 
         /*
          * Theoretically, the following could test just ATOMIC; however,
@@ -233,7 +247,7 @@ static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
         if (__ref_is_percpu(ref, &percpu_count)) {
                 this_cpu_inc(*percpu_count);
                 ret = true;
-        } else if (!(ACCESS_ONCE(ref->percpu_count_ptr) & __PERCPU_REF_DEAD)) {
+        } else if (!(ref->percpu_count_ptr & __PERCPU_REF_DEAD)) {
                 ret = atomic_long_inc_not_zero(&ref->count);
         }
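The DEAD test above can tolerate a plain load because it is only a fast-path hint and is inherently racy by design: completion of percpu_ref_kill() does not in itself guarantee that a concurrent tryget_live() fails, and atomic_long_inc_not_zero() keeps the count itself consistent either way, so a stale read of the flag changes nothing. A caller-side sketch of the tryget/put pattern follows (illustrative; struct obj and do_something() are hypothetical, only the percpu_ref calls are the real API):

#include <linux/percpu-refcount.h>

struct obj {
        struct percpu_ref ref;
        /* ... payload ... */
};

static void use_obj(struct obj *obj)
{
        if (percpu_ref_tryget_live(&obj->ref)) {
                /* Got a live ref: the object stays alive until the put. */
                do_something(obj);
                percpu_ref_put(&obj->ref);
        }
        /* else: the ref is dying or dead; take the fallback path. */
}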