path: root/include/linux/percpu-refcount.h
author    Tejun Heo <tj@kernel.org>  2014-09-24 13:31:49 -0400
committer Tejun Heo <tj@kernel.org>  2014-09-24 13:31:49 -0400
commit    27344a9017cdaff82a167827da3001a0918afdc3 (patch)
tree      025e5eb1351f394a83e3400e221bd3149b6eb6a4 /include/linux/percpu-refcount.h
parent    percpu_ref: rename things to prepare for decoupling percpu/atomic mode switch (diff)
percpu_ref: add PCPU_REF_DEAD
percpu_ref will be restructured so that percpu/atomic mode switching and reference killing are decoupled. In preparation, add PCPU_REF_DEAD and PCPU_REF_ATOMIC_DEAD, which is the OR of ATOMIC and DEAD. For now, ATOMIC and DEAD are changed together and all PCPU_REF_ATOMIC uses are converted to PCPU_REF_ATOMIC_DEAD without causing any behavior changes.

percpu_ref_init() now specifies an explicit alignment when allocating the percpu counters so that the pointer has enough unused low bits to accommodate the flags. Note that one flag was fine, as the minimum alignment for percpu memory is 2 bytes, but two flags are already too many for the natural alignment of unsigned longs on archs like cris and m68k.

v2: The original patch had a BUILD_BUG_ON() which triggers if unsigned long's alignment isn't enough to accommodate the flags, which triggered on cris and m68k. percpu_ref_init() was updated to specify the required alignment explicitly. Reported by Fengguang.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Kent Overstreet <kmo@daterainc.com>
Cc: kbuild test robot <fengguang.wu@intel.com>
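The alignment change described above lands in percpu_ref_init() in lib/percpu-refcount.c, which is outside the diff shown below. A minimal sketch of what that allocation looks like after this patch, reconstructed from the commit message rather than copied from the diff (the exact surrounding code may differ):

    /*
     * Sketch (not part of the diff below): the percpu counter is allocated
     * with an explicit alignment of at least 1 << __PERCPU_REF_FLAG_BITS so
     * the two flag bits can live in the unused low bits of percpu_count_ptr,
     * even on archs such as cris and m68k where unsigned long is only
     * 2-byte aligned.
     */
    size_t align = max_t(size_t, 1 << __PERCPU_REF_FLAG_BITS,
                         __alignof__(unsigned long));

    ref->percpu_count_ptr = (unsigned long)
            __alloc_percpu_gfp(sizeof(unsigned long), align, gfp);
    if (!ref->percpu_count_ptr)
            return -ENOMEM;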
Diffstat (limited to 'include/linux/percpu-refcount.h')
-rw-r--r--  include/linux/percpu-refcount.h  6
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index 910e5f72055d..bd9483d390b4 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -57,6 +57,10 @@ typedef void (percpu_ref_func_t)(struct percpu_ref *);
/* flags set in the lower bits of percpu_ref->percpu_count_ptr */
enum {
__PERCPU_REF_ATOMIC = 1LU << 0, /* operating in atomic mode */
+ __PERCPU_REF_DEAD = 1LU << 1, /* (being) killed */
+ __PERCPU_REF_ATOMIC_DEAD = __PERCPU_REF_ATOMIC | __PERCPU_REF_DEAD,
+
+ __PERCPU_REF_FLAG_BITS = 2,
};
struct percpu_ref {
@@ -107,7 +111,7 @@ static inline bool __ref_is_percpu(struct percpu_ref *ref,
/* paired with smp_store_release() in percpu_ref_reinit() */
smp_read_barrier_depends();
- if (unlikely(percpu_ptr & __PERCPU_REF_ATOMIC))
+ if (unlikely(percpu_ptr & __PERCPU_REF_ATOMIC_DEAD))
return false;
*percpu_countp = (unsigned long __percpu *)percpu_ptr;
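
For context, __ref_is_percpu() is the fast-path test used by the get/put helpers later in this header. A sketch of a caller, based on the header around this version and not part of this diff: with the change above, any flag bit set in percpu_count_ptr (ATOMIC or DEAD) makes the helper return false and forces the shared atomic counter path.

    /*
     * Sketch of a consumer of __ref_is_percpu() (not part of this diff):
     * percpu fast path when no flag bits are set, atomic slow path otherwise.
     */
    static inline void percpu_ref_get(struct percpu_ref *ref)
    {
            unsigned long __percpu *percpu_count;

            rcu_read_lock_sched();

            if (__ref_is_percpu(ref, &percpu_count))
                    this_cpu_inc(*percpu_count);    /* percpu mode */
            else
                    atomic_long_inc(&ref->count);   /* atomic or dead */

            rcu_read_unlock_sched();
    }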