Diffstat (limited to 'include/linux/percpu-refcount.h')
-rw-r--r-- | include/linux/percpu-refcount.h | 89
1 file changed, 49 insertions(+), 40 deletions(-)
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index 22d9d183950d..d73a1c08c3e3 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -51,9 +51,9 @@
 #define _LINUX_PERCPU_REFCOUNT_H
 
 #include <linux/atomic.h>
-#include <linux/kernel.h>
 #include <linux/percpu.h>
 #include <linux/rcupdate.h>
+#include <linux/types.h>
 #include <linux/gfp.h>
 
 struct percpu_ref;
@@ -92,18 +92,30 @@ enum {
 	PERCPU_REF_ALLOW_REINIT	= 1 << 2,
 };
 
-struct percpu_ref {
+struct percpu_ref_data {
 	atomic_long_t		count;
-	/*
-	 * The low bit of the pointer indicates whether the ref is in percpu
-	 * mode; if set, then get/put will manipulate the atomic_t.
-	 */
-	unsigned long		percpu_count_ptr;
 	percpu_ref_func_t	*release;
 	percpu_ref_func_t	*confirm_switch;
 	bool			force_atomic:1;
 	bool			allow_reinit:1;
 	struct rcu_head		rcu;
+	struct percpu_ref	*ref;
+};
+
+struct percpu_ref {
+	/*
+	 * The low bit of the pointer indicates whether the ref is in percpu
+	 * mode; if set, then get/put will manipulate the atomic_t.
+	 */
+	unsigned long		percpu_count_ptr;
+
+	/*
+	 * 'percpu_ref' is often embedded into user structure, and only
+	 * 'percpu_count_ptr' is required in fast path, move other fields
+	 * into 'percpu_ref_data', so we can reduce memory footprint in
+	 * fast path.
+	 */
+	struct percpu_ref_data	*data;
 };
 
 int __must_check percpu_ref_init(struct percpu_ref *ref,
@@ -118,6 +130,7 @@ void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
 				 percpu_ref_func_t *confirm_kill);
 void percpu_ref_resurrect(struct percpu_ref *ref);
 void percpu_ref_reinit(struct percpu_ref *ref);
+bool percpu_ref_is_zero(struct percpu_ref *ref);
 
 /**
  * percpu_ref_kill - drop the initial ref
@@ -155,7 +168,7 @@ static inline bool __ref_is_percpu(struct percpu_ref *ref,
 	 * between contaminating the pointer value, meaning that
 	 * READ_ONCE() is required when fetching it.
 	 *
-	 * The smp_read_barrier_depends() implied by READ_ONCE() pairs
+	 * The dependency ordering from the READ_ONCE() pairs
 	 * with smp_store_release() in __percpu_ref_switch_to_percpu().
 	 */
 	percpu_ptr = READ_ONCE(ref->percpu_count_ptr);
@@ -191,7 +204,7 @@ static inline void percpu_ref_get_many(struct percpu_ref *ref, unsigned long nr)
 	if (__ref_is_percpu(ref, &percpu_count))
 		this_cpu_add(*percpu_count, nr);
 	else
-		atomic_long_add(nr, &ref->count);
+		atomic_long_add(nr, &ref->data->count);
 
 	rcu_read_unlock();
 }
@@ -200,7 +213,7 @@ static inline void percpu_ref_get_many(struct percpu_ref *ref, unsigned long nr)
 /**
  * percpu_ref_get - increment a percpu refcount
  * @ref: percpu_ref to get
  *
- * Analagous to atomic_long_inc().
+ * Analogous to atomic_long_inc().
  *
  * This function is safe to call as long as @ref is between init and exit.
  */
@@ -231,7 +244,7 @@ static inline bool percpu_ref_tryget_many(struct percpu_ref *ref,
 		this_cpu_add(*percpu_count, nr);
 		ret = true;
 	} else {
-		ret = atomic_long_add_unless(&ref->count, nr, 0);
+		ret = atomic_long_add_unless(&ref->data->count, nr, 0);
 	}
 
 	rcu_read_unlock();
@@ -254,6 +267,28 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref)
 }
 
 /**
+ * percpu_ref_tryget_live_rcu - same as percpu_ref_tryget_live() but the
+ * caller is responsible for taking RCU.
+ *
+ * This function is safe to call as long as @ref is between init and exit.
+ */
+static inline bool percpu_ref_tryget_live_rcu(struct percpu_ref *ref)
+{
+	unsigned long __percpu *percpu_count;
+	bool ret = false;
+
+	WARN_ON_ONCE(!rcu_read_lock_held());
+
+	if (likely(__ref_is_percpu(ref, &percpu_count))) {
+		this_cpu_inc(*percpu_count);
+		ret = true;
+	} else if (!(ref->percpu_count_ptr & __PERCPU_REF_DEAD)) {
+		ret = atomic_long_inc_not_zero(&ref->data->count);
+	}
+	return ret;
+}
+
+/**
  * percpu_ref_tryget_live - try to increment a live percpu refcount
  * @ref: percpu_ref to try-get
  *
@@ -270,20 +305,11 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref)
  */
 static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
 {
-	unsigned long __percpu *percpu_count;
 	bool ret = false;
 
 	rcu_read_lock();
-
-	if (__ref_is_percpu(ref, &percpu_count)) {
-		this_cpu_inc(*percpu_count);
-		ret = true;
-	} else if (!(ref->percpu_count_ptr & __PERCPU_REF_DEAD)) {
-		ret = atomic_long_inc_not_zero(&ref->count);
-	}
-
+	ret = percpu_ref_tryget_live_rcu(ref);
 	rcu_read_unlock();
-
 	return ret;
 }
 
@@ -305,8 +331,8 @@ static inline void percpu_ref_put_many(struct percpu_ref *ref, unsigned long nr)
 	if (__ref_is_percpu(ref, &percpu_count))
 		this_cpu_sub(*percpu_count, nr);
-	else if (unlikely(atomic_long_sub_and_test(nr, &ref->count)))
-		ref->release(ref);
+	else if (unlikely(atomic_long_sub_and_test(nr, &ref->data->count)))
+		ref->data->release(ref);
 
 	rcu_read_unlock();
 }
@@ -339,21 +365,4 @@ static inline bool percpu_ref_is_dying(struct percpu_ref *ref)
 	return ref->percpu_count_ptr & __PERCPU_REF_DEAD;
 }
 
-/**
- * percpu_ref_is_zero - test whether a percpu refcount reached zero
- * @ref: percpu_ref to test
- *
- * Returns %true if @ref reached zero.
- *
- * This function is safe to call as long as @ref is between init and exit.
- */
-static inline bool percpu_ref_is_zero(struct percpu_ref *ref)
-{
-	unsigned long __percpu *percpu_count;
-
-	if (__ref_is_percpu(ref, &percpu_count))
-		return false;
-	return !atomic_long_read(&ref->count);
-}
-
 #endif
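
The comment kept on percpu_count_ptr is the heart of the fast path: the percpu counter allocation is at least word-aligned, so the low bit of the pointer is free to encode the mode, and a single fetch both tests the flag and yields the pointer. A minimal standalone sketch of that tagging trick (REF_ATOMIC, tagged_ref and ref_is_percpu are illustrative names, not kernel API):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

#define REF_ATOMIC	1UL	/* low bit set => atomic (non-percpu) mode */

struct tagged_ref {
	uintptr_t count_ptr;	/* counter pointer with the mode flag ORed in */
};

static bool ref_is_percpu(struct tagged_ref *ref, unsigned long **countp)
{
	uintptr_t p = ref->count_ptr;

	if (p & REF_ATOMIC)
		return false;	/* flag set: caller falls back to the atomic counter */

	*countp = (unsigned long *)p;
	return true;
}

int main(void)
{
	unsigned long *counter = malloc(sizeof(*counter));	/* >= word-aligned */
	unsigned long *c;

	if (!counter)
		return 1;

	struct tagged_ref ref = { .count_ptr = (uintptr_t)counter };

	assert(ref_is_percpu(&ref, &c) && c == counter);

	ref.count_ptr |= REF_ATOMIC;	/* switch to atomic mode */
	assert(!ref_is_percpu(&ref, &c));

	free(counter);
	return 0;
}

Packing the flag into the pointer is what lets __ref_is_percpu() decide the mode and obtain the counter address from one READ_ONCE() load.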
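
The new comment in struct percpu_ref states the motivation for the split: the ref is usually embedded in a user structure, and only percpu_count_ptr is needed on the fast path. A rough userspace model of the embedded footprint on an LP64 target (field types only approximate the kernel's for sizing; these are not the kernel definitions):

#include <stdio.h>

struct percpu_ref_before {			/* everything embedded */
	long		count;			/* atomic_long_t */
	unsigned long	percpu_count_ptr;
	void		(*release)(void *);	/* percpu_ref_func_t * */
	void		(*confirm_switch)(void *);
	unsigned char	flags;			/* force_atomic:1, allow_reinit:1 */
	void		*rcu[2];		/* struct rcu_head: next + func */
};

struct percpu_ref_after {			/* this patch's layout */
	unsigned long	percpu_count_ptr;	/* fast-path word, low bit = mode */
	void		*data;			/* -> percpu_ref_data (cold fields) */
};

int main(void)
{
	printf("embedded before the split: %zu bytes\n",
	       sizeof(struct percpu_ref_before));	/* 56 on LP64 */
	printf("embedded after the split:  %zu bytes\n",
	       sizeof(struct percpu_ref_after));	/* 16 on LP64 */
	return 0;
}

On this model the embedded object shrinks from 56 to 16 bytes. The cold fields move behind one extra pointer chase, which only the slow paths and the atomic-mode fallback (the new ref->data->count accesses in the diff) have to take.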
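
The added percpu_ref_tryget_live_rcu() serves callers that are already inside an RCU read-side critical section, typically right after an RCU-protected lookup, and saves the nested rcu_read_lock()/rcu_read_unlock() pair that percpu_ref_tryget_live() would otherwise take. A usage sketch ('struct my_obj' and my_obj_tryget() are hypothetical):

#include <linux/percpu-refcount.h>
#include <linux/rcupdate.h>

/* Hypothetical object; only the embedded ref matters here. */
struct my_obj {
	struct percpu_ref ref;
};

/*
 * Must run inside the caller's existing rcu_read_lock() section, e.g.
 * right after an RCU-protected lookup; the helper itself warns if not.
 */
static struct my_obj *my_obj_tryget(struct my_obj *obj)
{
	if (obj && percpu_ref_tryget_live_rcu(&obj->ref))
		return obj;	/* got a reference on a live ref */
	return NULL;		/* ref was dying or dead */
}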
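
Since the final put in percpu_ref_put_many() now reaches the release callback through ref->data->release, a sketch of the surrounding lifecycle may help orient readers (the 'widget' names are hypothetical; calling percpu_ref_exit() from release is a common pattern, but the right teardown point depends on the user):

#include <linux/kernel.h>
#include <linux/percpu-refcount.h>
#include <linux/slab.h>

/* Hypothetical object using an embedded percpu_ref as its lifetime anchor. */
struct widget {
	struct percpu_ref ref;
};

/* Runs once the count reaches zero after percpu_ref_kill(). */
static void widget_release(struct percpu_ref *ref)
{
	struct widget *w = container_of(ref, struct widget, ref);

	percpu_ref_exit(ref);	/* free the percpu counter */
	kfree(w);
}

static struct widget *widget_create(gfp_t gfp)
{
	struct widget *w = kzalloc(sizeof(*w), gfp);

	if (!w)
		return NULL;
	if (percpu_ref_init(&w->ref, widget_release, 0, gfp)) {
		kfree(w);
		return NULL;
	}
	return w;	/* caller holds the initial ref */
}

percpu_ref_kill() drops that initial reference and switches the ref to atomic mode, so widget_release() fires when the last percpu_ref_put() brings the count to zero.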