Diffstat (limited to 'include/linux/refcount.h')
-rw-r--r--  include/linux/refcount.h | 136
1 file changed, 99 insertions(+), 37 deletions(-)
diff --git a/include/linux/refcount.h b/include/linux/refcount.h
index 0ac50cf62d06..a62fcca97486 100644
--- a/include/linux/refcount.h
+++ b/include/linux/refcount.h
@@ -38,11 +38,24 @@
  * atomic operations, then the count will continue to edge closer to 0. If it
  * reaches a value of 1 before /any/ of the threads reset it to the saturated
  * value, then a concurrent refcount_dec_and_test() may erroneously free the
- * underlying object. Given the precise timing details involved with the
- * round-robin scheduling of each thread manipulating the refcount and the need
- * to hit the race multiple times in succession, there doesn't appear to be a
- * practical avenue of attack even if using refcount_add() operations with
- * larger increments.
+ * underlying object.
+ * Linux limits the maximum number of tasks to PID_MAX_LIMIT, which is currently
+ * 0x400000 (and can't easily be raised in the future beyond FUTEX_TID_MASK).
+ * With the current PID limit, if no batched refcounting operations are used and
+ * the attacker can't repeatedly trigger kernel oopses in the middle of refcount
+ * operations, this makes it impossible for a saturated refcount to leave the
+ * saturation range, even if it is possible for multiple uses of the same
+ * refcount to nest in the context of a single task:
+ *
+ *	(UINT_MAX+1-REFCOUNT_SATURATED) / PID_MAX_LIMIT =
+ *	0x40000000 / 0x400000 = 0x100 = 256
+ *
+ * If hundreds of references are added/removed with a single refcounting
+ * operation, it may potentially be possible to leave the saturation range; but
+ * given the precise timing details involved with the round-robin scheduling of
+ * each thread manipulating the refcount and the need to hit the race multiple
+ * times in succession, there doesn't appear to be a practical avenue of attack
+ * even if using refcount_add() operations with larger increments.
  *
  * Memory ordering
  * ===============
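A quick cross-check of the arithmetic in the new comment: the standalone
userspace sketch below (not part of the patch; REFCOUNT_SATURATED and
PID_MAX_LIMIT are mirrored locally from their kernel definitions) computes
the same 256 per-task budget.

#include <limits.h>
#include <stdio.h>

/* Mirrored from the kernel sources so this check is self-contained. */
#define REFCOUNT_SATURATED	(INT_MIN / 2)		/* 0xc0000000 as a u32 */
#define PID_MAX_LIMIT		(4 * 1024 * 1024)	/* 0x400000 */

int main(void)
{
	/* Headroom between the saturation point and the wrap back past 0. */
	unsigned int headroom = 0u - (unsigned int)REFCOUNT_SATURATED;

	printf("headroom: 0x%x\n", headroom);			/* 0x40000000 */
	printf("per task: %u\n", headroom / PID_MAX_LIMIT);	/* 0x100 = 256 */
	return 0;
}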
@@ -88,7 +101,7 @@
 struct mutex;
 
 /**
- * struct refcount_t - variant of atomic_t specialized for reference counts
+ * typedef refcount_t - variant of atomic_t specialized for reference counts
  * @refs: atomic_t counter field
  *
  * The counter saturates at REFCOUNT_SATURATED and will not move once
@@ -134,6 +147,24 @@ static inline unsigned int refcount_read(const refcount_t *r)
 	return atomic_read(&r->refs);
 }
 
+static inline __must_check bool __refcount_add_not_zero(int i, refcount_t *r, int *oldp)
+{
+	int old = refcount_read(r);
+
+	do {
+		if (!old)
+			break;
+	} while (!atomic_try_cmpxchg_relaxed(&r->refs, &old, old + i));
+
+	if (oldp)
+		*oldp = old;
+
+	if (unlikely(old < 0 || old + i < 0))
+		refcount_warn_saturate(r, REFCOUNT_ADD_NOT_ZERO_OVF);
+
+	return old;
+}
+
 /**
  * refcount_add_not_zero - add a value to a refcount unless it is 0
  * @i: the value to add to the refcount
@@ -154,17 +185,20 @@ static inline unsigned int refcount_read(const refcount_t *r)
  */
 static inline __must_check bool refcount_add_not_zero(int i, refcount_t *r)
 {
-	int old = refcount_read(r);
+	return __refcount_add_not_zero(i, r, NULL);
+}
 
-	do {
-		if (!old)
-			break;
-	} while (!atomic_try_cmpxchg_relaxed(&r->refs, &old, old + i));
+static inline void __refcount_add(int i, refcount_t *r, int *oldp)
+{
+	int old = atomic_fetch_add_relaxed(i, &r->refs);
 
-	if (unlikely(old < 0 || old + i < 0))
-		refcount_warn_saturate(r, REFCOUNT_ADD_NOT_ZERO_OVF);
+	if (oldp)
+		*oldp = old;
 
-	return old;
+	if (unlikely(!old))
+		refcount_warn_saturate(r, REFCOUNT_ADD_UAF);
+	else if (unlikely(old < 0 || old + i < 0))
+		refcount_warn_saturate(r, REFCOUNT_ADD_OVF);
 }
 
 /**
@@ -185,12 +219,12 @@ static inline __must_check bool refcount_add_not_zero(int i, refcount_t *r)
  */
 static inline void refcount_add(int i, refcount_t *r)
 {
-	int old = atomic_fetch_add_relaxed(i, &r->refs);
+	__refcount_add(i, r, NULL);
+}
 
-	if (unlikely(!old))
-		refcount_warn_saturate(r, REFCOUNT_ADD_UAF);
-	else if (unlikely(old < 0 || old + i < 0))
-		refcount_warn_saturate(r, REFCOUNT_ADD_OVF);
+static inline __must_check bool __refcount_inc_not_zero(refcount_t *r, int *oldp)
+{
+	return __refcount_add_not_zero(1, r, oldp);
 }
 
 /**
@@ -208,7 +242,12 @@ static inline void refcount_add(int i, refcount_t *r)
  */
 static inline __must_check bool refcount_inc_not_zero(refcount_t *r)
 {
-	return refcount_add_not_zero(1, r);
+	return __refcount_inc_not_zero(r, NULL);
+}
+
+static inline void __refcount_inc(refcount_t *r, int *oldp)
+{
+	__refcount_add(1, r, oldp);
 }
 
 /**
@@ -225,7 +264,25 @@ static inline __must_check bool refcount_inc_not_zero(refcount_t *r)
  */
 static inline void refcount_inc(refcount_t *r)
 {
-	refcount_add(1, r);
+	__refcount_inc(r, NULL);
+}
+
+static inline __must_check bool __refcount_sub_and_test(int i, refcount_t *r, int *oldp)
+{
+	int old = atomic_fetch_sub_release(i, &r->refs);
+
+	if (oldp)
+		*oldp = old;
+
+	if (old == i) {
+		smp_acquire__after_ctrl_dep();
+		return true;
+	}
+
+	if (unlikely(old < 0 || old - i < 0))
+		refcount_warn_saturate(r, REFCOUNT_SUB_UAF);
+
+	return false;
 }
 
 /**
@@ -250,17 +307,12 @@ static inline void refcount_inc(refcount_t *r)
  */
 static inline __must_check bool refcount_sub_and_test(int i, refcount_t *r)
 {
-	int old = atomic_fetch_sub_release(i, &r->refs);
-
-	if (old == i) {
-		smp_acquire__after_ctrl_dep();
-		return true;
-	}
-
-	if (unlikely(old < 0 || old - i < 0))
-		refcount_warn_saturate(r, REFCOUNT_SUB_UAF);
+	return __refcount_sub_and_test(i, r, NULL);
+}
 
-	return false;
+static inline __must_check bool __refcount_dec_and_test(refcount_t *r, int *oldp)
+{
+	return __refcount_sub_and_test(1, r, oldp);
 }
 
 /**
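The double-underscore variants introduced above funnel into the same
primitives as the public API but additionally report, through *oldp, the
counter value observed immediately before the operation. A hypothetical
caller (the struct, field, and function names below are made up for
illustration) could use __refcount_inc_not_zero() to trace reference
acquisition:

struct foo {
	refcount_t ref;
	struct list_head node;
	/* ... payload ... */
};

/*
 * Hypothetical: take a reference unless the object is already dying,
 * logging the transition with the pre-increment value from *oldp.
 */
static bool foo_tryget(struct foo *f)
{
	int old;

	if (!__refcount_inc_not_zero(&f->ref, &old))
		return false;	/* count was 0: object is being torn down */

	trace_printk("foo %p: ref %d -> %d\n", f, old, old + 1);
	return true;
}

Passing NULL for oldp, as the public wrappers do, preserves the existing
behaviour.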
@@ -278,7 +330,18 @@ static inline __must_check bool refcount_sub_and_test(int i, refcount_t *r)
  */
 static inline __must_check bool refcount_dec_and_test(refcount_t *r)
 {
-	return refcount_sub_and_test(1, r);
+	return __refcount_dec_and_test(r, NULL);
+}
+
+static inline void __refcount_dec(refcount_t *r, int *oldp)
+{
+	int old = atomic_fetch_sub_release(1, &r->refs);
+
+	if (oldp)
+		*oldp = old;
+
+	if (unlikely(old <= 1))
+		refcount_warn_saturate(r, REFCOUNT_DEC_LEAK);
 }
 
 /**
@@ -293,15 +356,14 @@ static inline __must_check bool refcount_dec_and_test(refcount_t *r)
  */
 static inline void refcount_dec(refcount_t *r)
 {
-	if (unlikely(atomic_fetch_sub_release(1, &r->refs) <= 1))
-		refcount_warn_saturate(r, REFCOUNT_DEC_LEAK);
+	__refcount_dec(r, NULL);
 }
 
 extern __must_check bool refcount_dec_if_one(refcount_t *r);
 extern __must_check bool refcount_dec_not_one(refcount_t *r);
-extern __must_check bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock);
-extern __must_check bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock);
+extern __must_check bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock) __cond_acquires(lock);
+extern __must_check bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock) __cond_acquires(lock);
 extern __must_check bool refcount_dec_and_lock_irqsave(refcount_t *r, spinlock_t *lock,
-						       unsigned long *flags);
+						       unsigned long *flags) __cond_acquires(lock);
 
 #endif /* _LINUX_REFCOUNT_H */
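The __cond_acquires(lock) annotations added to the dec-and-lock family mark
these helpers for static analysis as conditionally acquiring the lock: on a
true return the caller holds it and must release it; on a false return it
was never taken. The usual caller pattern looks roughly like this (reusing
the hypothetical struct foo from the note above, with a made-up
foo_list_lock):

static DEFINE_SPINLOCK(foo_list_lock);

/*
 * Hypothetical release path: drop a reference and, if it was the last
 * one, unlink the object under the lock before freeing it.
 */
static void foo_put(struct foo *f)
{
	if (refcount_dec_and_lock(&f->ref, &foo_list_lock)) {
		/* The count hit zero: foo_list_lock is held here. */
		list_del(&f->node);
		spin_unlock(&foo_list_lock);
		kfree(f);
	}
	/* On a false return the lock was never acquired. */
}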