author     Linus Torvalds <torvalds@linux-foundation.org>   2019-11-26 16:02:40 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>   2019-11-26 16:02:40 -0800
commit     168829ad09ca9cdfdc664b2110d0e3569932c12d
tree       1b6351ab5766a272dec1fc08f77272a199bba978 /lib
parent     1ae78780eda54023a0fb49ee743dbba39da148e0
parent     500543c53a54134ced386aed85cd93cf1363f981
Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking updates from Ingo Molnar:
"The main changes in this cycle were:
- A comprehensive rewrite of the robust/PI futex code's exit handling
to fix various exit races. (Thomas Gleixner et al)
- Rework the generic REFCOUNT_FULL implementation using
atomic_fetch_* operations so that the performance impact of the
cmpxchg() loops is mitigated for common refcount operations.
With these performance improvements the generic implementation of
refcount_t should be good enough for everybody - and this got
confirmed by performance testing, so remove ARCH_HAS_REFCOUNT and
REFCOUNT_FULL entirely, leaving the generic implementation enabled
unconditionally. (Will Deacon)
- Other misc changes, fixes, cleanups"
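As a rough illustration of the refcount change described above: the old REFCOUNT_FULL code performed every operation inside a cmpxchg() retry loop so it could refuse to move a zero or saturated counter, whereas the reworked code issues a single relaxed fetch-add (or fetch-sub) and only afterwards inspects the old value, saturating the now-signed counter far away from zero when something looks wrong. The sketch below is a stand-alone user-space analogy of that idea, not the kernel's implementation; names such as demo_refcount_t and DEMO_SATURATED are invented for the example.

#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>

#define DEMO_SATURATED (INT_MIN / 2)    /* "stuck" value, far from both 0 and overflow */

typedef struct { atomic_int refs; } demo_refcount_t;

/* Old-style increment: retry a compare-and-swap until it commits. */
static void demo_inc_cmpxchg(demo_refcount_t *r)
{
        int val = atomic_load_explicit(&r->refs, memory_order_relaxed);
        int newval;

        do {
                if (val == DEMO_SATURATED)      /* saturated: leave the counter alone */
                        return;
                newval = val + 1;
        } while (!atomic_compare_exchange_weak_explicit(&r->refs, &val, newval,
                                                        memory_order_relaxed,
                                                        memory_order_relaxed));
}

/* New-style increment: one unconditional fetch_add, then check the old value. */
static void demo_inc_fetch_add(demo_refcount_t *r)
{
        int old = atomic_fetch_add_explicit(&r->refs, 1, memory_order_relaxed);

        if (old == 0)                   /* incremented a freed object: complain */
                fprintf(stderr, "demo refcount: addition on 0; use-after-free\n");
        else if (old < 0)               /* overflow/saturation range: pin the counter */
                atomic_store_explicit(&r->refs, DEMO_SATURATED, memory_order_relaxed);
}

int main(void)
{
        demo_refcount_t a = { .refs = 1 }, b = { .refs = 1 };

        demo_inc_cmpxchg(&a);
        demo_inc_fetch_add(&b);
        printf("a=%d b=%d\n", atomic_load(&a.refs), atomic_load(&b.refs)); /* a=2 b=2 */
        return 0;
}

The fast path thus becomes a single atomic instruction instead of a load/compare/cmpxchg loop; the kernel version keeps a cmpxchg() loop only where it cannot be avoided (e.g. the *_not_zero() variants) and routes all failure cases through the refcount_warn_saturate() helper that appears in the lib/refcount.c diff below.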
* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (27 commits)
lkdtm: Remove references to CONFIG_REFCOUNT_FULL
locking/refcount: Remove unused 'refcount_error_report()' function
locking/refcount: Consolidate implementations of refcount_t
locking/refcount: Consolidate REFCOUNT_{MAX,SATURATED} definitions
locking/refcount: Move saturation warnings out of line
locking/refcount: Improve performance of generic REFCOUNT_FULL code
locking/refcount: Move the bulk of the REFCOUNT_FULL implementation into the <linux/refcount.h> header
locking/refcount: Remove unused refcount_*_checked() variants
locking/refcount: Ensure integer operands are treated as signed
locking/refcount: Define constants for saturation and max refcount values
futex: Prevent exit livelock
futex: Provide distinct return value when owner is exiting
futex: Add mutex around futex exit
futex: Provide state handling for exec() as well
futex: Sanitize exit state handling
futex: Mark the begin of futex exit explicitly
futex: Set task::futex_state to DEAD right after handling futex exit
futex: Split futex_mm_release() for exit/exec
exit/exec: Seperate mm_release()
futex: Replace PF_EXITPIDONE with a state
...
Diffstat (limited to 'lib')
-rw-r--r--  lib/locking-selftest.c  |  24
-rw-r--r--  lib/refcount.c          | 255
-rw-r--r--  lib/smp_processor_id.c  |   2
3 files changed, 40 insertions, 241 deletions
diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
index a1705545e6ac..14f44f59e733 100644
--- a/lib/locking-selftest.c
+++ b/lib/locking-selftest.c
@@ -1475,7 +1475,7 @@ static void ww_test_edeadlk_normal(void)

         mutex_lock(&o2.base);
         o2.ctx = &t2;
-        mutex_release(&o2.base.dep_map, 1, _THIS_IP_);
+        mutex_release(&o2.base.dep_map, _THIS_IP_);

         WWAI(&t);
         t2 = t;
@@ -1500,7 +1500,7 @@ static void ww_test_edeadlk_normal_slow(void)
         int ret;

         mutex_lock(&o2.base);
-        mutex_release(&o2.base.dep_map, 1, _THIS_IP_);
+        mutex_release(&o2.base.dep_map, _THIS_IP_);
         o2.ctx = &t2;

         WWAI(&t);
@@ -1527,7 +1527,7 @@ static void ww_test_edeadlk_no_unlock(void)

         mutex_lock(&o2.base);
         o2.ctx = &t2;
-        mutex_release(&o2.base.dep_map, 1, _THIS_IP_);
+        mutex_release(&o2.base.dep_map, _THIS_IP_);

         WWAI(&t);
         t2 = t;
@@ -1551,7 +1551,7 @@ static void ww_test_edeadlk_no_unlock_slow(void)
         int ret;

         mutex_lock(&o2.base);
-        mutex_release(&o2.base.dep_map, 1, _THIS_IP_);
+        mutex_release(&o2.base.dep_map, _THIS_IP_);
         o2.ctx = &t2;

         WWAI(&t);
@@ -1576,7 +1576,7 @@ static void ww_test_edeadlk_acquire_more(void)
         int ret;

         mutex_lock(&o2.base);
-        mutex_release(&o2.base.dep_map, 1, _THIS_IP_);
+        mutex_release(&o2.base.dep_map, _THIS_IP_);
         o2.ctx = &t2;

         WWAI(&t);
@@ -1597,7 +1597,7 @@ static void ww_test_edeadlk_acquire_more_slow(void)
         int ret;

         mutex_lock(&o2.base);
-        mutex_release(&o2.base.dep_map, 1, _THIS_IP_);
+        mutex_release(&o2.base.dep_map, _THIS_IP_);
         o2.ctx = &t2;

         WWAI(&t);
@@ -1618,11 +1618,11 @@ static void ww_test_edeadlk_acquire_more_edeadlk(void)
         int ret;

         mutex_lock(&o2.base);
-        mutex_release(&o2.base.dep_map, 1, _THIS_IP_);
+        mutex_release(&o2.base.dep_map, _THIS_IP_);
         o2.ctx = &t2;

         mutex_lock(&o3.base);
-        mutex_release(&o3.base.dep_map, 1, _THIS_IP_);
+        mutex_release(&o3.base.dep_map, _THIS_IP_);
         o3.ctx = &t2;

         WWAI(&t);
@@ -1644,11 +1644,11 @@ static void ww_test_edeadlk_acquire_more_edeadlk_slow(void)
         int ret;

         mutex_lock(&o2.base);
-        mutex_release(&o2.base.dep_map, 1, _THIS_IP_);
+        mutex_release(&o2.base.dep_map, _THIS_IP_);
         o2.ctx = &t2;

         mutex_lock(&o3.base);
-        mutex_release(&o3.base.dep_map, 1, _THIS_IP_);
+        mutex_release(&o3.base.dep_map, _THIS_IP_);
         o3.ctx = &t2;

         WWAI(&t);
@@ -1669,7 +1669,7 @@ static void ww_test_edeadlk_acquire_wrong(void)
         int ret;

         mutex_lock(&o2.base);
-        mutex_release(&o2.base.dep_map, 1, _THIS_IP_);
+        mutex_release(&o2.base.dep_map, _THIS_IP_);
         o2.ctx = &t2;

         WWAI(&t);
@@ -1694,7 +1694,7 @@ static void ww_test_edeadlk_acquire_wrong_slow(void)
         int ret;

         mutex_lock(&o2.base);
-        mutex_release(&o2.base.dep_map, 1, _THIS_IP_);
+        mutex_release(&o2.base.dep_map, _THIS_IP_);
         o2.ctx = &t2;

         WWAI(&t);
diff --git a/lib/refcount.c b/lib/refcount.c
index 6e904af0fb3e..ebac8b7d15a7 100644
--- a/lib/refcount.c
+++ b/lib/refcount.c
@@ -1,41 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * Variant of atomic_t specialized for reference counts.
- *
- * The interface matches the atomic_t interface (to aid in porting) but only
- * provides the few functions one should use for reference counting.
- *
- * It differs in that the counter saturates at UINT_MAX and will not move once
- * there. This avoids wrapping the counter and causing 'spurious'
- * use-after-free issues.
- *
- * Memory ordering rules are slightly relaxed wrt regular atomic_t functions
- * and provide only what is strictly required for refcounts.
- *
- * The increments are fully relaxed; these will not provide ordering. The
- * rationale is that whatever is used to obtain the object we're increasing the
- * reference count on will provide the ordering. For locked data structures,
- * its the lock acquire, for RCU/lockless data structures its the dependent
- * load.
- *
- * Do note that inc_not_zero() provides a control dependency which will order
- * future stores against the inc, this ensures we'll never modify the object
- * if we did not in fact acquire a reference.
- *
- * The decrements will provide release order, such that all the prior loads and
- * stores will be issued before, it also provides a control dependency, which
- * will order us against the subsequent free().
- *
- * The control dependency is against the load of the cmpxchg (ll/sc) that
- * succeeded. This means the stores aren't fully ordered, but this is fine
- * because the 1->0 transition indicates no concurrency.
- *
- * Note that the allocator is responsible for ordering things between free()
- * and alloc().
- *
- * The decrements dec_and_test() and sub_and_test() also provide acquire
- * ordering on success.
- *
+ * Out-of-line refcount functions.
  */

 #include <linux/mutex.h>
@@ -43,199 +8,33 @@
 #include <linux/spinlock.h>
 #include <linux/bug.h>

-/**
- * refcount_add_not_zero_checked - add a value to a refcount unless it is 0
- * @i: the value to add to the refcount
- * @r: the refcount
- *
- * Will saturate at UINT_MAX and WARN.
- *
- * Provides no memory ordering, it is assumed the caller has guaranteed the
- * object memory to be stable (RCU, etc.). It does provide a control dependency
- * and thereby orders future stores. See the comment on top.
- *
- * Use of this function is not recommended for the normal reference counting
- * use case in which references are taken and released one at a time. In these
- * cases, refcount_inc(), or one of its variants, should instead be used to
- * increment a reference count.
- *
- * Return: false if the passed refcount is 0, true otherwise
- */
-bool refcount_add_not_zero_checked(unsigned int i, refcount_t *r)
-{
-        unsigned int new, val = atomic_read(&r->refs);
-
-        do {
-                if (!val)
-                        return false;
-
-                if (unlikely(val == UINT_MAX))
-                        return true;
-
-                new = val + i;
-                if (new < val)
-                        new = UINT_MAX;
-
-        } while (!atomic_try_cmpxchg_relaxed(&r->refs, &val, new));
-
-        WARN_ONCE(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
-
-        return true;
-}
-EXPORT_SYMBOL(refcount_add_not_zero_checked);
-
-/**
- * refcount_add_checked - add a value to a refcount
- * @i: the value to add to the refcount
- * @r: the refcount
- *
- * Similar to atomic_add(), but will saturate at UINT_MAX and WARN.
- *
- * Provides no memory ordering, it is assumed the caller has guaranteed the
- * object memory to be stable (RCU, etc.). It does provide a control dependency
- * and thereby orders future stores. See the comment on top.
- *
- * Use of this function is not recommended for the normal reference counting
- * use case in which references are taken and released one at a time. In these
- * cases, refcount_inc(), or one of its variants, should instead be used to
- * increment a reference count.
- */
-void refcount_add_checked(unsigned int i, refcount_t *r)
-{
-        WARN_ONCE(!refcount_add_not_zero_checked(i, r), "refcount_t: addition on 0; use-after-free.\n");
-}
-EXPORT_SYMBOL(refcount_add_checked);
-
-/**
- * refcount_inc_not_zero_checked - increment a refcount unless it is 0
- * @r: the refcount to increment
- *
- * Similar to atomic_inc_not_zero(), but will saturate at UINT_MAX and WARN.
- *
- * Provides no memory ordering, it is assumed the caller has guaranteed the
- * object memory to be stable (RCU, etc.). It does provide a control dependency
- * and thereby orders future stores. See the comment on top.
- *
- * Return: true if the increment was successful, false otherwise
- */
-bool refcount_inc_not_zero_checked(refcount_t *r)
-{
-        unsigned int new, val = atomic_read(&r->refs);
-
-        do {
-                new = val + 1;
-
-                if (!val)
-                        return false;
-
-                if (unlikely(!new))
-                        return true;
-
-        } while (!atomic_try_cmpxchg_relaxed(&r->refs, &val, new));
+#define REFCOUNT_WARN(str) WARN_ONCE(1, "refcount_t: " str ".\n")

-        WARN_ONCE(new == UINT_MAX, "refcount_t: saturated; leaking memory.\n");
-
-        return true;
-}
-EXPORT_SYMBOL(refcount_inc_not_zero_checked);
-
-/**
- * refcount_inc_checked - increment a refcount
- * @r: the refcount to increment
- *
- * Similar to atomic_inc(), but will saturate at UINT_MAX and WARN.
- *
- * Provides no memory ordering, it is assumed the caller already has a
- * reference on the object.
- *
- * Will WARN if the refcount is 0, as this represents a possible use-after-free
- * condition.
- */
-void refcount_inc_checked(refcount_t *r)
+void refcount_warn_saturate(refcount_t *r, enum refcount_saturation_type t)
 {
-        WARN_ONCE(!refcount_inc_not_zero_checked(r), "refcount_t: increment on 0; use-after-free.\n");
-}
-EXPORT_SYMBOL(refcount_inc_checked);
-
-/**
- * refcount_sub_and_test_checked - subtract from a refcount and test if it is 0
- * @i: amount to subtract from the refcount
- * @r: the refcount
- *
- * Similar to atomic_dec_and_test(), but it will WARN, return false and
- * ultimately leak on underflow and will fail to decrement when saturated
- * at UINT_MAX.
- *
- * Provides release memory ordering, such that prior loads and stores are done
- * before, and provides an acquire ordering on success such that free()
- * must come after.
- *
- * Use of this function is not recommended for the normal reference counting
- * use case in which references are taken and released one at a time. In these
- * cases, refcount_dec(), or one of its variants, should instead be used to
- * decrement a reference count.
- *
- * Return: true if the resulting refcount is 0, false otherwise
- */
-bool refcount_sub_and_test_checked(unsigned int i, refcount_t *r)
-{
-        unsigned int new, val = atomic_read(&r->refs);
-
-        do {
-                if (unlikely(val == UINT_MAX))
-                        return false;
-
-                new = val - i;
-                if (new > val) {
-                        WARN_ONCE(new > val, "refcount_t: underflow; use-after-free.\n");
-                        return false;
-                }
-
-        } while (!atomic_try_cmpxchg_release(&r->refs, &val, new));
-
-        if (!new) {
-                smp_acquire__after_ctrl_dep();
-                return true;
+        refcount_set(r, REFCOUNT_SATURATED);
+
+        switch (t) {
+        case REFCOUNT_ADD_NOT_ZERO_OVF:
+                REFCOUNT_WARN("saturated; leaking memory");
+                break;
+        case REFCOUNT_ADD_OVF:
+                REFCOUNT_WARN("saturated; leaking memory");
+                break;
+        case REFCOUNT_ADD_UAF:
+                REFCOUNT_WARN("addition on 0; use-after-free");
+                break;
+        case REFCOUNT_SUB_UAF:
+                REFCOUNT_WARN("underflow; use-after-free");
+                break;
+        case REFCOUNT_DEC_LEAK:
+                REFCOUNT_WARN("decrement hit 0; leaking memory");
+                break;
+        default:
+                REFCOUNT_WARN("unknown saturation event!?");
         }
-        return false;
-
-}
-EXPORT_SYMBOL(refcount_sub_and_test_checked);
-
-/**
- * refcount_dec_and_test_checked - decrement a refcount and test if it is 0
- * @r: the refcount
- *
- * Similar to atomic_dec_and_test(), it will WARN on underflow and fail to
- * decrement when saturated at UINT_MAX.
- *
- * Provides release memory ordering, such that prior loads and stores are done
- * before, and provides an acquire ordering on success such that free()
- * must come after.
- *
- * Return: true if the resulting refcount is 0, false otherwise
- */
-bool refcount_dec_and_test_checked(refcount_t *r)
-{
-        return refcount_sub_and_test_checked(1, r);
-}
-EXPORT_SYMBOL(refcount_dec_and_test_checked);
-
-/**
- * refcount_dec_checked - decrement a refcount
- * @r: the refcount
- *
- * Similar to atomic_dec(), it will WARN on underflow and fail to decrement
- * when saturated at UINT_MAX.
- *
- * Provides release memory ordering, such that prior loads and stores are done
- * before.
- */
-void refcount_dec_checked(refcount_t *r)
-{
-        WARN_ONCE(refcount_dec_and_test_checked(r), "refcount_t: decrement hit 0; leaking memory.\n");
 }
-EXPORT_SYMBOL(refcount_dec_checked);
+EXPORT_SYMBOL(refcount_warn_saturate);

 /**
  * refcount_dec_if_one - decrement a refcount if it is 1
@@ -277,7 +76,7 @@ bool refcount_dec_not_one(refcount_t *r)
         unsigned int new, val = atomic_read(&r->refs);

         do {
-                if (unlikely(val == UINT_MAX))
+                if (unlikely(val == REFCOUNT_SATURATED))
                         return true;

                 if (val == 1)
@@ -302,7 +101,7 @@ EXPORT_SYMBOL(refcount_dec_not_one);
  * @lock: the mutex to be locked
  *
  * Similar to atomic_dec_and_mutex_lock(), it will WARN on underflow and fail
- * to decrement when saturated at UINT_MAX.
+ * to decrement when saturated at REFCOUNT_SATURATED.
  *
  * Provides release memory ordering, such that prior loads and stores are done
  * before, and provides a control dependency such that free() must come after.
@@ -333,7 +132,7 @@ EXPORT_SYMBOL(refcount_dec_and_mutex_lock);
  * @lock: the spinlock to be locked
  *
  * Similar to atomic_dec_and_lock(), it will WARN on underflow and fail to
- * decrement when saturated at UINT_MAX.
+ * decrement when saturated at REFCOUNT_SATURATED.
  *
  * Provides release memory ordering, such that prior loads and stores are done
  * before, and provides a control dependency such that free() must come after.
diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c
index 60ba93fc42ce..bd9571653288 100644
--- a/lib/smp_processor_id.c
+++ b/lib/smp_processor_id.c
@@ -23,7 +23,7 @@ unsigned int check_preemption_disabled(const char *what1, const char *what2)
          * Kernel threads bound to a single CPU can safely use
          * smp_processor_id():
          */
-        if (cpumask_equal(current->cpus_ptr, cpumask_of(this_cpu)))
+        if (current->nr_cpus_allowed == 1)
                 goto out;

         /*