author		Linus Torvalds <torvalds@linux-foundation.org>	2021-06-28 11:45:29 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2021-06-28 11:45:29 -0700
commit		a15286c63d113d4296c58867994cd266a28f5d6d (patch)
tree		aaebbc86918c0c1943c26115d5bb1dd6c2fc2d9b /lib
parent		b89c07dea16137696d0f2d479ef665ef7c1022ab (diff)
parent		0e8a89d49d45197770f2e57fb15f1bc9ded96eb0 (diff)
Merge tag 'locking-core-2021-06-28' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking updates from Ingo Molnar:
- Core locking & atomics:
- Convert all architectures to ARCH_ATOMIC: move every architecture
  over, then get rid of ARCH_ATOMIC itself along with all the
  transitory facilities and #ifdefs.
  That series removes a great deal of complexity:
  63 files changed, 756 insertions(+), 4094 deletions(-)
  (A simplified sketch of the wrapper shape this scheme generates
  follows this list.)
- Self-test enhancements
- Futexes:
- Add the new FUTEX_LOCK_PI2 ABI, a variant that doesn't set
  FLAGS_CLOCKRT (i.e. it uses CLOCK_MONOTONIC); a usage sketch
  follows the commit list below.
[ The temptation to repurpose FUTEX_LOCK_PI's implicit setting of
FLAGS_CLOCKRT & invert the flag's meaning to avoid having to
introduce a new variant was resisted successfully. ]
- Enhance futex self-tests
- Lockdep:
- Fix dependency path printouts
- Optimize trace saving
- Broaden & fix wait-context checks
- Misc cleanups and fixes.
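For context on the ARCH_ATOMIC scheme referenced above: each architecture
implements arch_atomic_*() primitives, and a generated header layer wraps
them with sanitizer instrumentation before exposing the plain atomic_*()
API. A simplified sketch of that wrapper shape, assuming the 5.14-era
helper names; this is illustrative only, not code from this merge:

	/*
	 * Simplified shape of one instrumented wrapper under ARCH_ATOMIC.
	 * The real wrappers live in generated atomic-instrumented headers;
	 * this sketch only shows the structure.
	 */
	static __always_inline int atomic_add_return(int i, atomic_t *v)
	{
		/* Make the read-modify-write visible to KASAN/KCSAN... */
		instrument_atomic_read_write(v, sizeof(*v));
		/* ...then defer to the architecture's primitive. */
		return arch_atomic_add_return(i, v);
	}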
* tag 'locking-core-2021-06-28' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (52 commits)
locking/lockdep: Correct the description error for check_redundant()
futex: Provide FUTEX_LOCK_PI2 to support clock selection
futex: Prepare futex_lock_pi() for runtime clock selection
lockdep/selftest: Remove wait-type RCU_CALLBACK tests
lockdep/selftests: Fix selftests vs PROVE_RAW_LOCK_NESTING
lockdep: Fix wait-type for empty stack
locking/selftests: Add a selftest for check_irq_usage()
lockding/lockdep: Avoid to find wrong lock dep path in check_irq_usage()
locking/lockdep: Remove the unnecessary trace saving
locking/lockdep: Fix the dep path printing for backwards BFS
selftests: futex: Add futex compare requeue test
selftests: futex: Add futex wait test
seqlock: Remove trailing semicolon in macros
locking/lockdep: Reduce LOCKDEP dependency list
locking/lockdep,doc: Improve readability of the block matrix
locking/atomics: atomic-instrumented: simplify ifdeffery
locking/atomic: delete !ARCH_ATOMIC remnants
locking/atomic: xtensa: move to ARCH_ATOMIC
locking/atomic: sparc: move to ARCH_ATOMIC
locking/atomic: sh: move to ARCH_ATOMIC
...
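As a usage sketch of the new FUTEX_LOCK_PI2 ABI mentioned above
(illustrative, not part of the pull request: the opcode value matches the
uapi header added by this series, while the futex word and the one-second
timeout are invented for the example):

	/* Acquire a PI futex with an absolute CLOCK_MONOTONIC timeout. */
	#include <linux/futex.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <sys/syscall.h>
	#include <time.h>
	#include <unistd.h>

	#ifndef FUTEX_LOCK_PI2
	#define FUTEX_LOCK_PI2 13	/* new opcode; older headers lack it */
	#endif

	static uint32_t futex_word;	/* 0 == unlocked; kernel stores the owner TID */

	int main(void)
	{
		struct timespec ts;

		/*
		 * One second from now, measured on CLOCK_MONOTONIC. With
		 * FUTEX_LOCK_PI the same timespec would be interpreted on
		 * CLOCK_REALTIME, so clock_settime() could perturb it.
		 */
		clock_gettime(CLOCK_MONOTONIC, &ts);
		ts.tv_sec += 1;

		if (syscall(SYS_futex, &futex_word, FUTEX_LOCK_PI2, 0, &ts, NULL, 0) == -1) {
			perror("futex(FUTEX_LOCK_PI2)");	/* e.g. ENOSYS on older kernels */
			return 1;
		}
		printf("PI futex acquired by TID %ld\n", (long)syscall(SYS_gettid));
		return 0;
	}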
Diffstat (limited to 'lib')
-rw-r--r--	lib/Kconfig.debug	 1
-rw-r--r--	lib/atomic64.c		36
-rw-r--r--	lib/locking-selftest.c	83
3 files changed, 84 insertions(+), 36 deletions(-)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 678c13967580..1e1bd6f4a13d 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1372,7 +1372,6 @@ config LOCKDEP
 	bool
 	depends on DEBUG_KERNEL && LOCK_DEBUGGING_SUPPORT
 	select STACKTRACE
-	depends on FRAME_POINTER || MIPS || PPC || S390 || MICROBLAZE || ARM || ARC || X86
 	select KALLSYMS
 	select KALLSYMS_ALL
 
diff --git a/lib/atomic64.c b/lib/atomic64.c
index e98c85a99787..3df653994177 100644
--- a/lib/atomic64.c
+++ b/lib/atomic64.c
@@ -42,7 +42,7 @@ static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
 	return &atomic64_lock[addr & (NR_LOCKS - 1)].lock;
 }
 
-s64 atomic64_read(const atomic64_t *v)
+s64 generic_atomic64_read(const atomic64_t *v)
 {
 	unsigned long flags;
 	raw_spinlock_t *lock = lock_addr(v);
@@ -53,9 +53,9 @@ s64 atomic64_read(const atomic64_t *v)
 	raw_spin_unlock_irqrestore(lock, flags);
 	return val;
 }
-EXPORT_SYMBOL(atomic64_read);
+EXPORT_SYMBOL(generic_atomic64_read);
 
-void atomic64_set(atomic64_t *v, s64 i)
+void generic_atomic64_set(atomic64_t *v, s64 i)
 {
 	unsigned long flags;
 	raw_spinlock_t *lock = lock_addr(v);
@@ -64,10 +64,10 @@ void atomic64_set(atomic64_t *v, s64 i)
 	v->counter = i;
 	raw_spin_unlock_irqrestore(lock, flags);
 }
-EXPORT_SYMBOL(atomic64_set);
+EXPORT_SYMBOL(generic_atomic64_set);
 
 #define ATOMIC64_OP(op, c_op)						\
-void atomic64_##op(s64 a, atomic64_t *v)				\
+void generic_atomic64_##op(s64 a, atomic64_t *v)			\
 {									\
 	unsigned long flags;						\
 	raw_spinlock_t *lock = lock_addr(v);				\
@@ -76,10 +76,10 @@ void atomic64_##op(s64 a, atomic64_t *v)			\
 	v->counter c_op a;						\
 	raw_spin_unlock_irqrestore(lock, flags);			\
 }									\
-EXPORT_SYMBOL(atomic64_##op);
+EXPORT_SYMBOL(generic_atomic64_##op);
 
 #define ATOMIC64_OP_RETURN(op, c_op)					\
-s64 atomic64_##op##_return(s64 a, atomic64_t *v)			\
+s64 generic_atomic64_##op##_return(s64 a, atomic64_t *v)		\
 {									\
 	unsigned long flags;						\
 	raw_spinlock_t *lock = lock_addr(v);				\
@@ -90,10 +90,10 @@ s64 atomic64_##op##_return(s64 a, atomic64_t *v)		\
 	raw_spin_unlock_irqrestore(lock, flags);			\
 	return val;							\
 }									\
-EXPORT_SYMBOL(atomic64_##op##_return);
+EXPORT_SYMBOL(generic_atomic64_##op##_return);
 
 #define ATOMIC64_FETCH_OP(op, c_op)					\
-s64 atomic64_fetch_##op(s64 a, atomic64_t *v)				\
+s64 generic_atomic64_fetch_##op(s64 a, atomic64_t *v)			\
 {									\
 	unsigned long flags;						\
 	raw_spinlock_t *lock = lock_addr(v);				\
@@ -105,7 +105,7 @@ s64 atomic64_fetch_##op(s64 a, atomic64_t *v)			\
 	raw_spin_unlock_irqrestore(lock, flags);			\
 	return val;							\
 }									\
-EXPORT_SYMBOL(atomic64_fetch_##op);
+EXPORT_SYMBOL(generic_atomic64_fetch_##op);
 
 #define ATOMIC64_OPS(op, c_op)						\
 	ATOMIC64_OP(op, c_op)						\
@@ -130,7 +130,7 @@ ATOMIC64_OPS(xor, ^=)
 #undef ATOMIC64_OP_RETURN
 #undef ATOMIC64_OP
 
-s64 atomic64_dec_if_positive(atomic64_t *v)
+s64 generic_atomic64_dec_if_positive(atomic64_t *v)
 {
 	unsigned long flags;
 	raw_spinlock_t *lock = lock_addr(v);
@@ -143,9 +143,9 @@ s64 atomic64_dec_if_positive(atomic64_t *v)
 	raw_spin_unlock_irqrestore(lock, flags);
 	return val;
 }
-EXPORT_SYMBOL(atomic64_dec_if_positive);
+EXPORT_SYMBOL(generic_atomic64_dec_if_positive);
 
-s64 atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n)
+s64 generic_atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n)
 {
 	unsigned long flags;
 	raw_spinlock_t *lock = lock_addr(v);
@@ -158,9 +158,9 @@ s64 atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n)
 	raw_spin_unlock_irqrestore(lock, flags);
 	return val;
 }
-EXPORT_SYMBOL(atomic64_cmpxchg);
+EXPORT_SYMBOL(generic_atomic64_cmpxchg);
 
-s64 atomic64_xchg(atomic64_t *v, s64 new)
+s64 generic_atomic64_xchg(atomic64_t *v, s64 new)
 {
 	unsigned long flags;
 	raw_spinlock_t *lock = lock_addr(v);
@@ -172,9 +172,9 @@ s64 atomic64_xchg(atomic64_t *v, s64 new)
 	raw_spin_unlock_irqrestore(lock, flags);
 	return val;
 }
-EXPORT_SYMBOL(atomic64_xchg);
+EXPORT_SYMBOL(generic_atomic64_xchg);
 
-s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
+s64 generic_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
 {
 	unsigned long flags;
 	raw_spinlock_t *lock = lock_addr(v);
@@ -188,4 +188,4 @@ s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
 
 	return val;
 }
-EXPORT_SYMBOL(atomic64_fetch_add_unless);
+EXPORT_SYMBOL(generic_atomic64_fetch_add_unless);
diff --git a/lib/locking-selftest.c b/lib/locking-selftest.c
index 2d85abac1744..161108e5d2fe 100644
--- a/lib/locking-selftest.c
+++ b/lib/locking-selftest.c
@@ -53,6 +53,7 @@ __setup("debug_locks_verbose=", setup_debug_locks_verbose);
 #define LOCKTYPE_WW	0x10
 #define LOCKTYPE_RTMUTEX 0x20
 #define LOCKTYPE_LL	0x40
+#define LOCKTYPE_SPECIAL 0x80
 
 static struct ww_acquire_ctx t, t2;
 static struct ww_mutex o, o2, o3;
@@ -194,6 +195,7 @@ static void init_shared_classes(void)
 #define HARDIRQ_ENTER()				\
 	local_irq_disable();			\
 	__irq_enter();				\
+	lockdep_hardirq_threaded();		\
 	WARN_ON(!in_irq());
 
 #define HARDIRQ_EXIT()				\
@@ -2492,16 +2494,6 @@ static void rcu_sched_exit(int *_)
 	int rcu_sched_guard_##name __guard(rcu_sched_exit);	\
 	rcu_read_lock_sched();
 
-static void rcu_callback_exit(int *_)
-{
-	rcu_lock_release(&rcu_callback_map);
-}
-
-#define RCU_CALLBACK_CONTEXT(name, ...)					\
-	int rcu_callback_guard_##name __guard(rcu_callback_exit);	\
-	rcu_lock_acquire(&rcu_callback_map);
-
-
 static void raw_spinlock_exit(raw_spinlock_t **lock)
 {
 	raw_spin_unlock(*lock);
@@ -2558,8 +2550,6 @@ static void __maybe_unused inner##_in_##outer(void)	\
  * ---------------+-------+----------+------+-------
  * RCU_BH         |   o   |    o     |  o   |  x
  * ---------------+-------+----------+------+-------
- * RCU_CALLBACK   |   o   |    o     |  o   |  x
- * ---------------+-------+----------+------+-------
  * RCU_SCHED      |   o   |    o     |  x   |  x
  * ---------------+-------+----------+------+-------
  * RAW_SPIN       |   o   |    o     |  x   |  x
@@ -2576,7 +2566,6 @@ GENERATE_2_CONTEXT_TESTCASE(NOTTHREADED_HARDIRQ, , inner, inner_lock)	\
 GENERATE_2_CONTEXT_TESTCASE(SOFTIRQ, , inner, inner_lock)		\
 GENERATE_2_CONTEXT_TESTCASE(RCU, , inner, inner_lock)			\
 GENERATE_2_CONTEXT_TESTCASE(RCU_BH, , inner, inner_lock)		\
-GENERATE_2_CONTEXT_TESTCASE(RCU_CALLBACK, , inner, inner_lock)		\
 GENERATE_2_CONTEXT_TESTCASE(RCU_SCHED, , inner, inner_lock)		\
 GENERATE_2_CONTEXT_TESTCASE(RAW_SPINLOCK, raw_lock_A, inner, inner_lock)	\
 GENERATE_2_CONTEXT_TESTCASE(SPINLOCK, lock_A, inner, inner_lock)	\
@@ -2638,10 +2627,6 @@ static void wait_context_tests(void)
 	DO_CONTEXT_TESTCASE_OUTER_LIMITED_PREEMPTIBLE(RCU_BH);
 	pr_cont("\n");
 
-	print_testname("in RCU callback context");
-	DO_CONTEXT_TESTCASE_OUTER_LIMITED_PREEMPTIBLE(RCU_CALLBACK);
-	pr_cont("\n");
-
 	print_testname("in RCU-sched context");
 	DO_CONTEXT_TESTCASE_OUTER_NOT_PREEMPTIBLE(RCU_SCHED);
 	pr_cont("\n");
@@ -2744,6 +2729,66 @@ static void local_lock_tests(void)
 	pr_cont("\n");
 }
 
+static void hardirq_deadlock_softirq_not_deadlock(void)
+{
+	/* mutex_A is hardirq-unsafe and softirq-unsafe */
+	/* mutex_A -> lock_C */
+	mutex_lock(&mutex_A);
+	HARDIRQ_DISABLE();
+	spin_lock(&lock_C);
+	spin_unlock(&lock_C);
+	HARDIRQ_ENABLE();
+	mutex_unlock(&mutex_A);
+
+	/* lock_A is hardirq-safe */
+	HARDIRQ_ENTER();
+	spin_lock(&lock_A);
+	spin_unlock(&lock_A);
+	HARDIRQ_EXIT();
+
+	/* lock_A -> lock_B */
+	HARDIRQ_DISABLE();
+	spin_lock(&lock_A);
+	spin_lock(&lock_B);
+	spin_unlock(&lock_B);
+	spin_unlock(&lock_A);
+	HARDIRQ_ENABLE();
+
+	/* lock_B -> lock_C */
+	HARDIRQ_DISABLE();
+	spin_lock(&lock_B);
+	spin_lock(&lock_C);
+	spin_unlock(&lock_C);
+	spin_unlock(&lock_B);
+	HARDIRQ_ENABLE();
+
+	/* lock_D is softirq-safe */
+	SOFTIRQ_ENTER();
+	spin_lock(&lock_D);
+	spin_unlock(&lock_D);
+	SOFTIRQ_EXIT();
+
+	/* And lock_D is hardirq-unsafe */
+	SOFTIRQ_DISABLE();
+	spin_lock(&lock_D);
+	spin_unlock(&lock_D);
+	SOFTIRQ_ENABLE();
+
+	/*
+	 * mutex_A -> lock_C -> lock_D is softirq-unsafe -> softirq-safe, not
+	 * deadlock.
+	 *
+	 * lock_A -> lock_B -> lock_C -> lock_D is hardirq-safe ->
+	 * hardirq-unsafe, deadlock.
+	 */
+	HARDIRQ_DISABLE();
+	spin_lock(&lock_C);
+	spin_lock(&lock_D);
+	spin_unlock(&lock_D);
+	spin_unlock(&lock_C);
+	HARDIRQ_ENABLE();
+}
+
 void locking_selftest(void)
 {
 	/*
@@ -2872,6 +2917,10 @@ void locking_selftest(void)
 
 	local_lock_tests();
 
+	print_testname("hardirq_unsafe_softirq_safe");
+	dotest(hardirq_deadlock_softirq_not_deadlock, FAILURE, LOCKTYPE_SPECIAL);
+	pr_cont("\n");
+
 	if (unexpected_testcase_failures) {
 		printk("-----------------------------------------------------------------\n");
 		debug_locks = 0;
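A closing note on the lib/atomic64.c rename above: the generic_atomic64_*()
helpers implement 64-bit atomics for machines without native 64-bit atomic
instructions by hashing each atomic64_t's address into a small pool of
spinlocks. A self-contained userspace sketch of the same hashing technique,
assuming pthreads (the gen64_* names are invented for illustration):

	#include <pthread.h>
	#include <stdint.h>
	#include <stdio.h>

	#define NR_LOCKS 16

	typedef struct { int64_t counter; } gen_atomic64_t;

	static pthread_spinlock_t locks[NR_LOCKS];

	/*
	 * Hash the variable's address to pick one lock from the pool, so
	 * unrelated atomics usually take different locks (cf. lock_addr()
	 * in the diff above).
	 */
	static pthread_spinlock_t *gen64_lock_addr(const gen_atomic64_t *v)
	{
		uintptr_t addr = (uintptr_t)v;

		addr >>= 6;	/* drop cache-line offset bits, as the kernel does */
		return &locks[addr & (NR_LOCKS - 1)];
	}

	static int64_t gen64_add_return(int64_t a, gen_atomic64_t *v)
	{
		pthread_spinlock_t *lock = gen64_lock_addr(v);
		int64_t val;

		pthread_spin_lock(lock);
		val = (v->counter += a);
		pthread_spin_unlock(lock);
		return val;
	}

	int main(void)
	{
		gen_atomic64_t v = { 0 };

		for (int i = 0; i < NR_LOCKS; i++)
			pthread_spin_init(&locks[i], PTHREAD_PROCESS_PRIVATE);
		printf("%lld\n", (long long)gen64_add_return(5, &v));	/* prints 5 */
		return 0;
	}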