author    | Petr Mladek <pmladek@suse.com>                 | 2016-12-14 15:05:55 -0800
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-12-14 16:04:08 -0800
commit    | d5d8d3d0d4adcc3aec6e2e0fb656165014a712b7 (patch)
tree      | 090aa03f64ff9535310f073d3574fb692f720fa9 /kernel/debug/kdb/kdb_io.c
parent    | d1bd8ead126668a2d6c42d97cc3664e95b3fa1dc (diff)
kdb: properly synchronize vkdb_printf() calls with other CPUs
kdb_printf_lock does not prevent other CPUs from entering the critical
section because it is ignored when KDB_STATE_PRINTF_LOCK is set.
The problematic situation might look like:
CPU0                                            CPU1

vkdb_printf()
  if (!KDB_STATE(PRINTF_LOCK))
    KDB_STATE_SET(PRINTF_LOCK);
    spin_lock_irqsave(&kdb_printf_lock, flags);
                                                vkdb_printf()
                                                  if (!KDB_STATE(PRINTF_LOCK))
BANG: The PRINTF_LOCK state is set and CPU1 is entering the critical
section without spinning on the lock.
The problem is that the code tries to implement locking using two state
variables that are not handled atomically. We need custom locking here
because we want to allow reentering the critical section on the very
same CPU.
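
For illustration, here is a minimal user-space sketch of the broken pattern
(an assumed analogue, not the kernel code: a pthread mutex stands in for
kdb_printf_lock and a plain int for KDB_STATE_PRINTF_LOCK). A thread that
sees the flag already set falls through without the mutex, just like CPU1
above:

/*
 * User-space sketch of the broken check-then-set pattern (assumed
 * analogue, not the kernel code).  Builds with gcc -pthread.
 */
#include <pthread.h>
#include <stdio.h>

static int printf_lock_state;                   /* ~ KDB_STATE_PRINTF_LOCK */
static pthread_mutex_t printf_lock = PTHREAD_MUTEX_INITIALIZER;

static void *writer(void *arg)
{
        int got_lock = 0;

        if (!printf_lock_state) {               /* check ...                */
                printf_lock_state = 1;          /* ... then set: not atomic */
                pthread_mutex_lock(&printf_lock);
                got_lock = 1;
        }
        /*
         * A thread that saw the flag already set lands here without the
         * mutex, exactly like CPU1 above (__acquire() is only an annotation).
         */
        printf("thread %ld in the critical section\n", (long)arg);

        if (got_lock) {
                pthread_mutex_unlock(&printf_lock);
                printf_lock_state = 0;
        }
        return NULL;
}

int main(void)
{
        pthread_t a, b;

        pthread_create(&a, NULL, writer, (void *)1L);
        pthread_create(&b, NULL, writer, (void *)2L);
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        return 0;
}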
Let's use the solution from Peter Zijlstra that was proposed for a similar
scenario; see
https://lkml.kernel.org/r/20161018171513.734367391@infradead.org
This patch uses the same cmpxchg() trick. The only difference is that we
want to handle only recursion from the same context, so we disable
interrupts.
In addition, KDB_STATE_PRINTF_LOCK is removed. In fact, we are not able
to set it in a non-racy way.
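
For illustration, a rough user-space sketch of the resulting scheme (an
assumed analogue, not the kernel code: C11 atomics and a small thread id
replace cmpxchg(), smp_store_release() and smp_processor_id(), and nothing
here disables interrupts). The owner variable plays the role of
kdb_printf_cpu; recursion by the current owner falls straight through, and
the release store at the end restores the previous owner:

/*
 * User-space sketch of the cmpxchg()-based owner lock (assumed analogue,
 * not the kernel code).  Builds with gcc -pthread.
 */
#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>

#define OWNER_NONE      (-1L)

static atomic_long owner = OWNER_NONE;          /* ~ kdb_printf_cpu */

/* Returns the previous owner, which the caller passes to unlock_owner(). */
static long lock_owner(long self)
{
        long old;

        for (;;) {
                old = OWNER_NONE;
                /* Claim the lock iff free; otherwise 'old' gets the owner. */
                atomic_compare_exchange_strong(&owner, &old, self);
                if (old == OWNER_NONE || old == self)
                        return old;             /* free, or recursion by us */
                /* Someone else owns it: spin. */
        }
}

static void unlock_owner(long old)
{
        /* Restore the previous owner; OWNER_NONE releases the lock. */
        atomic_store_explicit(&owner, old, memory_order_release);
}

static void critical(long self, int depth)
{
        long old = lock_owner(self);

        printf("thread %ld in the critical section, depth %d\n", self, depth);
        if (depth < 2)
                critical(self, depth + 1);      /* reentry by the same owner */

        unlock_owner(old);
}

static void *worker(void *arg)
{
        critical((long)arg, 0);
        return NULL;
}

int main(void)
{
        pthread_t a, b;

        pthread_create(&a, NULL, worker, (void *)1L);
        pthread_create(&b, NULL, worker, (void *)2L);
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        return 0;
}

The diff below applies the same pattern inside vkdb_printf(), with
local_irq_save()/local_irq_restore() replacing
preempt_disable()/preempt_enable() so that only recursion from the same
context can reenter.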
Link: http://lkml.kernel.org/r/1480412276-16690-3-git-send-email-pmladek@suse.com
Signed-off-by: Petr Mladek <pmladek@suse.com>
Reviewed-by: Daniel Thompson <daniel.thompson@linaro.org>
Cc: Jason Wessel <jason.wessel@windriver.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'kernel/debug/kdb/kdb_io.c')
-rw-r--r-- | kernel/debug/kdb/kdb_io.c | 30
1 file changed, 13 insertions, 17 deletions
diff --git a/kernel/debug/kdb/kdb_io.c b/kernel/debug/kdb/kdb_io.c
index 46f477bebe0c..daa76154fb1b 100644
--- a/kernel/debug/kdb/kdb_io.c
+++ b/kernel/debug/kdb/kdb_io.c
@@ -555,16 +555,16 @@ int vkdb_printf(enum kdb_msgsrc src, const char *fmt, va_list ap)
         int colcount;
         int logging, saved_loglevel = 0;
         int saved_trap_printk;
-        int got_printf_lock = 0;
         int retlen = 0;
         int fnd, len;
+        int this_cpu, old_cpu;
+        static int kdb_printf_cpu = -1;
         char *cp, *cp2, *cphold = NULL, replaced_byte = ' ';
         char *moreprompt = "more> ";
         struct console *c = console_drivers;
-        static DEFINE_SPINLOCK(kdb_printf_lock);
         unsigned long uninitialized_var(flags);
 
-        preempt_disable();
+        local_irq_save(flags);
         saved_trap_printk = kdb_trap_printk;
         kdb_trap_printk = 0;
 
@@ -572,12 +572,13 @@ int vkdb_printf(enum kdb_msgsrc src, const char *fmt, va_list ap)
          * But if any cpu goes recursive in kdb, just print the output,
          * even if it is interleaved with any other text.
          */
-        if (!KDB_STATE(PRINTF_LOCK)) {
-                KDB_STATE_SET(PRINTF_LOCK);
-                spin_lock_irqsave(&kdb_printf_lock, flags);
-                got_printf_lock = 1;
-        } else {
-                __acquire(kdb_printf_lock);
+        this_cpu = smp_processor_id();
+        for (;;) {
+                old_cpu = cmpxchg(&kdb_printf_cpu, -1, this_cpu);
+                if (old_cpu == -1 || old_cpu == this_cpu)
+                        break;
+
+                cpu_relax();
         }
 
         diag = kdbgetintenv("LINES", &linecount);
@@ -846,15 +847,10 @@ kdb_print_out:
         suspend_grep = 0; /* end of what may have been a recursive call */
         if (logging)
                 console_loglevel = saved_loglevel;
-        if (KDB_STATE(PRINTF_LOCK) && got_printf_lock) {
-                got_printf_lock = 0;
-                spin_unlock_irqrestore(&kdb_printf_lock, flags);
-                KDB_STATE_CLEAR(PRINTF_LOCK);
-        } else {
-                __release(kdb_printf_lock);
-        }
+        /* kdb_printf_cpu locked the code above. */
+        smp_store_release(&kdb_printf_cpu, old_cpu);
         kdb_trap_printk = saved_trap_printk;
-        preempt_enable();
+        local_irq_restore(flags);
         return retlen;
 }
 