Diffstat (limited to 'kernel')
-rw-r--r--   kernel/backtracetest.c  |   1
-rw-r--r--   kernel/crash_reserve.c  |   1
-rw-r--r--   kernel/events/core.c    |  29
-rw-r--r--   kernel/fork.c           |   7
-rw-r--r--   kernel/hung_task.c      |   2
-rw-r--r--   kernel/panic.c          | 116
-rw-r--r--   kernel/resource_kunit.c |   1
-rw-r--r--   kernel/tsacct.c         |   2
-rw-r--r--   kernel/watchdog_perf.c  |  11
9 files changed, 110 insertions, 60 deletions
diff --git a/kernel/backtracetest.c b/kernel/backtracetest.c
index a4181234232b..2dfe66b9ed76 100644
--- a/kernel/backtracetest.c
+++ b/kernel/backtracetest.c
@@ -74,5 +74,6 @@ static void exitf(void)
 
 module_init(backtrace_regression_test);
 module_exit(exitf);
+MODULE_DESCRIPTION("Simple stack backtrace regression test module");
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Arjan van de Ven <arjan@linux.intel.com>");
diff --git a/kernel/crash_reserve.c b/kernel/crash_reserve.c
index 5b2722a93a48..d3b4cd12bdd1 100644
--- a/kernel/crash_reserve.c
+++ b/kernel/crash_reserve.c
@@ -13,7 +13,6 @@
 #include <linux/memory.h>
 #include <linux/cpuhotplug.h>
 #include <linux/memblock.h>
-#include <linux/kexec.h>
 #include <linux/kmemleak.h>
 
 #include <asm/page.h>
diff --git a/kernel/events/core.c b/kernel/events/core.c
index a2f3545f31b2..af2e3a06b239 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -534,7 +534,7 @@ void perf_sample_event_took(u64 sample_len_ns)
         __this_cpu_write(running_sample_length, running_len);
 
         /*
-         * Note: this will be biased artifically low until we have
+         * Note: this will be biased artificially low until we have
          * seen NR_ACCUMULATED_SAMPLES. Doing it this way keeps us
          * from having to maintain a count.
          */
@@ -596,10 +596,10 @@ static inline u64 perf_event_clock(struct perf_event *event)
  *
  * Event groups make things a little more complicated, but not terribly so. The
  * rules for a group are that if the group leader is OFF the entire group is
- * OFF, irrespecive of what the group member states are. This results in
+ * OFF, irrespective of what the group member states are. This results in
  * __perf_effective_state().
  *
- * A futher ramification is that when a group leader flips between OFF and
+ * A further ramification is that when a group leader flips between OFF and
  * !OFF, we need to update all group member times.
  *
  *
@@ -891,7 +891,7 @@ static int perf_cgroup_ensure_storage(struct perf_event *event,
         int cpu, heap_size, ret = 0;
 
         /*
-         * Allow storage to have sufficent space for an iterator for each
+         * Allow storage to have sufficient space for an iterator for each
          * possibly nested cgroup plus an iterator for events with no cgroup.
          */
         for (heap_size = 1; css; css = css->parent)
@@ -3671,7 +3671,7 @@ void __perf_event_task_sched_out(struct task_struct *task,
                 perf_cgroup_switch(next);
 }
 
-static bool perf_less_group_idx(const void *l, const void *r)
+static bool perf_less_group_idx(const void *l, const void *r, void __always_unused *args)
 {
         const struct perf_event *le = *(const struct perf_event **)l;
         const struct perf_event *re = *(const struct perf_event **)r;
@@ -3679,20 +3679,21 @@ static bool perf_less_group_idx(const void *l, const void *r)
         return le->group_index < re->group_index;
 }
 
-static void swap_ptr(void *l, void *r)
+static void swap_ptr(void *l, void *r, void __always_unused *args)
 {
         void **lp = l, **rp = r;
 
         swap(*lp, *rp);
 }
 
+DEFINE_MIN_HEAP(struct perf_event *, perf_event_min_heap);
+
 static const struct min_heap_callbacks perf_min_heap = {
-        .elem_size = sizeof(struct perf_event *),
         .less = perf_less_group_idx,
         .swp = swap_ptr,
 };
 
-static void __heap_add(struct min_heap *heap, struct perf_event *event)
+static void __heap_add(struct perf_event_min_heap *heap, struct perf_event *event)
 {
         struct perf_event **itrs = heap->data;
 
@@ -3726,7 +3727,7 @@ static noinline int visit_groups_merge(struct perf_event_context *ctx,
         struct perf_cpu_context *cpuctx = NULL;
         /* Space for per CPU and/or any CPU event iterators. */
         struct perf_event *itrs[2];
-        struct min_heap event_heap;
+        struct perf_event_min_heap event_heap;
         struct perf_event **evt;
         int ret;
 
@@ -3735,7 +3736,7 @@ static noinline int visit_groups_merge(struct perf_event_context *ctx,
 
         if (!ctx->task) {
                 cpuctx = this_cpu_ptr(&perf_cpu_context);
-                event_heap = (struct min_heap){
+                event_heap = (struct perf_event_min_heap){
                         .data = cpuctx->heap,
                         .nr = 0,
                         .size = cpuctx->heap_size,
@@ -3748,7 +3749,7 @@ static noinline int visit_groups_merge(struct perf_event_context *ctx,
                 css = &cpuctx->cgrp->css;
 #endif
         } else {
-                event_heap = (struct min_heap){
+                event_heap = (struct perf_event_min_heap){
                         .data = itrs,
                         .nr = 0,
                         .size = ARRAY_SIZE(itrs),
@@ -3770,7 +3771,7 @@ static noinline int visit_groups_merge(struct perf_event_context *ctx,
                 perf_assert_pmu_disabled((*evt)->pmu_ctx->pmu);
         }
 
-        min_heapify_all(&event_heap, &perf_min_heap);
+        min_heapify_all(&event_heap, &perf_min_heap, NULL);
 
         while (event_heap.nr) {
                 ret = func(*evt, data);
@@ -3779,9 +3780,9 @@ static noinline int visit_groups_merge(struct perf_event_context *ctx,
 
                 *evt = perf_event_groups_next(*evt, pmu);
                 if (*evt)
-                        min_heapify(&event_heap, 0, &perf_min_heap);
+                        min_heap_sift_down(&event_heap, 0, &perf_min_heap, NULL);
                 else
-                        min_heap_pop(&event_heap, &perf_min_heap);
+                        min_heap_pop(&event_heap, &perf_min_heap, NULL);
         }
 
         return 0;
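
The events/core.c hunks above follow the min-heap API rework: the heap becomes a type generated per element type by DEFINE_MIN_HEAP(), .elem_size disappears from min_heap_callbacks, and the comparison/swap callbacks and every heap operation gain an opaque args pointer. A minimal sketch of that usage pattern, assuming the <linux/min_heap.h> interface exactly as the diff uses it; the int_min_heap type and int_heap_demo() helper are hypothetical, for illustration only:

    /* Hypothetical demo of the typed min-heap API used above. */
    #include <linux/min_heap.h>

    DEFINE_MIN_HEAP(int, int_min_heap);     /* generates struct int_min_heap */

    static bool int_less(const void *l, const void *r, void __always_unused *args)
    {
            return *(const int *)l < *(const int *)r;
    }

    static void int_swap(void *l, void *r, void __always_unused *args)
    {
            int *lp = l, *rp = r;

            swap(*lp, *rp);
    }

    static const struct min_heap_callbacks int_heap_cb = {
            .less = int_less,
            .swp  = int_swap,
    };

    static int int_heap_demo(void)
    {
            int storage[8] = { 5, 1, 4, 2 };
            struct int_min_heap heap = {
                    .data = storage,
                    .nr   = 4,
                    .size = ARRAY_SIZE(storage),
            };

            /* O(n) bottom-up heapify; storage[0] becomes the minimum (1). */
            min_heapify_all(&heap, &int_heap_cb, NULL);
            /* Drop the minimum and sift the new root down. */
            min_heap_pop(&heap, &int_heap_cb, NULL);
            return heap.data[0];    /* now 2 */
    }

Carrying the element type in the heap type itself is what lets .elem_size go away: the size is known at compile time instead of being fetched through the callback struct on every compare and swap.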
diff --git a/kernel/fork.c b/kernel/fork.c
index ef48f6bdf175..a8362c26ebcb 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -208,9 +208,10 @@ static bool try_release_thread_stack_to_cache(struct vm_struct *vm)
         unsigned int i;
 
         for (i = 0; i < NR_CACHED_STACKS; i++) {
-                if (this_cpu_cmpxchg(cached_stacks[i], NULL, vm) != NULL)
-                        continue;
-                return true;
+                struct vm_struct *tmp = NULL;
+
+                if (this_cpu_try_cmpxchg(cached_stacks[i], &tmp, vm))
+                        return true;
         }
         return false;
 }
diff --git a/kernel/hung_task.c b/kernel/hung_task.c
index 1d92016b0b3c..6ca859715d8a 100644
--- a/kernel/hung_task.c
+++ b/kernel/hung_task.c
@@ -127,7 +127,7 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
          * Ok, the task did not get scheduled for more than 2 minutes,
          * complain:
          */
-        if (sysctl_hung_task_warnings) {
+        if (sysctl_hung_task_warnings || hung_task_call_panic) {
                 if (sysctl_hung_task_warnings > 0)
                         sysctl_hung_task_warnings--;
                 pr_err("INFO: task %s:%d blocked for more than %ld seconds.\n",
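
The fork.c hunk above swaps an open-coded this_cpu_cmpxchg() comparison for this_cpu_try_cmpxchg(). The try_ variants take a pointer to the expected old value and return a bool; on failure they write the value actually observed back through that pointer, removing the result-comparison boilerplate. A sketch of the pattern under those semantics, using the generic try_cmpxchg(); claim_first_free_slot() is a hypothetical helper, not from the patch:

    /* Hypothetical illustration of the try_cmpxchg() calling convention. */
    static bool claim_first_free_slot(void **slots, int nr, void *mine)
    {
            int i;

            for (i = 0; i < nr; i++) {
                    void *expected = NULL;

                    /*
                     * Succeeds only if slots[i] was NULL; on failure,
                     * 'expected' now holds the occupant we raced with.
                     */
                    if (try_cmpxchg(&slots[i], &expected, mine))
                            return true;
            }
            return false;
    }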
diff --git a/kernel/panic.c b/kernel/panic.c
index 8bff183d6180..f861bedc1925 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -35,6 +35,7 @@
 #include <linux/debugfs.h>
 #include <linux/sysfs.h>
 #include <linux/context_tracking.h>
+#include <linux/seq_buf.h>
 #include <trace/events/error_report.h>
 
 #include <asm/sections.h>
@@ -470,32 +471,83 @@ void panic(const char *fmt, ...)
 
 EXPORT_SYMBOL(panic);
 
+#define TAINT_FLAG(taint, _c_true, _c_false, _module)           \
+        [ TAINT_##taint ] = {                                   \
+                .c_true = _c_true, .c_false = _c_false,         \
+                .module = _module,                              \
+                .desc = #taint,                                 \
+        }
+
 /*
  * TAINT_FORCED_RMMOD could be a per-module flag but the module
  * is being removed anyway.
  */
 const struct taint_flag taint_flags[TAINT_FLAGS_COUNT] = {
-        [ TAINT_PROPRIETARY_MODULE ]    = { 'P', 'G', true },
-        [ TAINT_FORCED_MODULE ]         = { 'F', ' ', true },
-        [ TAINT_CPU_OUT_OF_SPEC ]       = { 'S', ' ', false },
-        [ TAINT_FORCED_RMMOD ]          = { 'R', ' ', false },
-        [ TAINT_MACHINE_CHECK ]         = { 'M', ' ', false },
-        [ TAINT_BAD_PAGE ]              = { 'B', ' ', false },
-        [ TAINT_USER ]                  = { 'U', ' ', false },
-        [ TAINT_DIE ]                   = { 'D', ' ', false },
-        [ TAINT_OVERRIDDEN_ACPI_TABLE ] = { 'A', ' ', false },
-        [ TAINT_WARN ]                  = { 'W', ' ', false },
-        [ TAINT_CRAP ]                  = { 'C', ' ', true },
-        [ TAINT_FIRMWARE_WORKAROUND ]   = { 'I', ' ', false },
-        [ TAINT_OOT_MODULE ]            = { 'O', ' ', true },
-        [ TAINT_UNSIGNED_MODULE ]       = { 'E', ' ', true },
-        [ TAINT_SOFTLOCKUP ]            = { 'L', ' ', false },
-        [ TAINT_LIVEPATCH ]             = { 'K', ' ', true },
-        [ TAINT_AUX ]                   = { 'X', ' ', true },
-        [ TAINT_RANDSTRUCT ]            = { 'T', ' ', true },
-        [ TAINT_TEST ]                  = { 'N', ' ', true },
+        TAINT_FLAG(PROPRIETARY_MODULE,          'P', 'G', true),
+        TAINT_FLAG(FORCED_MODULE,               'F', ' ', true),
+        TAINT_FLAG(CPU_OUT_OF_SPEC,             'S', ' ', false),
+        TAINT_FLAG(FORCED_RMMOD,                'R', ' ', false),
+        TAINT_FLAG(MACHINE_CHECK,               'M', ' ', false),
+        TAINT_FLAG(BAD_PAGE,                    'B', ' ', false),
+        TAINT_FLAG(USER,                        'U', ' ', false),
+        TAINT_FLAG(DIE,                         'D', ' ', false),
+        TAINT_FLAG(OVERRIDDEN_ACPI_TABLE,       'A', ' ', false),
+        TAINT_FLAG(WARN,                        'W', ' ', false),
+        TAINT_FLAG(CRAP,                        'C', ' ', true),
+        TAINT_FLAG(FIRMWARE_WORKAROUND,         'I', ' ', false),
+        TAINT_FLAG(OOT_MODULE,                  'O', ' ', true),
+        TAINT_FLAG(UNSIGNED_MODULE,             'E', ' ', true),
+        TAINT_FLAG(SOFTLOCKUP,                  'L', ' ', false),
+        TAINT_FLAG(LIVEPATCH,                   'K', ' ', true),
+        TAINT_FLAG(AUX,                         'X', ' ', true),
+        TAINT_FLAG(RANDSTRUCT,                  'T', ' ', true),
+        TAINT_FLAG(TEST,                        'N', ' ', true),
 };
+#undef TAINT_FLAG
+
+static void print_tainted_seq(struct seq_buf *s, bool verbose)
+{
+        const char *sep = "";
+        int i;
+
+        if (!tainted_mask) {
+                seq_buf_puts(s, "Not tainted");
+                return;
+        }
+
+        seq_buf_printf(s, "Tainted: ");
+        for (i = 0; i < TAINT_FLAGS_COUNT; i++) {
+                const struct taint_flag *t = &taint_flags[i];
+                bool is_set = test_bit(i, &tainted_mask);
+                char c = is_set ? t->c_true : t->c_false;
+
+                if (verbose) {
+                        if (is_set) {
+                                seq_buf_printf(s, "%s[%c]=%s", sep, c, t->desc);
+                                sep = ", ";
+                        }
+                } else {
+                        seq_buf_putc(s, c);
+                }
+        }
+}
+
+static const char *_print_tainted(bool verbose)
+{
+        /* FIXME: what should the size be? */
+        static char buf[sizeof(taint_flags)];
+        struct seq_buf s;
+
+        BUILD_BUG_ON(ARRAY_SIZE(taint_flags) != TAINT_FLAGS_COUNT);
+
+        seq_buf_init(&s, buf, sizeof(buf));
+
+        print_tainted_seq(&s, verbose);
+
+        return seq_buf_str(&s);
+}
+
 /**
  * print_tainted - return a string to represent the kernel taint state.
  *
@@ -506,25 +558,15 @@ const struct taint_flag taint_flags[TAINT_FLAGS_COUNT] = {
  */
 const char *print_tainted(void)
 {
-        static char buf[TAINT_FLAGS_COUNT + sizeof("Tainted: ")];
-
-        BUILD_BUG_ON(ARRAY_SIZE(taint_flags) != TAINT_FLAGS_COUNT);
-
-        if (tainted_mask) {
-                char *s;
-                int i;
-
-                s = buf + sprintf(buf, "Tainted: ");
-                for (i = 0; i < TAINT_FLAGS_COUNT; i++) {
-                        const struct taint_flag *t = &taint_flags[i];
-                        *s++ = test_bit(i, &tainted_mask) ?
-                                        t->c_true : t->c_false;
-                }
-                *s = 0;
-        } else
-                snprintf(buf, sizeof(buf), "Not tainted");
+        return _print_tainted(false);
+}
 
-        return buf;
+/**
+ * print_tainted_verbose - A more verbose version of print_tainted()
+ */
+const char *print_tainted_verbose(void)
+{
+        return _print_tainted(true);
 }
 
 int test_taint(unsigned flag)
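
The panic.c rework above delegates bounds management to seq_buf, which is why print_tainted() no longer hand-counts its buffer. A minimal sketch of the seq_buf calls relied on, assuming the <linux/seq_buf.h> API as used in the diff; the 64-byte buffer and demo_taint_line() helper are made up for the example:

    #include <linux/seq_buf.h>

    static const char *demo_taint_line(unsigned long mask)
    {
            static char buf[64];    /* hypothetical size, for the demo only */
            struct seq_buf s;
            int i;

            /* seq_buf tracks its own length and truncates instead of overflowing. */
            seq_buf_init(&s, buf, sizeof(buf));
            seq_buf_puts(&s, "Tainted: ");
            for (i = 0; i < BITS_PER_LONG; i++)
                    seq_buf_putc(&s, test_bit(i, &mask) ? 'Y' : ' ');

            /* NUL-terminates the accumulated text and returns the buffer. */
            return seq_buf_str(&s);
    }

With overflow handled by seq_buf, adding the verbose "[c]=NAME" output only costs a flag, rather than a second hand-sized buffer.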
diff --git a/kernel/resource_kunit.c b/kernel/resource_kunit.c
index 58ab9f914602..0e509985a44a 100644
--- a/kernel/resource_kunit.c
+++ b/kernel/resource_kunit.c
@@ -149,4 +149,5 @@ static struct kunit_suite resource_test_suite = {
 };
 kunit_test_suite(resource_test_suite);
 
+MODULE_DESCRIPTION("I/O Port & Memory Resource manager unit tests");
 MODULE_LICENSE("GPL");
diff --git a/kernel/tsacct.c b/kernel/tsacct.c
index 4252f0645b9e..16b283f9d831 100644
--- a/kernel/tsacct.c
+++ b/kernel/tsacct.c
@@ -76,7 +76,7 @@ void bacct_add_tsk(struct user_namespace *user_ns,
         stats->ac_minflt = tsk->min_flt;
         stats->ac_majflt = tsk->maj_flt;
 
-        strncpy(stats->ac_comm, tsk->comm, sizeof(stats->ac_comm));
+        strscpy_pad(stats->ac_comm, tsk->comm);
 }
 
diff --git a/kernel/watchdog_perf.c b/kernel/watchdog_perf.c
index d577c4a8321e..59c1d86a73a2 100644
--- a/kernel/watchdog_perf.c
+++ b/kernel/watchdog_perf.c
@@ -75,11 +75,15 @@ static bool watchdog_check_timestamp(void)
         __this_cpu_write(last_timestamp, now);
         return true;
 }
-#else
-static inline bool watchdog_check_timestamp(void)
+
+static void watchdog_init_timestamp(void)
 {
-        return true;
+        __this_cpu_write(nmi_rearmed, 0);
+        __this_cpu_write(last_timestamp, ktime_get_mono_fast_ns());
 }
+#else
+static inline bool watchdog_check_timestamp(void) { return true; }
+static inline void watchdog_init_timestamp(void) { }
 #endif
 
 static struct perf_event_attr wd_hw_attr = {
@@ -161,6 +165,7 @@ void watchdog_hardlockup_enable(unsigned int cpu)
         if (!atomic_fetch_inc(&watchdog_cpus))
                 pr_info("Enabled. Permanently consumes one hw-PMU counter.\n");
 
+        watchdog_init_timestamp();
         perf_event_enable(this_cpu_read(watchdog_ev));
 }
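
Finally, the tsacct.c hunk is part of the tree-wide strncpy() retirement. Unlike strncpy(), strscpy_pad() guarantees NUL termination and zero-fills the rest of the destination, and its two-argument form (as used in the diff) infers the size from the destination array type. A sketch under those assumptions; struct demo_acct and fill_comm() are illustrative, not kernel code:

    #include <linux/string.h>

    struct demo_acct {
            char ac_comm[16];       /* fixed-size field copied to userspace */
    };

    static void fill_comm(struct demo_acct *acct, const char *comm)
    {
            /*
             * Size is taken from ac_comm's array type; the result is always
             * NUL-terminated and the unused tail is zeroed, so no stale
             * stack or heap bytes can leak out with the record.
             */
            strscpy_pad(acct->ac_comm, comm);
    }

The zero-padding matters here precisely because ac_comm is part of an accounting record handed to userspace, where uninitialized trailing bytes would be an information leak.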