Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig                      |    2
-rw-r--r--  lib/Kconfig.debug                |   62
-rw-r--r--  lib/Makefile                     |   12
-rw-r--r--  lib/alloc_tag.c                  |  246
-rw-r--r--  lib/buildid.c                    |    4
-rw-r--r--  lib/cmpxchg-emu.c                |   45
-rw-r--r--  lib/codetag.c                    |  283
-rw-r--r--  lib/crypto/Kconfig               |    5
-rw-r--r--  lib/crypto/Makefile              |    3
-rw-r--r--  lib/crypto/aescfb.c              |  257
-rw-r--r--  lib/dim/Makefile                 |    4
-rw-r--r--  lib/dim/dim.c                    |    3
-rw-r--r--  lib/dynamic_debug.c              |    6
-rw-r--r--  lib/dynamic_queue_limits.c       |   13
-rw-r--r--  lib/find_bit.c                   |   12
-rw-r--r--  lib/fortify_kunit.c              |  222
-rw-r--r--  lib/iomap_copy.c                 |   13
-rw-r--r--  lib/kunit/Kconfig                |   11
-rw-r--r--  lib/kunit/device.c               |    2
-rw-r--r--  lib/kunit/kunit-test.c           |   45
-rw-r--r--  lib/kunit/string-stream-test.c   |   12
-rw-r--r--  lib/kunit/test.c                 |    3
-rw-r--r--  lib/kunit/try-catch.c            |   40
-rw-r--r--  lib/kunit_iov_iter.c             |   18
-rw-r--r--  lib/maple_tree.c                 |   16
-rw-r--r--  lib/math/prime_numbers.c         |    2
-rw-r--r--  lib/memcpy_kunit.c               |   53
-rw-r--r--  lib/objpool.c                    |  112
-rw-r--r--  lib/raid6/Makefile               |    2
-rw-r--r--  lib/rhashtable.c                 |   22
-rw-r--r--  lib/sbitmap.c                    |    8
-rw-r--r--  lib/scatterlist.c                |    2
-rw-r--r--  lib/slub_kunit.c                 |    2
-rw-r--r--  lib/strcat_kunit.c               |  104
-rw-r--r--  lib/string_kunit.c               |  461
-rw-r--r--  lib/strscpy_kunit.c              |  142
-rw-r--r--  lib/test_bitmap.c                |  203
-rw-r--r--  lib/test_bpf.c                   |    2
-rw-r--r--  lib/test_hmm.c                   |    8
-rw-r--r--  lib/test_xarray.c                |  120
-rw-r--r--  lib/ubsan.h                      |   43
-rw-r--r--  lib/vdso/Kconfig                 |    7
-rw-r--r--  lib/vdso/gettimeofday.c          |   55
-rw-r--r--  lib/xarray.c                     |   75
44 files changed, 2113 insertions(+), 649 deletions(-)
diff --git a/lib/Kconfig b/lib/Kconfig
index 4557bb8a5256..d33a268bc256 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -628,7 +628,7 @@ config SIGNATURE
Implementation is done using GnuPG MPI library
config DIMLIB
- bool
+ tristate
help
Dynamic Interrupt Moderation library.
Implements an algorithm for dynamically changing CQ moderation values
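Making DIMLIB tristate allows the library to be built as a loadable module. The lib/dim/Makefile and lib/dim/dim.c hunks below complete the change: the composite object is renamed to dimlib.o (so CONFIG_DIMLIB=m produces dimlib.ko) and the MODULE_DESCRIPTION/MODULE_LICENSE metadata that a modular build requires is added.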
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index c63a5fbf1f1c..c1c1b19525a5 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -375,7 +375,7 @@ config DEBUG_INFO_SPLIT
Incompatible with older versions of ccache.
config DEBUG_INFO_BTF
- bool "Generate BTF typeinfo"
+ bool "Generate BTF type information"
depends on !DEBUG_INFO_SPLIT && !DEBUG_INFO_REDUCED
depends on !GCC_PLUGIN_RANDSTRUCT || COMPILE_TEST
depends on BPF_SYSCALL
@@ -408,7 +408,8 @@ config PAHOLE_HAS_LANG_EXCLUDE
using DEBUG_INFO_BTF_MODULES.
config DEBUG_INFO_BTF_MODULES
- def_bool y
+ bool "Generate BTF type information for kernel modules"
+ default y
depends on DEBUG_INFO_BTF && MODULES && PAHOLE_HAS_SPLIT_BTF
help
Generate compact split BTF type information for kernel modules.
@@ -968,6 +969,37 @@ config DEBUG_STACKOVERFLOW
If in doubt, say "N".
+config CODE_TAGGING
+ bool
+ select KALLSYMS
+
+config MEM_ALLOC_PROFILING
+ bool "Enable memory allocation profiling"
+ default n
+ depends on PROC_FS
+ depends on !DEBUG_FORCE_WEAK_PER_CPU
+ select CODE_TAGGING
+ select PAGE_EXTENSION
+ select SLAB_OBJ_EXT
+ help
+ Track the source of each memory allocation and record the total
+ allocation size initiated at that code location. The mechanism can
+ be used to track memory leaks with low performance and memory
+ overhead.
+
+config MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT
+ bool "Enable memory allocation profiling by default"
+ default y
+ depends on MEM_ALLOC_PROFILING
+
+config MEM_ALLOC_PROFILING_DEBUG
+ bool "Memory allocation profiler debugging"
+ default n
+ depends on MEM_ALLOC_PROFILING
+ select MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT
+ help
+ Adds warnings with helpful error messages for memory allocation
+ profiling.
+
source "lib/Kconfig.kasan"
source "lib/Kconfig.kfence"
source "lib/Kconfig.kmsan"
@@ -1029,6 +1061,20 @@ config SOFTLOCKUP_DETECTOR
chance to run. The current stack trace is displayed upon
detection and the system will stay locked up.
+config SOFTLOCKUP_DETECTOR_INTR_STORM
+ bool "Detect Interrupt Storm in Soft Lockups"
+ depends on SOFTLOCKUP_DETECTOR && IRQ_TIME_ACCOUNTING
+ select GENERIC_IRQ_STAT_SNAPSHOT
+ default y if NR_CPUS <= 128
+ help
+ Say Y here to enable the kernel to detect an interrupt storm
+ during "soft lockups".
+
+ "Soft lockups" can have a variety of causes. If one is caused by an
+ interrupt storm, the storming interrupts will not appear on the
+ call stack. To detect this case, the CPU stats and the interrupt
+ counts are reported during the "soft lockup".
+
config BOOTPARAM_SOFTLOCKUP_PANIC
bool "Panic (Reboot) On Soft Lockups"
depends on SOFTLOCKUP_DETECTOR
@@ -1250,7 +1296,7 @@ config SCHED_INFO
config SCHEDSTATS
bool "Collect scheduler statistics"
- depends on DEBUG_KERNEL && PROC_FS
+ depends on PROC_FS
select SCHED_INFO
help
If you say Y here, additional code will be inserted into the
@@ -2758,16 +2804,6 @@ config HW_BREAKPOINT_KUNIT_TEST
If unsure, say N.
-config STRCAT_KUNIT_TEST
- tristate "Test strcat() family of functions at runtime" if !KUNIT_ALL_TESTS
- depends on KUNIT
- default KUNIT_ALL_TESTS
-
-config STRSCPY_KUNIT_TEST
- tristate "Test strscpy*() family of functions at runtime" if !KUNIT_ALL_TESTS
- depends on KUNIT
- default KUNIT_ALL_TESTS
-
config SIPHASH_KUNIT_TEST
tristate "Perform selftest on siphash functions" if !KUNIT_ALL_TESTS
depends on KUNIT
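The new profiling options above get their runtime controls in lib/alloc_tag.c further down: a /proc/sys/vm/mem_profiling sysctl and a matching sysctl.vm.mem_profiling= early boot parameter. Note that CONFIG_MEM_ALLOC_PROFILING=y pulls in CODE_TAGGING, PAGE_EXTENSION and SLAB_OBJ_EXT via select, so nothing else needs enabling by hand.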
diff --git a/lib/Makefile b/lib/Makefile
index ffc6b2341b45..7a1fdd1cce7a 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -233,9 +233,13 @@ obj-$(CONFIG_OF_RECONFIG_NOTIFIER_ERROR_INJECT) += \
of-reconfig-notifier-error-inject.o
obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
+obj-$(CONFIG_CODE_TAGGING) += codetag.o
+obj-$(CONFIG_MEM_ALLOC_PROFILING) += alloc_tag.o
+
lib-$(CONFIG_GENERIC_BUG) += bug.o
obj-$(CONFIG_HAVE_ARCH_TRACEHOOK) += syscall.o
+obj-$(CONFIG_ARCH_NEED_CMPXCHG_1_EMU) += cmpxchg-emu.o
obj-$(CONFIG_DYNAMIC_DEBUG_CORE) += dynamic_debug.o
#ensure exported functions have prototypes
@@ -352,7 +356,7 @@ $(obj)/oid_registry_data.c: $(srctree)/include/linux/oid_registry.h \
$(call cmd,build_OID_registry)
quiet_cmd_build_OID_registry = GEN $@
- cmd_build_OID_registry = perl $(srctree)/$(src)/build_OID_registry $< $@
+ cmd_build_OID_registry = perl $(src)/build_OID_registry $< $@
clean-files += oid_registry_data.c
@@ -403,8 +407,6 @@ CFLAGS_fortify_kunit.o += $(call cc-disable-warning, stringop-overread)
CFLAGS_fortify_kunit.o += $(call cc-disable-warning, stringop-truncation)
CFLAGS_fortify_kunit.o += $(DISABLE_STRUCTLEAK_PLUGIN)
obj-$(CONFIG_FORTIFY_KUNIT_TEST) += fortify_kunit.o
-obj-$(CONFIG_STRCAT_KUNIT_TEST) += strcat_kunit.o
-obj-$(CONFIG_STRSCPY_KUNIT_TEST) += strscpy_kunit.o
obj-$(CONFIG_SIPHASH_KUNIT_TEST) += siphash_kunit.o
obj-$(CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED) += devmem_is_allowed.o
@@ -412,8 +414,8 @@ obj-$(CONFIG_GENERIC_LIB_DEVMEM_IS_ALLOWED) += devmem_is_allowed.o
obj-$(CONFIG_FIRMWARE_TABLE) += fw_table.o
# FORTIFY_SOURCE compile-time behavior tests
-TEST_FORTIFY_SRCS = $(wildcard $(srctree)/$(src)/test_fortify/*-*.c)
-TEST_FORTIFY_LOGS = $(patsubst $(srctree)/$(src)/%.c, %.log, $(TEST_FORTIFY_SRCS))
+TEST_FORTIFY_SRCS = $(wildcard $(src)/test_fortify/*-*.c)
+TEST_FORTIFY_LOGS = $(patsubst $(src)/%.c, %.log, $(TEST_FORTIFY_SRCS))
TEST_FORTIFY_LOG = test_fortify.log
quiet_cmd_test_fortify = TEST $@
diff --git a/lib/alloc_tag.c b/lib/alloc_tag.c
new file mode 100644
index 000000000000..531dbe2f5456
--- /dev/null
+++ b/lib/alloc_tag.c
@@ -0,0 +1,246 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/alloc_tag.h>
+#include <linux/fs.h>
+#include <linux/gfp.h>
+#include <linux/module.h>
+#include <linux/page_ext.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_buf.h>
+#include <linux/seq_file.h>
+
+static struct codetag_type *alloc_tag_cttype;
+
+DEFINE_PER_CPU(struct alloc_tag_counters, _shared_alloc_tag);
+EXPORT_SYMBOL(_shared_alloc_tag);
+
+DEFINE_STATIC_KEY_MAYBE(CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT,
+ mem_alloc_profiling_key);
+
+static void *allocinfo_start(struct seq_file *m, loff_t *pos)
+{
+ struct codetag_iterator *iter;
+ struct codetag *ct;
+ loff_t node = *pos;
+
+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
+ m->private = iter;
+ if (!iter)
+ return NULL;
+
+ codetag_lock_module_list(alloc_tag_cttype, true);
+ *iter = codetag_get_ct_iter(alloc_tag_cttype);
+ while ((ct = codetag_next_ct(iter)) != NULL && node)
+ node--;
+
+ return ct ? iter : NULL;
+}
+
+static void *allocinfo_next(struct seq_file *m, void *arg, loff_t *pos)
+{
+ struct codetag_iterator *iter = (struct codetag_iterator *)arg;
+ struct codetag *ct = codetag_next_ct(iter);
+
+ (*pos)++;
+ if (!ct)
+ return NULL;
+
+ return iter;
+}
+
+static void allocinfo_stop(struct seq_file *m, void *arg)
+{
+ struct codetag_iterator *iter = (struct codetag_iterator *)m->private;
+
+ if (iter) {
+ codetag_lock_module_list(alloc_tag_cttype, false);
+ kfree(iter);
+ }
+}
+
+static void alloc_tag_to_text(struct seq_buf *out, struct codetag *ct)
+{
+ struct alloc_tag *tag = ct_to_alloc_tag(ct);
+ struct alloc_tag_counters counter = alloc_tag_read(tag);
+ s64 bytes = counter.bytes;
+
+ seq_buf_printf(out, "%12lli %8llu ", bytes, counter.calls);
+ codetag_to_text(out, ct);
+ seq_buf_putc(out, ' ');
+ seq_buf_putc(out, '\n');
+}
+
+static int allocinfo_show(struct seq_file *m, void *arg)
+{
+ struct codetag_iterator *iter = (struct codetag_iterator *)arg;
+ char *bufp;
+ size_t n = seq_get_buf(m, &bufp);
+ struct seq_buf buf;
+
+ seq_buf_init(&buf, bufp, n);
+ alloc_tag_to_text(&buf, iter->ct);
+ seq_commit(m, seq_buf_used(&buf));
+ return 0;
+}
+
+static const struct seq_operations allocinfo_seq_op = {
+ .start = allocinfo_start,
+ .next = allocinfo_next,
+ .stop = allocinfo_stop,
+ .show = allocinfo_show,
+};
+
+size_t alloc_tag_top_users(struct codetag_bytes *tags, size_t count, bool can_sleep)
+{
+ struct codetag_iterator iter;
+ struct codetag *ct;
+ struct codetag_bytes n;
+ unsigned int i, nr = 0;
+
+ if (can_sleep)
+ codetag_lock_module_list(alloc_tag_cttype, true);
+ else if (!codetag_trylock_module_list(alloc_tag_cttype))
+ return 0;
+
+ iter = codetag_get_ct_iter(alloc_tag_cttype);
+ while ((ct = codetag_next_ct(&iter))) {
+ struct alloc_tag_counters counter = alloc_tag_read(ct_to_alloc_tag(ct));
+
+ n.ct = ct;
+ n.bytes = counter.bytes;
+
+ for (i = 0; i < nr; i++)
+ if (n.bytes > tags[i].bytes)
+ break;
+
+ if (i < count) {
+ nr -= nr == count;
+ memmove(&tags[i + 1],
+ &tags[i],
+ sizeof(tags[0]) * (nr - i));
+ nr++;
+ tags[i] = n;
+ }
+ }
+
+ codetag_lock_module_list(alloc_tag_cttype, false);
+
+ return nr;
+}
+
+static void __init procfs_init(void)
+{
+ proc_create_seq("allocinfo", 0400, NULL, &allocinfo_seq_op);
+}
+
+static bool alloc_tag_module_unload(struct codetag_type *cttype,
+ struct codetag_module *cmod)
+{
+ struct codetag_iterator iter = codetag_get_ct_iter(cttype);
+ struct alloc_tag_counters counter;
+ bool module_unused = true;
+ struct alloc_tag *tag;
+ struct codetag *ct;
+
+ for (ct = codetag_next_ct(&iter); ct; ct = codetag_next_ct(&iter)) {
+ if (iter.cmod != cmod)
+ continue;
+
+ tag = ct_to_alloc_tag(ct);
+ counter = alloc_tag_read(tag);
+
+ if (WARN(counter.bytes,
+ "%s:%u module %s func:%s has %llu allocated at module unload",
+ ct->filename, ct->lineno, ct->modname, ct->function, counter.bytes))
+ module_unused = false;
+ }
+
+ return module_unused;
+}
+
+#ifdef CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT
+static bool mem_profiling_support __meminitdata = true;
+#else
+static bool mem_profiling_support __meminitdata;
+#endif
+
+static int __init setup_early_mem_profiling(char *str)
+{
+ bool enable;
+
+ if (!str || !str[0])
+ return -EINVAL;
+
+ if (!strncmp(str, "never", 5)) {
+ enable = false;
+ mem_profiling_support = false;
+ } else {
+ int res;
+
+ res = kstrtobool(str, &enable);
+ if (res)
+ return res;
+
+ mem_profiling_support = true;
+ }
+
+ if (enable != static_key_enabled(&mem_alloc_profiling_key)) {
+ if (enable)
+ static_branch_enable(&mem_alloc_profiling_key);
+ else
+ static_branch_disable(&mem_alloc_profiling_key);
+ }
+
+ return 0;
+}
+early_param("sysctl.vm.mem_profiling", setup_early_mem_profiling);
+
+static __init bool need_page_alloc_tagging(void)
+{
+ return mem_profiling_support;
+}
+
+static __init void init_page_alloc_tagging(void)
+{
+}
+
+struct page_ext_operations page_alloc_tagging_ops = {
+ .size = sizeof(union codetag_ref),
+ .need = need_page_alloc_tagging,
+ .init = init_page_alloc_tagging,
+};
+EXPORT_SYMBOL(page_alloc_tagging_ops);
+
+static struct ctl_table memory_allocation_profiling_sysctls[] = {
+ {
+ .procname = "mem_profiling",
+ .data = &mem_alloc_profiling_key,
+#ifdef CONFIG_MEM_ALLOC_PROFILING_DEBUG
+ .mode = 0444,
+#else
+ .mode = 0644,
+#endif
+ .proc_handler = proc_do_static_key,
+ },
+ { }
+};
+
+static int __init alloc_tag_init(void)
+{
+ const struct codetag_type_desc desc = {
+ .section = "alloc_tags",
+ .tag_size = sizeof(struct alloc_tag),
+ .module_unload = alloc_tag_module_unload,
+ };
+
+ alloc_tag_cttype = codetag_register_type(&desc);
+ if (IS_ERR(alloc_tag_cttype))
+ return PTR_ERR(alloc_tag_cttype);
+
+ if (!mem_profiling_support)
+ memory_allocation_profiling_sysctls[0].mode = 0444;
+ register_sysctl_init("vm", memory_allocation_profiling_sysctls);
+ procfs_init();
+
+ return 0;
+}
+module_init(alloc_tag_init);
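Per the format strings in alloc_tag_to_text() and codetag_to_text() above, /proc/allocinfo emits one line per allocation site: allocated bytes (12-wide), call count (8-wide), then the codetag location. An illustrative line (file, module, and values invented, not real output):

        4096        1 lib/example.c:42 [example_mod] func:example_alloc

Per setup_early_mem_profiling(), the profiler can also be toggled at boot with sysctl.vm.mem_profiling=0/1, or disabled outright with =never.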
diff --git a/lib/buildid.c b/lib/buildid.c
index 898301b49eb6..7954dd92e36c 100644
--- a/lib/buildid.c
+++ b/lib/buildid.c
@@ -182,8 +182,8 @@ unsigned char vmlinux_build_id[BUILD_ID_SIZE_MAX] __ro_after_init;
*/
void __init init_vmlinux_build_id(void)
{
- extern const void __start_notes __weak;
- extern const void __stop_notes __weak;
+ extern const void __start_notes;
+ extern const void __stop_notes;
unsigned int size = &__stop_notes - &__start_notes;
build_id_parse_buf(&__start_notes, vmlinux_build_id, size);
diff --git a/lib/cmpxchg-emu.c b/lib/cmpxchg-emu.c
new file mode 100644
index 000000000000..27f6f97cb60d
--- /dev/null
+++ b/lib/cmpxchg-emu.c
@@ -0,0 +1,45 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Emulated 1-byte cmpxchg operation for architectures lacking direct
+ * support for this size. This is implemented in terms of 4-byte cmpxchg
+ * operations.
+ *
+ * Copyright (C) 2024 Paul E. McKenney.
+ */
+
+#include <linux/types.h>
+#include <linux/export.h>
+#include <linux/instrumented.h>
+#include <linux/atomic.h>
+#include <linux/panic.h>
+#include <linux/bug.h>
+#include <asm-generic/rwonce.h>
+#include <linux/cmpxchg-emu.h>
+
+union u8_32 {
+ u8 b[4];
+ u32 w;
+};
+
+/* Emulate one-byte cmpxchg() in terms of 4-byte cmpxchg. */
+uintptr_t cmpxchg_emu_u8(volatile u8 *p, uintptr_t old, uintptr_t new)
+{
+ u32 *p32 = (u32 *)(((uintptr_t)p) & ~0x3);
+ int i = ((uintptr_t)p) & 0x3;
+ union u8_32 old32;
+ union u8_32 new32;
+ u32 ret;
+
+ ret = READ_ONCE(*p32);
+ do {
+ old32.w = ret;
+ if (old32.b[i] != old)
+ return old32.b[i];
+ new32.w = old32.w;
+ new32.b[i] = new;
+ instrument_atomic_read_write(p, 1);
+ ret = data_race(cmpxchg(p32, old32.w, new32.w)); // Overridden above.
+ } while (ret != old32.w);
+ return old;
+}
+EXPORT_SYMBOL_GPL(cmpxchg_emu_u8);
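The byte-in-word trick above can be demonstrated outside the kernel. Below is a minimal userspace sketch using C11 atomics; it assumes a little-endian layout and uses invented names, not the kernel API:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Emulate a 1-byte compare-and-swap with a 4-byte one (little-endian). */
static uint8_t cas_u8_via_u32(uint8_t *p, uint8_t old, uint8_t new)
{
	_Atomic uint32_t *p32 = (_Atomic uint32_t *)((uintptr_t)p & ~(uintptr_t)0x3);
	unsigned int shift = ((uintptr_t)p & 0x3) * 8;	/* byte offset in word */
	uint32_t cur = atomic_load(p32);

	for (;;) {
		uint8_t b = (cur >> shift) & 0xff;
		uint32_t next;

		if (b != old)
			return b;	/* mismatch: return the current byte */
		next = (cur & ~((uint32_t)0xff << shift)) | ((uint32_t)new << shift);
		/* On failure, cur is reloaded and the loop retries. */
		if (atomic_compare_exchange_weak(p32, &cur, next))
			return old;	/* success */
	}
}

int main(void)
{
	_Alignas(4) uint8_t buf[4] = { 1, 2, 3, 4 };

	printf("%d\n", cas_u8_via_u32(&buf[1], 2, 9));	/* 2 (success) */
	printf("%d\n", buf[1]);				/* 9 */
	return 0;
}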
diff --git a/lib/codetag.c b/lib/codetag.c
new file mode 100644
index 000000000000..5ace625f2328
--- /dev/null
+++ b/lib/codetag.c
@@ -0,0 +1,283 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/codetag.h>
+#include <linux/idr.h>
+#include <linux/kallsyms.h>
+#include <linux/module.h>
+#include <linux/seq_buf.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+struct codetag_type {
+ struct list_head link;
+ unsigned int count;
+ struct idr mod_idr;
+ struct rw_semaphore mod_lock; /* protects mod_idr */
+ struct codetag_type_desc desc;
+};
+
+struct codetag_range {
+ struct codetag *start;
+ struct codetag *stop;
+};
+
+struct codetag_module {
+ struct module *mod;
+ struct codetag_range range;
+};
+
+static DEFINE_MUTEX(codetag_lock);
+static LIST_HEAD(codetag_types);
+
+void codetag_lock_module_list(struct codetag_type *cttype, bool lock)
+{
+ if (lock)
+ down_read(&cttype->mod_lock);
+ else
+ up_read(&cttype->mod_lock);
+}
+
+bool codetag_trylock_module_list(struct codetag_type *cttype)
+{
+ return down_read_trylock(&cttype->mod_lock) != 0;
+}
+
+struct codetag_iterator codetag_get_ct_iter(struct codetag_type *cttype)
+{
+ struct codetag_iterator iter = {
+ .cttype = cttype,
+ .cmod = NULL,
+ .mod_id = 0,
+ .ct = NULL,
+ };
+
+ return iter;
+}
+
+static inline struct codetag *get_first_module_ct(struct codetag_module *cmod)
+{
+ return cmod->range.start < cmod->range.stop ? cmod->range.start : NULL;
+}
+
+static inline
+struct codetag *get_next_module_ct(struct codetag_iterator *iter)
+{
+ struct codetag *res = (struct codetag *)
+ ((char *)iter->ct + iter->cttype->desc.tag_size);
+
+ return res < iter->cmod->range.stop ? res : NULL;
+}
+
+struct codetag *codetag_next_ct(struct codetag_iterator *iter)
+{
+ struct codetag_type *cttype = iter->cttype;
+ struct codetag_module *cmod;
+ struct codetag *ct;
+
+ lockdep_assert_held(&cttype->mod_lock);
+
+ if (unlikely(idr_is_empty(&cttype->mod_idr)))
+ return NULL;
+
+ ct = NULL;
+ while (true) {
+ cmod = idr_find(&cttype->mod_idr, iter->mod_id);
+
+ /* If module was removed move to the next one */
+ if (!cmod)
+ cmod = idr_get_next_ul(&cttype->mod_idr,
+ &iter->mod_id);
+
+ /* Exit if no more modules */
+ if (!cmod)
+ break;
+
+ if (cmod != iter->cmod) {
+ iter->cmod = cmod;
+ ct = get_first_module_ct(cmod);
+ } else
+ ct = get_next_module_ct(iter);
+
+ if (ct)
+ break;
+
+ iter->mod_id++;
+ }
+
+ iter->ct = ct;
+ return ct;
+}
+
+void codetag_to_text(struct seq_buf *out, struct codetag *ct)
+{
+ if (ct->modname)
+ seq_buf_printf(out, "%s:%u [%s] func:%s",
+ ct->filename, ct->lineno,
+ ct->modname, ct->function);
+ else
+ seq_buf_printf(out, "%s:%u func:%s",
+ ct->filename, ct->lineno, ct->function);
+}
+
+static inline size_t range_size(const struct codetag_type *cttype,
+ const struct codetag_range *range)
+{
+ return ((char *)range->stop - (char *)range->start) /
+ cttype->desc.tag_size;
+}
+
+#ifdef CONFIG_MODULES
+static void *get_symbol(struct module *mod, const char *prefix, const char *name)
+{
+ DECLARE_SEQ_BUF(sb, KSYM_NAME_LEN);
+ const char *buf;
+ void *ret;
+
+ seq_buf_printf(&sb, "%s%s", prefix, name);
+ if (seq_buf_has_overflowed(&sb))
+ return NULL;
+
+ buf = seq_buf_str(&sb);
+ preempt_disable();
+ ret = mod ?
+ (void *)find_kallsyms_symbol_value(mod, buf) :
+ (void *)kallsyms_lookup_name(buf);
+ preempt_enable();
+
+ return ret;
+}
+
+static struct codetag_range get_section_range(struct module *mod,
+ const char *section)
+{
+ return (struct codetag_range) {
+ get_symbol(mod, "__start_", section),
+ get_symbol(mod, "__stop_", section),
+ };
+}
+
+static int codetag_module_init(struct codetag_type *cttype, struct module *mod)
+{
+ struct codetag_range range;
+ struct codetag_module *cmod;
+ int err;
+
+ range = get_section_range(mod, cttype->desc.section);
+ if (!range.start || !range.stop) {
+ pr_warn("Failed to load code tags of type %s from the module %s\n",
+ cttype->desc.section,
+ mod ? mod->name : "(built-in)");
+ return -EINVAL;
+ }
+
+ /* Ignore empty ranges */
+ if (range.start == range.stop)
+ return 0;
+
+ BUG_ON(range.start > range.stop);
+
+ cmod = kmalloc(sizeof(*cmod), GFP_KERNEL);
+ if (unlikely(!cmod))
+ return -ENOMEM;
+
+ cmod->mod = mod;
+ cmod->range = range;
+
+ down_write(&cttype->mod_lock);
+ err = idr_alloc(&cttype->mod_idr, cmod, 0, 0, GFP_KERNEL);
+ if (err >= 0) {
+ cttype->count += range_size(cttype, &range);
+ if (cttype->desc.module_load)
+ cttype->desc.module_load(cttype, cmod);
+ }
+ up_write(&cttype->mod_lock);
+
+ if (err < 0) {
+ kfree(cmod);
+ return err;
+ }
+
+ return 0;
+}
+
+void codetag_load_module(struct module *mod)
+{
+ struct codetag_type *cttype;
+
+ if (!mod)
+ return;
+
+ mutex_lock(&codetag_lock);
+ list_for_each_entry(cttype, &codetag_types, link)
+ codetag_module_init(cttype, mod);
+ mutex_unlock(&codetag_lock);
+}
+
+bool codetag_unload_module(struct module *mod)
+{
+ struct codetag_type *cttype;
+ bool unload_ok = true;
+
+ if (!mod)
+ return true;
+
+ mutex_lock(&codetag_lock);
+ list_for_each_entry(cttype, &codetag_types, link) {
+ struct codetag_module *found = NULL;
+ struct codetag_module *cmod;
+ unsigned long mod_id, tmp;
+
+ down_write(&cttype->mod_lock);
+ idr_for_each_entry_ul(&cttype->mod_idr, cmod, tmp, mod_id) {
+ if (cmod->mod && cmod->mod == mod) {
+ found = cmod;
+ break;
+ }
+ }
+ if (found) {
+ if (cttype->desc.module_unload)
+ if (!cttype->desc.module_unload(cttype, cmod))
+ unload_ok = false;
+
+ cttype->count -= range_size(cttype, &cmod->range);
+ idr_remove(&cttype->mod_idr, mod_id);
+ kfree(cmod);
+ }
+ up_write(&cttype->mod_lock);
+ }
+ mutex_unlock(&codetag_lock);
+
+ return unload_ok;
+}
+
+#else /* CONFIG_MODULES */
+static int codetag_module_init(struct codetag_type *cttype, struct module *mod) { return 0; }
+#endif /* CONFIG_MODULES */
+
+struct codetag_type *
+codetag_register_type(const struct codetag_type_desc *desc)
+{
+ struct codetag_type *cttype;
+ int err;
+
+ BUG_ON(desc->tag_size <= 0);
+
+ cttype = kzalloc(sizeof(*cttype), GFP_KERNEL);
+ if (unlikely(!cttype))
+ return ERR_PTR(-ENOMEM);
+
+ cttype->desc = *desc;
+ idr_init(&cttype->mod_idr);
+ init_rwsem(&cttype->mod_lock);
+
+ err = codetag_module_init(cttype, NULL);
+ if (unlikely(err)) {
+ kfree(cttype);
+ return ERR_PTR(err);
+ }
+
+ mutex_lock(&codetag_lock);
+ list_add_tail(&cttype->link, &codetag_types);
+ mutex_unlock(&codetag_lock);
+
+ return cttype;
+}
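For orientation, here is a hedged sketch of what a new codetag consumer would look like, modeled on alloc_tag_init() in lib/alloc_tag.c above; the section name and type are illustrative, not an existing user:

/* Hypothetical consumer: tags live in an "example_tags" ELF section,
 * delimited by the __start_/__stop_ symbols that get_symbol() looks up
 * for both vmlinux and loadable modules. */
static struct codetag_type *example_cttype;

static int __init example_tags_init(void)
{
	const struct codetag_type_desc desc = {
		.section  = "example_tags",
		.tag_size = sizeof(struct codetag),
	};

	example_cttype = codetag_register_type(&desc);
	return PTR_ERR_OR_ZERO(example_cttype);
}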
diff --git a/lib/crypto/Kconfig b/lib/crypto/Kconfig
index 45436bfc6dff..b01253cac70a 100644
--- a/lib/crypto/Kconfig
+++ b/lib/crypto/Kconfig
@@ -8,6 +8,11 @@ config CRYPTO_LIB_UTILS
config CRYPTO_LIB_AES
tristate
+config CRYPTO_LIB_AESCFB
+ tristate
+ select CRYPTO_LIB_AES
+ select CRYPTO_LIB_UTILS
+
config CRYPTO_LIB_AESGCM
tristate
select CRYPTO_LIB_AES
diff --git a/lib/crypto/Makefile b/lib/crypto/Makefile
index 8d1446c2be71..969baab8c805 100644
--- a/lib/crypto/Makefile
+++ b/lib/crypto/Makefile
@@ -10,6 +10,9 @@ obj-$(CONFIG_CRYPTO_LIB_CHACHA_GENERIC) += libchacha.o
obj-$(CONFIG_CRYPTO_LIB_AES) += libaes.o
libaes-y := aes.o
+obj-$(CONFIG_CRYPTO_LIB_AESCFB) += libaescfb.o
+libaescfb-y := aescfb.o
+
obj-$(CONFIG_CRYPTO_LIB_AESGCM) += libaesgcm.o
libaesgcm-y := aesgcm.o
diff --git a/lib/crypto/aescfb.c b/lib/crypto/aescfb.c
new file mode 100644
index 000000000000..749dc1258a44
--- /dev/null
+++ b/lib/crypto/aescfb.c
@@ -0,0 +1,257 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Minimal library implementation of AES in CFB mode
+ *
+ * Copyright 2023 Google LLC
+ */
+
+#include <linux/module.h>
+
+#include <crypto/algapi.h>
+#include <crypto/aes.h>
+
+#include <asm/irqflags.h>
+
+static void aescfb_encrypt_block(const struct crypto_aes_ctx *ctx, void *dst,
+ const void *src)
+{
+ unsigned long flags;
+
+ /*
+ * In AES-CFB, the AES encryption operates on known 'plaintext' (the IV
+ * and ciphertext), making it susceptible to timing attacks on the
+ * encryption key. The AES library already mitigates this risk to some
+ * extent by pulling the entire S-box into the caches before doing any
+ * substitutions, but this strategy is more effective when running with
+ * interrupts disabled.
+ */
+ local_irq_save(flags);
+ aes_encrypt(ctx, dst, src);
+ local_irq_restore(flags);
+}
+
+/**
+ * aescfb_encrypt - Perform AES-CFB encryption on a block of data
+ *
+ * @ctx: The AES-CFB key schedule
+ * @dst: Pointer to the ciphertext output buffer
+ * @src: Pointer to the plaintext (may equal @dst for encryption in place)
+ * @len: The size in bytes of the plaintext and ciphertext.
+ * @iv: The initialization vector (IV) to use for this block of data
+ */
+void aescfb_encrypt(const struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src,
+ int len, const u8 iv[AES_BLOCK_SIZE])
+{
+ u8 ks[AES_BLOCK_SIZE];
+ const u8 *v = iv;
+
+ while (len > 0) {
+ aescfb_encrypt_block(ctx, ks, v);
+ crypto_xor_cpy(dst, src, ks, min(len, AES_BLOCK_SIZE));
+ v = dst;
+
+ dst += AES_BLOCK_SIZE;
+ src += AES_BLOCK_SIZE;
+ len -= AES_BLOCK_SIZE;
+ }
+
+ memzero_explicit(ks, sizeof(ks));
+}
+EXPORT_SYMBOL(aescfb_encrypt);
+
+/**
+ * aescfb_decrypt - Perform AES-CFB decryption on a block of data
+ *
+ * @ctx: The AES-CFB key schedule
+ * @dst: Pointer to the plaintext output buffer
+ * @src: Pointer to the ciphertext (may equal @dst for decryption in place)
+ * @len: The size in bytes of the plaintext and ciphertext.
+ * @iv: The initialization vector (IV) to use for this block of data
+ */
+void aescfb_decrypt(const struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src,
+ int len, const u8 iv[AES_BLOCK_SIZE])
+{
+ u8 ks[2][AES_BLOCK_SIZE];
+
+ aescfb_encrypt_block(ctx, ks[0], iv);
+
+ for (int i = 0; len > 0; i ^= 1) {
+ if (len > AES_BLOCK_SIZE)
+ /*
+ * Generate the keystream for the next block before
+ * performing the XOR, as that may update in place and
+ * overwrite the ciphertext.
+ */
+ aescfb_encrypt_block(ctx, ks[!i], src);
+
+ crypto_xor_cpy(dst, src, ks[i], min(len, AES_BLOCK_SIZE));
+
+ dst += AES_BLOCK_SIZE;
+ src += AES_BLOCK_SIZE;
+ len -= AES_BLOCK_SIZE;
+ }
+
+ memzero_explicit(ks, sizeof(ks));
+}
+EXPORT_SYMBOL(aescfb_decrypt);
+
+MODULE_DESCRIPTION("Generic AES-CFB library");
+MODULE_AUTHOR("Ard Biesheuvel <ardb@kernel.org>");
+MODULE_LICENSE("GPL");
+
+#ifndef CONFIG_CRYPTO_MANAGER_DISABLE_TESTS
+
+/*
+ * Test code below. Vectors taken from crypto/testmgr.h
+ */
+
+static struct {
+ u8 ptext[64];
+ u8 ctext[64];
+
+ u8 key[AES_MAX_KEY_SIZE];
+ u8 iv[AES_BLOCK_SIZE];
+
+ int klen;
+ int len;
+} const aescfb_tv[] __initconst = {
+ { /* From NIST SP800-38A */
+ .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
+ "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
+ .klen = 16,
+ .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+ .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
+ "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
+ "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
+ "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
+ "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
+ "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
+ "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
+ "\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
+ .ctext = "\x3b\x3f\xd9\x2e\xb7\x2d\xad\x20"
+ "\x33\x34\x49\xf8\xe8\x3c\xfb\x4a"
+ "\xc8\xa6\x45\x37\xa0\xb3\xa9\x3f"
+ "\xcd\xe3\xcd\xad\x9f\x1c\xe5\x8b"
+ "\x26\x75\x1f\x67\xa3\xcb\xb1\x40"
+ "\xb1\x80\x8c\xf1\x87\xa4\xf4\xdf"
+ "\xc0\x4b\x05\x35\x7c\x5d\x1c\x0e"
+ "\xea\xc4\xc6\x6f\x9f\xf7\xf2\xe6",
+ .len = 64,
+ }, {
+ .key = "\x8e\x73\xb0\xf7\xda\x0e\x64\x52"
+ "\xc8\x10\xf3\x2b\x80\x90\x79\xe5"
+ "\x62\xf8\xea\xd2\x52\x2c\x6b\x7b",
+ .klen = 24,
+ .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+ .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
+ "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
+ "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
+ "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
+ "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
+ "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
+ "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
+ "\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
+ .ctext = "\xcd\xc8\x0d\x6f\xdd\xf1\x8c\xab"
+ "\x34\xc2\x59\x09\xc9\x9a\x41\x74"
+ "\x67\xce\x7f\x7f\x81\x17\x36\x21"
+ "\x96\x1a\x2b\x70\x17\x1d\x3d\x7a"
+ "\x2e\x1e\x8a\x1d\xd5\x9b\x88\xb1"
+ "\xc8\xe6\x0f\xed\x1e\xfa\xc4\xc9"
+ "\xc0\x5f\x9f\x9c\xa9\x83\x4f\xa0"
+ "\x42\xae\x8f\xba\x58\x4b\x09\xff",
+ .len = 64,
+ }, {
+ .key = "\x60\x3d\xeb\x10\x15\xca\x71\xbe"
+ "\x2b\x73\xae\xf0\x85\x7d\x77\x81"
+ "\x1f\x35\x2c\x07\x3b\x61\x08\xd7"
+ "\x2d\x98\x10\xa3\x09\x14\xdf\xf4",
+ .klen = 32,
+ .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+ .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
+ "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
+ "\xae\x2d\x8a\x57\x1e\x03\xac\x9c"
+ "\x9e\xb7\x6f\xac\x45\xaf\x8e\x51"
+ "\x30\xc8\x1c\x46\xa3\x5c\xe4\x11"
+ "\xe5\xfb\xc1\x19\x1a\x0a\x52\xef"
+ "\xf6\x9f\x24\x45\xdf\x4f\x9b\x17"
+ "\xad\x2b\x41\x7b\xe6\x6c\x37\x10",
+ .ctext = "\xdc\x7e\x84\xbf\xda\x79\x16\x4b"
+ "\x7e\xcd\x84\x86\x98\x5d\x38\x60"
+ "\x39\xff\xed\x14\x3b\x28\xb1\xc8"
+ "\x32\x11\x3c\x63\x31\xe5\x40\x7b"
+ "\xdf\x10\x13\x24\x15\xe5\x4b\x92"
+ "\xa1\x3e\xd0\xa8\x26\x7a\xe2\xf9"
+ "\x75\xa3\x85\x74\x1a\xb9\xce\xf8"
+ "\x20\x31\x62\x3d\x55\xb1\xe4\x71",
+ .len = 64,
+ }, { /* > 16 bytes, not a multiple of 16 bytes */
+ .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
+ "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
+ .klen = 16,
+ .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+ .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f\x96"
+ "\xe9\x3d\x7e\x11\x73\x93\x17\x2a"
+ "\xae",
+ .ctext = "\x3b\x3f\xd9\x2e\xb7\x2d\xad\x20"
+ "\x33\x34\x49\xf8\xe8\x3c\xfb\x4a"
+ "\xc8",
+ .len = 17,
+ }, { /* < 16 bytes */
+ .key = "\x2b\x7e\x15\x16\x28\xae\xd2\xa6"
+ "\xab\xf7\x15\x88\x09\xcf\x4f\x3c",
+ .klen = 16,
+ .iv = "\x00\x01\x02\x03\x04\x05\x06\x07"
+ "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
+ .ptext = "\x6b\xc1\xbe\xe2\x2e\x40\x9f",
+ .ctext = "\x3b\x3f\xd9\x2e\xb7\x2d\xad",
+ .len = 7,
+ },
+};
+
+static int __init libaescfb_init(void)
+{
+ for (int i = 0; i < ARRAY_SIZE(aescfb_tv); i++) {
+ struct crypto_aes_ctx ctx;
+ u8 buf[64];
+
+ if (aes_expandkey(&ctx, aescfb_tv[i].key, aescfb_tv[i].klen)) {
+ pr_err("aes_expandkey() failed on vector %d\n", i);
+ return -ENODEV;
+ }
+
+ aescfb_encrypt(&ctx, buf, aescfb_tv[i].ptext, aescfb_tv[i].len,
+ aescfb_tv[i].iv);
+ if (memcmp(buf, aescfb_tv[i].ctext, aescfb_tv[i].len)) {
+ pr_err("aescfb_encrypt() #1 failed on vector %d\n", i);
+ return -ENODEV;
+ }
+
+ /* decrypt in place */
+ aescfb_decrypt(&ctx, buf, buf, aescfb_tv[i].len, aescfb_tv[i].iv);
+ if (memcmp(buf, aescfb_tv[i].ptext, aescfb_tv[i].len)) {
+ pr_err("aescfb_decrypt() failed on vector %d\n", i);
+ return -ENODEV;
+ }
+
+ /* encrypt in place */
+ aescfb_encrypt(&ctx, buf, buf, aescfb_tv[i].len, aescfb_tv[i].iv);
+ if (memcmp(buf, aescfb_tv[i].ctext, aescfb_tv[i].len)) {
+ pr_err("aescfb_encrypt() #2 failed on vector %d\n", i);
+
+ return -ENODEV;
+ }
+
+ }
+ return 0;
+}
+module_init(libaescfb_init);
+
+static void __exit libaescfb_exit(void)
+{
+}
+module_exit(libaescfb_exit);
+#endif
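Using the library from other kernel code follows the same pattern as the self-test above; a minimal sketch, with key, IV, and data purely illustrative:

static int example_aescfb(void)
{
	static const u8 key[16] = "0123456789abcdef";	/* illustrative key */
	static const u8 iv[AES_BLOCK_SIZE] = { };	/* illustrative IV */
	struct crypto_aes_ctx ctx;
	u8 buf[] = "secret";
	int err;

	err = aes_expandkey(&ctx, key, sizeof(key));
	if (err)
		return err;

	aescfb_encrypt(&ctx, buf, buf, sizeof(buf), iv);	/* encrypt in place */
	aescfb_decrypt(&ctx, buf, buf, sizeof(buf), iv);	/* back to "secret" */
	return 0;
}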
diff --git a/lib/dim/Makefile b/lib/dim/Makefile
index 1d6858a108cb..c4cc4026c451 100644
--- a/lib/dim/Makefile
+++ b/lib/dim/Makefile
@@ -2,6 +2,6 @@
# DIM Dynamic Interrupt Moderation library
#
-obj-$(CONFIG_DIMLIB) += dim.o
+obj-$(CONFIG_DIMLIB) += dimlib.o
-dim-y := dim.o net_dim.o rdma_dim.o
+dimlib-objs := dim.o net_dim.o rdma_dim.o
diff --git a/lib/dim/dim.c b/lib/dim/dim.c
index e89aaf07bde5..83b65ac74d73 100644
--- a/lib/dim/dim.c
+++ b/lib/dim/dim.c
@@ -82,3 +82,6 @@ bool dim_calc_stats(struct dim_sample *start, struct dim_sample *end,
return true;
}
EXPORT_SYMBOL(dim_calc_stats);
+
+MODULE_DESCRIPTION("Dynamic Interrupt Moderation (DIM) library");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index c78f335fa981..f2c5e7910bb1 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -302,7 +302,11 @@ static int ddebug_tokenize(char *buf, char *words[], int maxwords)
} else {
for (end = buf; *end && !isspace(*end); end++)
;
- BUG_ON(end == buf);
+ if (end == buf) {
+ pr_err("parse err after word:%d=%s\n", nwords,
+ nwords ? words[nwords - 1] : "<none>");
+ return -EINVAL;
+ }
}
/* `buf' is start of word, `end' is one past its end */
diff --git a/lib/dynamic_queue_limits.c b/lib/dynamic_queue_limits.c
index a1389db1c30a..e49deddd3de9 100644
--- a/lib/dynamic_queue_limits.c
+++ b/lib/dynamic_queue_limits.c
@@ -15,12 +15,10 @@
#define POSDIFF(A, B) ((int)((A) - (B)) > 0 ? (A) - (B) : 0)
#define AFTER_EQ(A, B) ((int)((A) - (B)) >= 0)
-static void dql_check_stall(struct dql *dql)
+static void dql_check_stall(struct dql *dql, unsigned short stall_thrs)
{
- unsigned short stall_thrs;
unsigned long now;
- stall_thrs = READ_ONCE(dql->stall_thrs);
if (!stall_thrs)
return;
@@ -86,9 +84,16 @@ void dql_completed(struct dql *dql, unsigned int count)
{
unsigned int inprogress, prev_inprogress, limit;
unsigned int ovlimit, completed, num_queued;
+ unsigned short stall_thrs;
bool all_prev_completed;
num_queued = READ_ONCE(dql->num_queued);
+ /* Read stall_thrs in advance since it belongs to the same (first)
+ * cache line as ->num_queued. This way, dql_check_stall() does not
+ * need to touch the first cache line again later, reducing the window
+ * of possible false sharing.
+ */
+ stall_thrs = READ_ONCE(dql->stall_thrs);
/* Can't complete more than what's in queue */
BUG_ON(count > num_queued - dql->num_completed);
@@ -178,7 +183,7 @@ void dql_completed(struct dql *dql, unsigned int count)
dql->num_completed = completed;
dql->prev_num_queued = num_queued;
- dql_check_stall(dql);
+ dql_check_stall(dql, stall_thrs);
}
EXPORT_SYMBOL(dql_completed);
diff --git a/lib/find_bit.c b/lib/find_bit.c
index 32f99e9a670e..dacadd904250 100644
--- a/lib/find_bit.c
+++ b/lib/find_bit.c
@@ -116,6 +116,18 @@ unsigned long _find_first_and_bit(const unsigned long *addr1,
EXPORT_SYMBOL(_find_first_and_bit);
#endif
+/*
+ * Find the first set bit in three memory regions.
+ */
+unsigned long _find_first_and_and_bit(const unsigned long *addr1,
+ const unsigned long *addr2,
+ const unsigned long *addr3,
+ unsigned long size)
+{
+ return FIND_FIRST_BIT(addr1[idx] & addr2[idx] & addr3[idx], /* nop */, size);
+}
+EXPORT_SYMBOL(_find_first_and_and_bit);
+
#ifndef find_first_zero_bit
/*
* Find the first cleared bit in a memory region.
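A hedged sketch of the three-map lookup in action, via the find_first_and_and_bit() inline wrapper assumed to front _find_first_and_and_bit(); the bitmap contents are illustrative:

static unsigned long example_first_common_bit(void)
{
	DECLARE_BITMAP(a, 64) = { 0 };
	DECLARE_BITMAP(b, 64) = { 0 };
	DECLARE_BITMAP(c, 64) = { 0 };

	bitmap_set(a, 3, 8);	/* bits 3..10 set */
	bitmap_set(b, 5, 8);	/* bits 5..12 set */
	bitmap_set(c, 7, 8);	/* bits 7..14 set */

	/* First bit set in all three maps: 7 (returns 64 if none). */
	return find_first_and_and_bit(a, b, c, 64);
}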
diff --git a/lib/fortify_kunit.c b/lib/fortify_kunit.c
index 493ec02dd5b3..d2377e00caab 100644
--- a/lib/fortify_kunit.c
+++ b/lib/fortify_kunit.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Runtime test cases for CONFIG_FORTIFY_SOURCE. For testing memcpy(),
- * see FORTIFY_MEM_* tests in LKDTM (drivers/misc/lkdtm/fortify.c).
+ * Runtime test cases for CONFIG_FORTIFY_SOURCE. For additional memcpy()
+ * testing see FORTIFY_MEM_* tests in LKDTM (drivers/misc/lkdtm/fortify.c).
*
* For corner cases with UBSAN, try testing with:
*
@@ -15,14 +15,31 @@
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+/* We don't need to fill dmesg with the fortify WARNs during testing. */
+#ifdef DEBUG
+# define FORTIFY_REPORT_KUNIT(x...) __fortify_report(x)
+# define FORTIFY_WARN_KUNIT(x...) WARN_ONCE(x)
+#else
+# define FORTIFY_REPORT_KUNIT(x...) do { } while (0)
+# define FORTIFY_WARN_KUNIT(x...) do { } while (0)
+#endif
+
/* Redefine fortify_panic() to track failures. */
void fortify_add_kunit_error(int write);
#define fortify_panic(func, write, avail, size, retfail) do { \
- __fortify_report(FORTIFY_REASON(func, write), avail, size); \
+ FORTIFY_REPORT_KUNIT(FORTIFY_REASON(func, write), avail, size); \
fortify_add_kunit_error(write); \
return (retfail); \
} while (0)
+/* Redefine fortify_warn_once() to track memcpy() failures. */
+#define fortify_warn_once(chk_func, x...) do { \
+ bool __result = chk_func; \
+ FORTIFY_WARN_KUNIT(__result, x); \
+ if (__result) \
+ fortify_add_kunit_error(1); \
+} while (0)
+
#include <kunit/device.h>
#include <kunit/test.h>
#include <kunit/test-bug.h>
@@ -64,7 +81,7 @@ void fortify_add_kunit_error(int write)
kunit_put_resource(resource);
}
-static void known_sizes_test(struct kunit *test)
+static void fortify_test_known_sizes(struct kunit *test)
{
KUNIT_EXPECT_EQ(test, __compiletime_strlen("88888888"), 8);
KUNIT_EXPECT_EQ(test, __compiletime_strlen(array_of_10), 10);
@@ -97,7 +114,7 @@ static noinline size_t want_minus_one(int pick)
return __compiletime_strlen(str);
}
-static void control_flow_split_test(struct kunit *test)
+static void fortify_test_control_flow_split(struct kunit *test)
{
KUNIT_EXPECT_EQ(test, want_minus_one(pick), SIZE_MAX);
}
@@ -173,11 +190,11 @@ static volatile size_t unknown_size = 50;
#endif
#define DEFINE_ALLOC_SIZE_TEST_PAIR(allocator) \
-static void alloc_size_##allocator##_const_test(struct kunit *test) \
+static void fortify_test_alloc_size_##allocator##_const(struct kunit *test) \
{ \
CONST_TEST_BODY(TEST_##allocator); \
} \
-static void alloc_size_##allocator##_dynamic_test(struct kunit *test) \
+static void fortify_test_alloc_size_##allocator##_dynamic(struct kunit *test) \
{ \
DYNAMIC_TEST_BODY(TEST_##allocator); \
}
@@ -267,28 +284,28 @@ DEFINE_ALLOC_SIZE_TEST_PAIR(vmalloc)
\
checker((expected_pages) * PAGE_SIZE, \
kvmalloc((alloc_pages) * PAGE_SIZE, gfp), \
- vfree(p)); \
+ kvfree(p)); \
checker((expected_pages) * PAGE_SIZE, \
kvmalloc_node((alloc_pages) * PAGE_SIZE, gfp, NUMA_NO_NODE), \
- vfree(p)); \
+ kvfree(p)); \
checker((expected_pages) * PAGE_SIZE, \
kvzalloc((alloc_pages) * PAGE_SIZE, gfp), \
- vfree(p)); \
+ kvfree(p)); \
checker((expected_pages) * PAGE_SIZE, \
kvzalloc_node((alloc_pages) * PAGE_SIZE, gfp, NUMA_NO_NODE), \
- vfree(p)); \
+ kvfree(p)); \
checker((expected_pages) * PAGE_SIZE, \
kvcalloc(1, (alloc_pages) * PAGE_SIZE, gfp), \
- vfree(p)); \
+ kvfree(p)); \
checker((expected_pages) * PAGE_SIZE, \
kvcalloc((alloc_pages) * PAGE_SIZE, 1, gfp), \
- vfree(p)); \
+ kvfree(p)); \
checker((expected_pages) * PAGE_SIZE, \
kvmalloc_array(1, (alloc_pages) * PAGE_SIZE, gfp), \
- vfree(p)); \
+ kvfree(p)); \
checker((expected_pages) * PAGE_SIZE, \
kvmalloc_array((alloc_pages) * PAGE_SIZE, 1, gfp), \
- vfree(p)); \
+ kvfree(p)); \
\
prev_size = (expected_pages) * PAGE_SIZE; \
orig = kvmalloc(prev_size, gfp); \
@@ -346,6 +363,31 @@ DEFINE_ALLOC_SIZE_TEST_PAIR(kvmalloc)
} while (0)
DEFINE_ALLOC_SIZE_TEST_PAIR(devm_kmalloc)
+static const char * const test_strs[] = {
+ "",
+ "Hello there",
+ "A longer string, just for variety",
+};
+
+#define TEST_realloc(checker) do { \
+ gfp_t gfp = GFP_KERNEL; \
+ size_t len; \
+ int i; \
+ \
+ for (i = 0; i < ARRAY_SIZE(test_strs); i++) { \
+ len = strlen(test_strs[i]); \
+ KUNIT_EXPECT_EQ(test, __builtin_constant_p(len), 0); \
+ checker(len, kmemdup_array(test_strs[i], len, 1, gfp), \
+ kfree(p)); \
+ checker(len, kmemdup(test_strs[i], len, gfp), \
+ kfree(p)); \
+ } \
+} while (0)
+static void fortify_test_realloc_size(struct kunit *test)
+{
+ TEST_realloc(check_dynamic);
+}
+
/*
* We can't have an array at the end of a structure or else
* builds without -fstrict-flex-arrays=3 will report them as
@@ -361,7 +403,7 @@ struct fortify_padding {
/* Force compiler into not being able to resolve size at compile-time. */
static volatile int unconst;
-static void strlen_test(struct kunit *test)
+static void fortify_test_strlen(struct kunit *test)
{
struct fortify_padding pad = { };
int i, end = sizeof(pad.buf) - 1;
@@ -384,7 +426,7 @@ static void strlen_test(struct kunit *test)
KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
}
-static void strnlen_test(struct kunit *test)
+static void fortify_test_strnlen(struct kunit *test)
{
struct fortify_padding pad = { };
int i, end = sizeof(pad.buf) - 1;
@@ -422,7 +464,7 @@ static void strnlen_test(struct kunit *test)
KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
}
-static void strcpy_test(struct kunit *test)
+static void fortify_test_strcpy(struct kunit *test)
{
struct fortify_padding pad = { };
char src[sizeof(pad.buf) + 1] = { };
@@ -480,7 +522,7 @@ static void strcpy_test(struct kunit *test)
KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
}
-static void strncpy_test(struct kunit *test)
+static void fortify_test_strncpy(struct kunit *test)
{
struct fortify_padding pad = { };
char src[] = "Copy me fully into a small buffer and I will overflow!";
@@ -539,7 +581,7 @@ static void strncpy_test(struct kunit *test)
KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
}
-static void strscpy_test(struct kunit *test)
+static void fortify_test_strscpy(struct kunit *test)
{
struct fortify_padding pad = { };
char src[] = "Copy me fully into a small buffer and I will overflow!";
@@ -596,7 +638,7 @@ static void strscpy_test(struct kunit *test)
KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
}
-static void strcat_test(struct kunit *test)
+static void fortify_test_strcat(struct kunit *test)
{
struct fortify_padding pad = { };
char src[sizeof(pad.buf) / 2] = { };
@@ -653,7 +695,7 @@ static void strcat_test(struct kunit *test)
KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
}
-static void strncat_test(struct kunit *test)
+static void fortify_test_strncat(struct kunit *test)
{
struct fortify_padding pad = { };
char src[sizeof(pad.buf)] = { };
@@ -726,7 +768,7 @@ static void strncat_test(struct kunit *test)
KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
}
-static void strlcat_test(struct kunit *test)
+static void fortify_test_strlcat(struct kunit *test)
{
struct fortify_padding pad = { };
char src[sizeof(pad.buf)] = { };
@@ -811,7 +853,75 @@ static void strlcat_test(struct kunit *test)
KUNIT_EXPECT_EQ(test, pad.bytes_after, 0);
}
-static void memscan_test(struct kunit *test)
+/* Check for 0-sized arrays... */
+struct fortify_zero_sized {
+ unsigned long bytes_before;
+ char buf[0];
+ unsigned long bytes_after;
+};
+
+#define __fortify_test(memfunc) \
+static void fortify_test_##memfunc(struct kunit *test) \
+{ \
+ struct fortify_zero_sized zero = { }; \
+ struct fortify_padding pad = { }; \
+ char srcA[sizeof(pad.buf) + 2]; \
+ char srcB[sizeof(pad.buf) + 2]; \
+ size_t len = sizeof(pad.buf) + unconst; \
+ \
+ memset(srcA, 'A', sizeof(srcA)); \
+ KUNIT_ASSERT_EQ(test, srcA[0], 'A'); \
+ memset(srcB, 'B', sizeof(srcB)); \
+ KUNIT_ASSERT_EQ(test, srcB[0], 'B'); \
+ \
+ memfunc(pad.buf, srcA, 0 + unconst); \
+ KUNIT_EXPECT_EQ(test, pad.buf[0], '\0'); \
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); \
+ memfunc(pad.buf + 1, srcB, 1 + unconst); \
+ KUNIT_EXPECT_EQ(test, pad.buf[0], '\0'); \
+ KUNIT_EXPECT_EQ(test, pad.buf[1], 'B'); \
+ KUNIT_EXPECT_EQ(test, pad.buf[2], '\0'); \
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); \
+ memfunc(pad.buf, srcA, 1 + unconst); \
+ KUNIT_EXPECT_EQ(test, pad.buf[0], 'A'); \
+ KUNIT_EXPECT_EQ(test, pad.buf[1], 'B'); \
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); \
+ memfunc(pad.buf, srcA, len - 1); \
+ KUNIT_EXPECT_EQ(test, pad.buf[1], 'A'); \
+ KUNIT_EXPECT_EQ(test, pad.buf[len - 1], '\0'); \
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); \
+ memfunc(pad.buf, srcA, len); \
+ KUNIT_EXPECT_EQ(test, pad.buf[1], 'A'); \
+ KUNIT_EXPECT_EQ(test, pad.buf[len - 1], 'A'); \
+ KUNIT_EXPECT_EQ(test, pad.bytes_after, 0); \
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); \
+ memfunc(pad.buf, srcA, len + 1); \
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 1); \
+ memfunc(pad.buf + 1, srcB, len); \
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 2); \
+ \
+ /* Reset error counter. */ \
+ fortify_write_overflows = 0; \
+ /* Copy nothing into nothing: no errors. */ \
+ memfunc(zero.buf, srcB, 0 + unconst); \
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); \
+ /* We currently explicitly ignore zero-sized dests. */ \
+ memfunc(zero.buf, srcB, 1 + unconst); \
+ KUNIT_EXPECT_EQ(test, fortify_read_overflows, 0); \
+ KUNIT_EXPECT_EQ(test, fortify_write_overflows, 0); \
+}
+__fortify_test(memcpy)
+__fortify_test(memmove)
+
+static void fortify_test_memscan(struct kunit *test)
{
char haystack[] = "Where oh where is my memory range?";
char *mem = haystack + strlen("Where oh where is ");
@@ -830,7 +940,7 @@ static void memscan_test(struct kunit *test)
KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
}
-static void memchr_test(struct kunit *test)
+static void fortify_test_memchr(struct kunit *test)
{
char haystack[] = "Where oh where is my memory range?";
char *mem = haystack + strlen("Where oh where is ");
@@ -849,7 +959,7 @@ static void memchr_test(struct kunit *test)
KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
}
-static void memchr_inv_test(struct kunit *test)
+static void fortify_test_memchr_inv(struct kunit *test)
{
char haystack[] = "Where oh where is my memory range?";
char *mem = haystack + 1;
@@ -869,7 +979,7 @@ static void memchr_inv_test(struct kunit *test)
KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
}
-static void memcmp_test(struct kunit *test)
+static void fortify_test_memcmp(struct kunit *test)
{
char one[] = "My mind is going ...";
char two[] = "My mind is going ... I can feel it.";
@@ -891,7 +1001,7 @@ static void memcmp_test(struct kunit *test)
KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
}
-static void kmemdup_test(struct kunit *test)
+static void fortify_test_kmemdup(struct kunit *test)
{
char src[] = "I got Doom running on it!";
char *copy;
@@ -917,19 +1027,19 @@ static void kmemdup_test(struct kunit *test)
/* Out of bounds by 1 byte. */
copy = kmemdup(src, len + 1, GFP_KERNEL);
- KUNIT_EXPECT_NULL(test, copy);
+ KUNIT_EXPECT_PTR_EQ(test, copy, ZERO_SIZE_PTR);
KUNIT_EXPECT_EQ(test, fortify_read_overflows, 1);
kfree(copy);
/* Way out of bounds. */
copy = kmemdup(src, len * 2, GFP_KERNEL);
- KUNIT_EXPECT_NULL(test, copy);
+ KUNIT_EXPECT_PTR_EQ(test, copy, ZERO_SIZE_PTR);
KUNIT_EXPECT_EQ(test, fortify_read_overflows, 2);
kfree(copy);
/* Starting offset causing out of bounds. */
copy = kmemdup(src + 1, len, GFP_KERNEL);
- KUNIT_EXPECT_NULL(test, copy);
+ KUNIT_EXPECT_PTR_EQ(test, copy, ZERO_SIZE_PTR);
KUNIT_EXPECT_EQ(test, fortify_read_overflows, 3);
kfree(copy);
}
@@ -951,31 +1061,33 @@ static int fortify_test_init(struct kunit *test)
}
static struct kunit_case fortify_test_cases[] = {
- KUNIT_CASE(known_sizes_test),
- KUNIT_CASE(control_flow_split_test),
- KUNIT_CASE(alloc_size_kmalloc_const_test),
- KUNIT_CASE(alloc_size_kmalloc_dynamic_test),
- KUNIT_CASE(alloc_size_vmalloc_const_test),
- KUNIT_CASE(alloc_size_vmalloc_dynamic_test),
- KUNIT_CASE(alloc_size_kvmalloc_const_test),
- KUNIT_CASE(alloc_size_kvmalloc_dynamic_test),
- KUNIT_CASE(alloc_size_devm_kmalloc_const_test),
- KUNIT_CASE(alloc_size_devm_kmalloc_dynamic_test),
- KUNIT_CASE(strlen_test),
- KUNIT_CASE(strnlen_test),
- KUNIT_CASE(strcpy_test),
- KUNIT_CASE(strncpy_test),
- KUNIT_CASE(strscpy_test),
- KUNIT_CASE(strcat_test),
- KUNIT_CASE(strncat_test),
- KUNIT_CASE(strlcat_test),
+ KUNIT_CASE(fortify_test_known_sizes),
+ KUNIT_CASE(fortify_test_control_flow_split),
+ KUNIT_CASE(fortify_test_alloc_size_kmalloc_const),
+ KUNIT_CASE(fortify_test_alloc_size_kmalloc_dynamic),
+ KUNIT_CASE(fortify_test_alloc_size_vmalloc_const),
+ KUNIT_CASE(fortify_test_alloc_size_vmalloc_dynamic),
+ KUNIT_CASE(fortify_test_alloc_size_kvmalloc_const),
+ KUNIT_CASE(fortify_test_alloc_size_kvmalloc_dynamic),
+ KUNIT_CASE(fortify_test_alloc_size_devm_kmalloc_const),
+ KUNIT_CASE(fortify_test_alloc_size_devm_kmalloc_dynamic),
+ KUNIT_CASE(fortify_test_realloc_size),
+ KUNIT_CASE(fortify_test_strlen),
+ KUNIT_CASE(fortify_test_strnlen),
+ KUNIT_CASE(fortify_test_strcpy),
+ KUNIT_CASE(fortify_test_strncpy),
+ KUNIT_CASE(fortify_test_strscpy),
+ KUNIT_CASE(fortify_test_strcat),
+ KUNIT_CASE(fortify_test_strncat),
+ KUNIT_CASE(fortify_test_strlcat),
/* skip memset: performs bounds checking on whole structs */
- /* skip memcpy: still using warn-and-overwrite instead of hard-fail */
- KUNIT_CASE(memscan_test),
- KUNIT_CASE(memchr_test),
- KUNIT_CASE(memchr_inv_test),
- KUNIT_CASE(memcmp_test),
- KUNIT_CASE(kmemdup_test),
+ KUNIT_CASE(fortify_test_memcpy),
+ KUNIT_CASE(fortify_test_memmove),
+ KUNIT_CASE(fortify_test_memscan),
+ KUNIT_CASE(fortify_test_memchr),
+ KUNIT_CASE(fortify_test_memchr_inv),
+ KUNIT_CASE(fortify_test_memcmp),
+ KUNIT_CASE(fortify_test_kmemdup),
{}
};
diff --git a/lib/iomap_copy.c b/lib/iomap_copy.c
index 5de7c04e05ef..2fd5712fb7c0 100644
--- a/lib/iomap_copy.c
+++ b/lib/iomap_copy.c
@@ -16,9 +16,8 @@
* time. Order of access is not guaranteed, nor is a memory barrier
* performed afterwards.
*/
-void __attribute__((weak)) __iowrite32_copy(void __iomem *to,
- const void *from,
- size_t count)
+#ifndef __iowrite32_copy
+void __iowrite32_copy(void __iomem *to, const void *from, size_t count)
{
u32 __iomem *dst = to;
const u32 *src = from;
@@ -28,6 +27,7 @@ void __attribute__((weak)) __iowrite32_copy(void __iomem *to,
__raw_writel(*src++, dst++);
}
EXPORT_SYMBOL_GPL(__iowrite32_copy);
+#endif
/**
* __ioread32_copy - copy data from MMIO space, in 32-bit units
@@ -60,9 +60,8 @@ EXPORT_SYMBOL_GPL(__ioread32_copy);
* time. Order of access is not guaranteed, nor is a memory barrier
* performed afterwards.
*/
-void __attribute__((weak)) __iowrite64_copy(void __iomem *to,
- const void *from,
- size_t count)
+#ifndef __iowrite64_copy
+void __iowrite64_copy(void __iomem *to, const void *from, size_t count)
{
#ifdef CONFIG_64BIT
u64 __iomem *dst = to;
@@ -75,5 +74,5 @@ void __attribute__((weak)) __iowrite64_copy(void __iomem *to,
__iowrite32_copy(to, from, count * 2);
#endif
}
-
EXPORT_SYMBOL_GPL(__iowrite64_copy);
+#endif
diff --git a/lib/kunit/Kconfig b/lib/kunit/Kconfig
index 68a6daec0aef..34d7242d526d 100644
--- a/lib/kunit/Kconfig
+++ b/lib/kunit/Kconfig
@@ -24,6 +24,17 @@ config KUNIT_DEBUGFS
test suite, which allow users to see results of the last test suite
run that occurred.
+config KUNIT_FAULT_TEST
+ bool "Enable KUnit tests which print BUG stacktraces"
+ depends on KUNIT_TEST
+ depends on !UML
+ default y
+ help
+ Enables fault handling tests for the KUnit framework. These tests may
+ trigger a kernel BUG(), and the associated stack trace, even when they
+ pass. If this conflicts with your test infrastructure (or is confusing
+ or annoying), they can be disabled by setting this to N.
+
config KUNIT_TEST
tristate "KUnit test for KUnit" if !KUNIT_ALL_TESTS
default KUNIT_ALL_TESTS
diff --git a/lib/kunit/device.c b/lib/kunit/device.c
index abc603730b8e..25c81ed465fb 100644
--- a/lib/kunit/device.c
+++ b/lib/kunit/device.c
@@ -51,7 +51,7 @@ int kunit_bus_init(void)
error = bus_register(&kunit_bus_type);
if (error)
- bus_unregister(&kunit_bus_type);
+ root_device_unregister(kunit_bus_device);
return error;
}
diff --git a/lib/kunit/kunit-test.c b/lib/kunit/kunit-test.c
index f7980ef236a3..e3412e0ca399 100644
--- a/lib/kunit/kunit-test.c
+++ b/lib/kunit/kunit-test.c
@@ -109,6 +109,48 @@ static struct kunit_suite kunit_try_catch_test_suite = {
.test_cases = kunit_try_catch_test_cases,
};
+#if IS_ENABLED(CONFIG_KUNIT_FAULT_TEST)
+
+static void kunit_test_null_dereference(void *data)
+{
+ struct kunit *test = data;
+ int *null = NULL;
+
+ *null = 0;
+
+ KUNIT_FAIL(test, "This line should never be reached\n");
+}
+
+static void kunit_test_fault_null_dereference(struct kunit *test)
+{
+ struct kunit_try_catch_test_context *ctx = test->priv;
+ struct kunit_try_catch *try_catch = ctx->try_catch;
+
+ kunit_try_catch_init(try_catch,
+ test,
+ kunit_test_null_dereference,
+ kunit_test_catch);
+ kunit_try_catch_run(try_catch, test);
+
+ KUNIT_EXPECT_EQ(test, try_catch->try_result, -EINTR);
+ KUNIT_EXPECT_TRUE(test, ctx->function_called);
+}
+
+#endif /* CONFIG_KUNIT_FAULT_TEST */
+
+static struct kunit_case kunit_fault_test_cases[] = {
+#if IS_ENABLED(CONFIG_KUNIT_FAULT_TEST)
+ KUNIT_CASE(kunit_test_fault_null_dereference),
+#endif /* CONFIG_KUNIT_FAULT_TEST */
+ {}
+};
+
+static struct kunit_suite kunit_fault_test_suite = {
+ .name = "kunit_fault",
+ .init = kunit_try_catch_test_init,
+ .test_cases = kunit_fault_test_cases,
+};
+
/*
* Context for testing test managed resources
* is_resource_initialized is used to test arbitrary resources
@@ -826,6 +868,7 @@ static struct kunit_suite kunit_current_test_suite = {
kunit_test_suites(&kunit_try_catch_test_suite, &kunit_resource_test_suite,
&kunit_log_test_suite, &kunit_status_test_suite,
- &kunit_current_test_suite, &kunit_device_test_suite);
+ &kunit_current_test_suite, &kunit_device_test_suite,
+ &kunit_fault_test_suite);
MODULE_LICENSE("GPL v2");
diff --git a/lib/kunit/string-stream-test.c b/lib/kunit/string-stream-test.c
index 03fb511826f7..7511442ea98f 100644
--- a/lib/kunit/string-stream-test.c
+++ b/lib/kunit/string-stream-test.c
@@ -22,18 +22,10 @@ struct string_stream_test_priv {
};
/* Avoids a cast warning if kfree() is passed direct to kunit_add_action(). */
-static void kfree_wrapper(void *p)
-{
- kfree(p);
-}
+KUNIT_DEFINE_ACTION_WRAPPER(kfree_wrapper, kfree, const void *);
/* Avoids a cast warning if string_stream_destroy() is passed direct to kunit_add_action(). */
-static void cleanup_raw_stream(void *p)
-{
- struct string_stream *stream = p;
-
- string_stream_destroy(stream);
-}
+KUNIT_DEFINE_ACTION_WRAPPER(cleanup_raw_stream, string_stream_destroy, struct string_stream *);
static char *get_concatenated_string(struct kunit *test, struct string_stream *stream)
{
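KUNIT_DEFINE_ACTION_WRAPPER(wrapper, func, type) generates a void (*)(void *) trampoline that casts its argument before calling func; roughly the following, paraphrasing rather than quoting the macro body:

/* Roughly what KUNIT_DEFINE_ACTION_WRAPPER(kfree_wrapper, kfree, const void *)
 * expands to: a deferred-action callback with the signature KUnit expects,
 * without the function-pointer cast that triggered the warnings. */
static void kfree_wrapper(void *in)
{
	const void *arg = (const void *)in;

	kfree(arg);
}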
diff --git a/lib/kunit/test.c b/lib/kunit/test.c
index 1d1475578515..b8514dbb337c 100644
--- a/lib/kunit/test.c
+++ b/lib/kunit/test.c
@@ -712,6 +712,9 @@ int __kunit_test_suites_init(struct kunit_suite * const * const suites, int num_
{
unsigned int i;
+ if (num_suites == 0)
+ return 0;
+
if (!kunit_enabled() && num_suites > 0) {
pr_info("kunit: disabled\n");
return 0;
diff --git a/lib/kunit/try-catch.c b/lib/kunit/try-catch.c
index f7825991d576..6bbe0025b079 100644
--- a/lib/kunit/try-catch.c
+++ b/lib/kunit/try-catch.c
@@ -11,13 +11,14 @@
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
+#include <linux/sched/task.h>
#include "try-catch-impl.h"
void __noreturn kunit_try_catch_throw(struct kunit_try_catch *try_catch)
{
try_catch->try_result = -EFAULT;
- kthread_complete_and_exit(try_catch->try_completion, -EFAULT);
+ kthread_exit(0);
}
EXPORT_SYMBOL_GPL(kunit_try_catch_throw);
@@ -25,9 +26,12 @@ static int kunit_generic_run_threadfn_adapter(void *data)
{
struct kunit_try_catch *try_catch = data;
+ try_catch->try_result = -EINTR;
try_catch->try(try_catch->context);
+ if (try_catch->try_result == -EINTR)
+ try_catch->try_result = 0;
- kthread_complete_and_exit(try_catch->try_completion, 0);
+ return 0;
}
static unsigned long kunit_test_timeout(void)
@@ -57,30 +61,38 @@ static unsigned long kunit_test_timeout(void)
void kunit_try_catch_run(struct kunit_try_catch *try_catch, void *context)
{
- DECLARE_COMPLETION_ONSTACK(try_completion);
struct kunit *test = try_catch->test;
struct task_struct *task_struct;
+ struct completion *task_done;
int exit_code, time_remaining;
try_catch->context = context;
- try_catch->try_completion = &try_completion;
try_catch->try_result = 0;
- task_struct = kthread_run(kunit_generic_run_threadfn_adapter,
- try_catch,
- "kunit_try_catch_thread");
+ task_struct = kthread_create(kunit_generic_run_threadfn_adapter,
+ try_catch, "kunit_try_catch_thread");
if (IS_ERR(task_struct)) {
+ try_catch->try_result = PTR_ERR(task_struct);
try_catch->catch(try_catch->context);
return;
}
+ get_task_struct(task_struct);
+ /*
+ * As with vfork(2), task_struct->vfork_done (pointing to the
+ * underlying kthread->exited) can be used to wait for the end of a
+ * kernel thread. It is set to NULL when the thread exits, so we
+ * keep a copy here.
+ */
+ task_done = task_struct->vfork_done;
+ wake_up_process(task_struct);
- time_remaining = wait_for_completion_timeout(&try_completion,
+ time_remaining = wait_for_completion_timeout(task_done,
kunit_test_timeout());
if (time_remaining == 0) {
- kunit_err(test, "try timed out\n");
try_catch->try_result = -ETIMEDOUT;
kthread_stop(task_struct);
}
+ put_task_struct(task_struct);
exit_code = try_catch->try_result;
if (!exit_code)
@@ -88,8 +100,14 @@ void kunit_try_catch_run(struct kunit_try_catch *try_catch, void *context)
if (exit_code == -EFAULT)
try_catch->try_result = 0;
- else if (exit_code == -EINTR)
- kunit_err(test, "wake_up_process() was never called\n");
+ else if (exit_code == -EINTR) {
+ if (test->last_seen.file)
+ kunit_err(test, "try faulted: last line seen %s:%d\n",
+ test->last_seen.file, test->last_seen.line);
+ else
+ kunit_err(test, "try faulted\n");
+ } else if (exit_code == -ETIMEDOUT)
+ kunit_err(test, "try timed out\n");
else if (exit_code)
kunit_err(test, "Unknown error: %d\n", exit_code);
diff --git a/lib/kunit_iov_iter.c b/lib/kunit_iov_iter.c
index 859b67c4d697..27e0c8ee71d8 100644
--- a/lib/kunit_iov_iter.c
+++ b/lib/kunit_iov_iter.c
@@ -139,7 +139,7 @@ static void __init iov_kunit_copy_to_kvec(struct kunit *test)
return;
}
- KUNIT_SUCCEED();
+ KUNIT_SUCCEED(test);
}
/*
@@ -194,7 +194,7 @@ stop:
return;
}
- KUNIT_SUCCEED();
+ KUNIT_SUCCEED(test);
}
struct bvec_test_range {
@@ -302,7 +302,7 @@ static void __init iov_kunit_copy_to_bvec(struct kunit *test)
return;
}
- KUNIT_SUCCEED();
+ KUNIT_SUCCEED(test);
}
/*
@@ -359,7 +359,7 @@ stop:
return;
}
- KUNIT_SUCCEED();
+ KUNIT_SUCCEED(test);
}
static void iov_kunit_destroy_xarray(void *data)
@@ -453,7 +453,7 @@ static void __init iov_kunit_copy_to_xarray(struct kunit *test)
return;
}
- KUNIT_SUCCEED();
+ KUNIT_SUCCEED(test);
}
/*
@@ -516,7 +516,7 @@ stop:
return;
}
- KUNIT_SUCCEED();
+ KUNIT_SUCCEED(test);
}
/*
@@ -596,7 +596,7 @@ static void __init iov_kunit_extract_pages_kvec(struct kunit *test)
stop:
KUNIT_EXPECT_EQ(test, size, 0);
KUNIT_EXPECT_EQ(test, iter.count, 0);
- KUNIT_SUCCEED();
+ KUNIT_SUCCEED(test);
}
/*
@@ -674,7 +674,7 @@ static void __init iov_kunit_extract_pages_bvec(struct kunit *test)
stop:
KUNIT_EXPECT_EQ(test, size, 0);
KUNIT_EXPECT_EQ(test, iter.count, 0);
- KUNIT_SUCCEED();
+ KUNIT_SUCCEED(test);
}
/*
@@ -753,7 +753,7 @@ static void __init iov_kunit_extract_pages_xarray(struct kunit *test)
}
stop:
- KUNIT_SUCCEED();
+ KUNIT_SUCCEED(test);
}
static struct kunit_case __refdata iov_kunit_cases[] = {
diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index 55e1b35bf877..2d7d27e6ae3c 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -5109,18 +5109,18 @@ int mas_empty_area_rev(struct ma_state *mas, unsigned long min,
if (size == 0 || max - min < size - 1)
return -EINVAL;
- if (mas_is_start(mas)) {
+ if (mas_is_start(mas))
mas_start(mas);
- mas->offset = mas_data_end(mas);
- } else if (mas->offset >= 2) {
- mas->offset -= 2;
- } else if (!mas_rewind_node(mas)) {
+ else if ((mas->offset < 2) && (!mas_rewind_node(mas)))
return -EBUSY;
- }
- /* Empty set. */
- if (mas_is_none(mas) || mas_is_ptr(mas))
+ if (unlikely(mas_is_none(mas) || mas_is_ptr(mas)))
return mas_sparse_area(mas, min, max, size, false);
+ else if (mas->offset >= 2)
+ mas->offset -= 2;
+ else
+ mas->offset = mas_data_end(mas);
+
/* The start of the window can only be within these values. */
mas->index = min;
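
For orientation, mas_empty_area_rev() walks the tree from the top of the range downwards looking for a free gap. A minimal caller sketch, assuming the usual maple tree API (the tree and the bounds are illustrative):

	MA_STATE(mas, &tree, 0, 0);

	/* Find the highest-placed gap of 16 slots within [min, max]. */
	if (mas_empty_area_rev(&mas, min, max, 16) == 0)
		pr_info("gap found at [%lu, %lu]\n", mas.index, mas.last);
	else
		pr_info("no gap large enough\n");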
diff --git a/lib/math/prime_numbers.c b/lib/math/prime_numbers.c
index d42cebf7407f..d3b64b10da1c 100644
--- a/lib/math/prime_numbers.c
+++ b/lib/math/prime_numbers.c
@@ -6,8 +6,6 @@
#include <linux/prime_numbers.h>
#include <linux/slab.h>
-#define bitmap_size(nbits) (BITS_TO_LONGS(nbits) * sizeof(unsigned long))
-
struct primes {
struct rcu_head rcu;
unsigned long last, sz;
diff --git a/lib/memcpy_kunit.c b/lib/memcpy_kunit.c
index fd16e6ce53d1..20ea9038c3ff 100644
--- a/lib/memcpy_kunit.c
+++ b/lib/memcpy_kunit.c
@@ -493,58 +493,6 @@ static void memmove_overlap_test(struct kunit *test)
}
}
-static void strtomem_test(struct kunit *test)
-{
- static const char input[sizeof(unsigned long)] = "hi";
- static const char truncate[] = "this is too long";
- struct {
- unsigned long canary1;
- unsigned char output[sizeof(unsigned long)] __nonstring;
- unsigned long canary2;
- } wrap;
-
- memset(&wrap, 0xFF, sizeof(wrap));
- KUNIT_EXPECT_EQ_MSG(test, wrap.canary1, ULONG_MAX,
- "bad initial canary value");
- KUNIT_EXPECT_EQ_MSG(test, wrap.canary2, ULONG_MAX,
- "bad initial canary value");
-
- /* Check unpadded copy leaves surroundings untouched. */
- strtomem(wrap.output, input);
- KUNIT_EXPECT_EQ(test, wrap.canary1, ULONG_MAX);
- KUNIT_EXPECT_EQ(test, wrap.output[0], input[0]);
- KUNIT_EXPECT_EQ(test, wrap.output[1], input[1]);
- for (size_t i = 2; i < sizeof(wrap.output); i++)
- KUNIT_EXPECT_EQ(test, wrap.output[i], 0xFF);
- KUNIT_EXPECT_EQ(test, wrap.canary2, ULONG_MAX);
-
- /* Check truncated copy leaves surroundings untouched. */
- memset(&wrap, 0xFF, sizeof(wrap));
- strtomem(wrap.output, truncate);
- KUNIT_EXPECT_EQ(test, wrap.canary1, ULONG_MAX);
- for (size_t i = 0; i < sizeof(wrap.output); i++)
- KUNIT_EXPECT_EQ(test, wrap.output[i], truncate[i]);
- KUNIT_EXPECT_EQ(test, wrap.canary2, ULONG_MAX);
-
- /* Check padded copy leaves only string padded. */
- memset(&wrap, 0xFF, sizeof(wrap));
- strtomem_pad(wrap.output, input, 0xAA);
- KUNIT_EXPECT_EQ(test, wrap.canary1, ULONG_MAX);
- KUNIT_EXPECT_EQ(test, wrap.output[0], input[0]);
- KUNIT_EXPECT_EQ(test, wrap.output[1], input[1]);
- for (size_t i = 2; i < sizeof(wrap.output); i++)
- KUNIT_EXPECT_EQ(test, wrap.output[i], 0xAA);
- KUNIT_EXPECT_EQ(test, wrap.canary2, ULONG_MAX);
-
- /* Check truncated padded copy has no padding. */
- memset(&wrap, 0xFF, sizeof(wrap));
- strtomem(wrap.output, truncate);
- KUNIT_EXPECT_EQ(test, wrap.canary1, ULONG_MAX);
- for (size_t i = 0; i < sizeof(wrap.output); i++)
- KUNIT_EXPECT_EQ(test, wrap.output[i], truncate[i]);
- KUNIT_EXPECT_EQ(test, wrap.canary2, ULONG_MAX);
-}
-
static struct kunit_case memcpy_test_cases[] = {
KUNIT_CASE(memset_test),
KUNIT_CASE(memcpy_test),
@@ -552,7 +500,6 @@ static struct kunit_case memcpy_test_cases[] = {
KUNIT_CASE_SLOW(memmove_test),
KUNIT_CASE_SLOW(memmove_large_test),
KUNIT_CASE_SLOW(memmove_overlap_test),
- KUNIT_CASE(strtomem_test),
{}
};
diff --git a/lib/objpool.c b/lib/objpool.c
index cfdc02420884..234f9d0bd081 100644
--- a/lib/objpool.c
+++ b/lib/objpool.c
@@ -50,7 +50,7 @@ objpool_init_percpu_slots(struct objpool_head *pool, int nr_objs,
{
int i, cpu_count = 0;
- for (i = 0; i < pool->nr_cpus; i++) {
+ for (i = 0; i < nr_cpu_ids; i++) {
struct objpool_slot *slot;
int nodes, size, rc;
@@ -60,8 +60,8 @@ objpool_init_percpu_slots(struct objpool_head *pool, int nr_objs,
continue;
/* compute how many objects to be allocated with this slot */
- nodes = nr_objs / num_possible_cpus();
- if (cpu_count < (nr_objs % num_possible_cpus()))
+ nodes = nr_objs / pool->nr_possible_cpus;
+ if (cpu_count < (nr_objs % pool->nr_possible_cpus))
nodes++;
cpu_count++;
@@ -103,7 +103,7 @@ static void objpool_fini_percpu_slots(struct objpool_head *pool)
if (!pool->cpu_slots)
return;
- for (i = 0; i < pool->nr_cpus; i++)
+ for (i = 0; i < nr_cpu_ids; i++)
kvfree(pool->cpu_slots[i]);
kfree(pool->cpu_slots);
}
@@ -130,13 +130,13 @@ int objpool_init(struct objpool_head *pool, int nr_objs, int object_size,
/* initialize objpool pool */
memset(pool, 0, sizeof(struct objpool_head));
- pool->nr_cpus = nr_cpu_ids;
+ pool->nr_possible_cpus = num_possible_cpus();
pool->obj_size = object_size;
pool->capacity = capacity;
pool->gfp = gfp & ~__GFP_ZERO;
pool->context = context;
pool->release = release;
- slot_size = pool->nr_cpus * sizeof(struct objpool_slot);
+ slot_size = nr_cpu_ids * sizeof(struct objpool_slot);
pool->cpu_slots = kzalloc(slot_size, pool->gfp);
if (!pool->cpu_slots)
return -ENOMEM;
@@ -152,106 +152,6 @@ int objpool_init(struct objpool_head *pool, int nr_objs, int object_size,
}
EXPORT_SYMBOL_GPL(objpool_init);
-/* adding object to slot, abort if the slot was already full */
-static inline int
-objpool_try_add_slot(void *obj, struct objpool_head *pool, int cpu)
-{
- struct objpool_slot *slot = pool->cpu_slots[cpu];
- uint32_t head, tail;
-
- /* loading tail and head as a local snapshot, tail first */
- tail = READ_ONCE(slot->tail);
-
- do {
- head = READ_ONCE(slot->head);
- /* fault caught: something must be wrong */
- WARN_ON_ONCE(tail - head > pool->nr_objs);
- } while (!try_cmpxchg_acquire(&slot->tail, &tail, tail + 1));
-
- /* now the tail position is reserved for the given obj */
- WRITE_ONCE(slot->entries[tail & slot->mask], obj);
- /* update sequence to make this obj available for pop() */
- smp_store_release(&slot->last, tail + 1);
-
- return 0;
-}
-
-/* reclaim an object to object pool */
-int objpool_push(void *obj, struct objpool_head *pool)
-{
- unsigned long flags;
- int rc;
-
- /* disable local irq to avoid preemption & interruption */
- raw_local_irq_save(flags);
- rc = objpool_try_add_slot(obj, pool, raw_smp_processor_id());
- raw_local_irq_restore(flags);
-
- return rc;
-}
-EXPORT_SYMBOL_GPL(objpool_push);
-
-/* try to retrieve object from slot */
-static inline void *objpool_try_get_slot(struct objpool_head *pool, int cpu)
-{
- struct objpool_slot *slot = pool->cpu_slots[cpu];
- /* load head snapshot, other cpus may change it */
- uint32_t head = smp_load_acquire(&slot->head);
-
- while (head != READ_ONCE(slot->last)) {
- void *obj;
-
- /*
- * data visibility of 'last' and 'head' could be out of
- * order since memory updating of 'last' and 'head' are
- * performed in push() and pop() independently
- *
- * before any retrieving attempts, pop() must guarantee
- * 'last' is behind 'head', that is to say, there must
- * be available objects in slot, which could be ensured
- * by condition 'last != head && last - head <= nr_objs'
- * that is equivalent to 'last - head - 1 < nr_objs' as
- * 'last' and 'head' are both unsigned int32
- */
- if (READ_ONCE(slot->last) - head - 1 >= pool->nr_objs) {
- head = READ_ONCE(slot->head);
- continue;
- }
-
- /* obj must be retrieved before moving forward head */
- obj = READ_ONCE(slot->entries[head & slot->mask]);
-
- /* move head forward to mark it's consumption */
- if (try_cmpxchg_release(&slot->head, &head, head + 1))
- return obj;
- }
-
- return NULL;
-}
-
-/* allocate an object from object pool */
-void *objpool_pop(struct objpool_head *pool)
-{
- void *obj = NULL;
- unsigned long flags;
- int i, cpu;
-
- /* disable local irq to avoid preemption & interruption */
- raw_local_irq_save(flags);
-
- cpu = raw_smp_processor_id();
- for (i = 0; i < num_possible_cpus(); i++) {
- obj = objpool_try_get_slot(pool, cpu);
- if (obj)
- break;
- cpu = cpumask_next_wrap(cpu, cpu_possible_mask, -1, 1);
- }
- raw_local_irq_restore(flags);
-
- return obj;
-}
-EXPORT_SYMBOL_GPL(objpool_pop);
-
 /* release whole objpool forcibly */
void objpool_free(struct objpool_head *pool)
{
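
With objpool_push() and objpool_pop() removed from this file (presumably continuing life as inline helpers in the objpool header, which this diff alone does not show), a quick usage sketch of the public API for reference. The argument order follows the objpool_init() prototype as inferred from this file, and struct my_obj is a stand-in:

	/* Sketch: a pool of 64 objects; take one, return it, tear down. */
	struct objpool_head pool;

	if (!objpool_init(&pool, 64, sizeof(struct my_obj), GFP_KERNEL,
			  NULL /* context */, NULL /* objinit */, NULL /* release */)) {
		struct my_obj *obj = objpool_pop(&pool);	/* NULL when empty */

		if (obj)
			objpool_push(obj, &pool);	/* back onto a per-CPU slot */
		objpool_free(&pool);
	}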
diff --git a/lib/raid6/Makefile b/lib/raid6/Makefile
index 385a94aa0b99..8785353c6140 100644
--- a/lib/raid6/Makefile
+++ b/lib/raid6/Makefile
@@ -53,7 +53,7 @@ endif
endif
quiet_cmd_unroll = UNROLL $@
- cmd_unroll = $(AWK) -v N=$* -f $(srctree)/$(src)/unroll.awk < $< > $@
+ cmd_unroll = $(AWK) -v N=$* -f $(src)/unroll.awk < $< > $@
targets += int1.c int2.c int4.c int8.c
$(obj)/int%.c: $(src)/int.uc $(src)/unroll.awk FORCE
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 6ae2ba8e06a2..dbbed19f8fff 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -130,7 +130,8 @@ static union nested_table *nested_table_alloc(struct rhashtable *ht,
if (ntbl)
return ntbl;
- ntbl = kzalloc(PAGE_SIZE, GFP_ATOMIC);
+ ntbl = alloc_hooks_tag(ht->alloc_tag,
+ kmalloc_noprof(PAGE_SIZE, GFP_ATOMIC|__GFP_ZERO));
if (ntbl && leaf) {
for (i = 0; i < PAGE_SIZE / sizeof(ntbl[0]); i++)
@@ -157,7 +158,8 @@ static struct bucket_table *nested_bucket_table_alloc(struct rhashtable *ht,
size = sizeof(*tbl) + sizeof(tbl->buckets[0]);
- tbl = kzalloc(size, gfp);
+ tbl = alloc_hooks_tag(ht->alloc_tag,
+ kmalloc_noprof(size, gfp|__GFP_ZERO));
if (!tbl)
return NULL;
@@ -181,7 +183,9 @@ static struct bucket_table *bucket_table_alloc(struct rhashtable *ht,
int i;
static struct lock_class_key __key;
- tbl = kvzalloc(struct_size(tbl, buckets, nbuckets), gfp);
+ tbl = alloc_hooks_tag(ht->alloc_tag,
+ kvmalloc_node_noprof(struct_size(tbl, buckets, nbuckets),
+ gfp|__GFP_ZERO, NUMA_NO_NODE));
size = nbuckets;
@@ -1016,7 +1020,7 @@ static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
* .obj_hashfn = my_hash_fn,
* };
*/
-int rhashtable_init(struct rhashtable *ht,
+int rhashtable_init_noprof(struct rhashtable *ht,
const struct rhashtable_params *params)
{
struct bucket_table *tbl;
@@ -1031,6 +1035,8 @@ int rhashtable_init(struct rhashtable *ht,
spin_lock_init(&ht->lock);
memcpy(&ht->p, params, sizeof(*params));
+ alloc_tag_record(ht->alloc_tag);
+
if (params->min_size)
ht->p.min_size = roundup_pow_of_two(params->min_size);
@@ -1076,7 +1082,7 @@ int rhashtable_init(struct rhashtable *ht,
return 0;
}
-EXPORT_SYMBOL_GPL(rhashtable_init);
+EXPORT_SYMBOL_GPL(rhashtable_init_noprof);
/**
* rhltable_init - initialize a new hash list table
@@ -1087,15 +1093,15 @@ EXPORT_SYMBOL_GPL(rhashtable_init);
*
* See documentation for rhashtable_init.
*/
-int rhltable_init(struct rhltable *hlt, const struct rhashtable_params *params)
+int rhltable_init_noprof(struct rhltable *hlt, const struct rhashtable_params *params)
{
int err;
- err = rhashtable_init(&hlt->ht, params);
+ err = rhashtable_init_noprof(&hlt->ht, params);
hlt->ht.rhlist = true;
return err;
}
-EXPORT_SYMBOL_GPL(rhltable_init);
+EXPORT_SYMBOL_GPL(rhltable_init_noprof);
static void rhashtable_free_one(struct rhashtable *ht, struct rhash_head *obj,
void (*free_fn)(void *ptr, void *arg),
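
The _noprof renames follow the memory-allocation-profiling convention used throughout this series: callers keep using the old names, which header-side wrappers expand so the allocation is accounted to the call site. The wrapper shape below is an assumption, not shown in this diff:

	/* Assumed header-side compatibility wrappers. */
	#define rhashtable_init(ht, params) \
		alloc_hooks(rhashtable_init_noprof(ht, params))
	#define rhltable_init(hlt, params) \
		alloc_hooks(rhltable_init_noprof(hlt, params))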
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index 92c6b1fd8989..1e453f825c05 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -494,18 +494,18 @@ unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags,
struct sbitmap_word *map = &sb->map[index];
unsigned long get_mask;
unsigned int map_depth = __map_depth(sb, index);
+ unsigned long val;
sbitmap_deferred_clear(map);
- if (map->word == (1UL << (map_depth - 1)) - 1)
+ val = READ_ONCE(map->word);
+ if (val == (1UL << (map_depth - 1)) - 1)
goto next;
- nr = find_first_zero_bit(&map->word, map_depth);
+ nr = find_first_zero_bit(&val, map_depth);
if (nr + nr_tags <= map_depth) {
atomic_long_t *ptr = (atomic_long_t *) &map->word;
- unsigned long val;
get_mask = ((1UL << nr_tags) - 1) << nr;
- val = READ_ONCE(map->word);
while (!atomic_long_try_cmpxchg(ptr, &val,
get_mask | val))
;
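
The point of the sbitmap change is the usual snapshot pattern for lockless readers: read the shared word once, then run both the zero-bit search and the cmpxchg against that single local copy, so the two cannot disagree. In miniature, using the names from the hunk:

	unsigned long val = READ_ONCE(map->word);		/* one racy read */
	unsigned int nr = find_first_zero_bit(&val, map_depth);	/* search the snapshot */

	/* cmpxchg against the same snapshot; val is reloaded on failure */
	atomic_long_try_cmpxchg((atomic_long_t *)&map->word, &val, get_mask | val);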
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 68b45c82c37a..7bc2220fea80 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -1124,7 +1124,7 @@ static ssize_t extract_user_to_sg(struct iov_iter *iter,
do {
res = iov_iter_extract_pages(iter, &pages, maxsize, sg_max,
extraction_flags, &off);
- if (res < 0)
+ if (res <= 0)
goto failed;
len = res;
diff --git a/lib/slub_kunit.c b/lib/slub_kunit.c
index d4a3730b08fa..4ce960438806 100644
--- a/lib/slub_kunit.c
+++ b/lib/slub_kunit.c
@@ -55,7 +55,7 @@ static void test_next_pointer(struct kunit *test)
ptr_addr = (unsigned long *)(p + s->offset);
tmp = *ptr_addr;
- p[s->offset] = 0x12;
+ p[s->offset] = ~p[s->offset];
/*
* Expecting three errors.
diff --git a/lib/strcat_kunit.c b/lib/strcat_kunit.c
deleted file mode 100644
index e21be95514af..000000000000
--- a/lib/strcat_kunit.c
+++ /dev/null
@@ -1,104 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Kernel module for testing 'strcat' family of functions.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <kunit/test.h>
-#include <linux/string.h>
-
-static volatile int unconst;
-
-static void strcat_test(struct kunit *test)
-{
- char dest[8];
-
- /* Destination is terminated. */
- memset(dest, 0, sizeof(dest));
- KUNIT_EXPECT_EQ(test, strlen(dest), 0);
- /* Empty copy does nothing. */
- KUNIT_EXPECT_TRUE(test, strcat(dest, "") == dest);
- KUNIT_EXPECT_STREQ(test, dest, "");
- /* 4 characters copied in, stops at %NUL. */
- KUNIT_EXPECT_TRUE(test, strcat(dest, "four\000123") == dest);
- KUNIT_EXPECT_STREQ(test, dest, "four");
- KUNIT_EXPECT_EQ(test, dest[5], '\0');
- /* 2 more characters copied in okay. */
- KUNIT_EXPECT_TRUE(test, strcat(dest, "AB") == dest);
- KUNIT_EXPECT_STREQ(test, dest, "fourAB");
-}
-
-static void strncat_test(struct kunit *test)
-{
- char dest[8];
-
- /* Destination is terminated. */
- memset(dest, 0, sizeof(dest));
- KUNIT_EXPECT_EQ(test, strlen(dest), 0);
- /* Empty copy of size 0 does nothing. */
- KUNIT_EXPECT_TRUE(test, strncat(dest, "", 0 + unconst) == dest);
- KUNIT_EXPECT_STREQ(test, dest, "");
- /* Empty copy of size 1 does nothing too. */
- KUNIT_EXPECT_TRUE(test, strncat(dest, "", 1 + unconst) == dest);
- KUNIT_EXPECT_STREQ(test, dest, "");
- /* Copy of max 0 characters should do nothing. */
- KUNIT_EXPECT_TRUE(test, strncat(dest, "asdf", 0 + unconst) == dest);
- KUNIT_EXPECT_STREQ(test, dest, "");
-
- /* 4 characters copied in, even if max is 8. */
- KUNIT_EXPECT_TRUE(test, strncat(dest, "four\000123", 8 + unconst) == dest);
- KUNIT_EXPECT_STREQ(test, dest, "four");
- KUNIT_EXPECT_EQ(test, dest[5], '\0');
- KUNIT_EXPECT_EQ(test, dest[6], '\0');
- /* 2 characters copied in okay, 2 ignored. */
- KUNIT_EXPECT_TRUE(test, strncat(dest, "ABCD", 2 + unconst) == dest);
- KUNIT_EXPECT_STREQ(test, dest, "fourAB");
-}
-
-static void strlcat_test(struct kunit *test)
-{
- char dest[8] = "";
- int len = sizeof(dest) + unconst;
-
- /* Destination is terminated. */
- KUNIT_EXPECT_EQ(test, strlen(dest), 0);
- /* Empty copy is size 0. */
- KUNIT_EXPECT_EQ(test, strlcat(dest, "", len), 0);
- KUNIT_EXPECT_STREQ(test, dest, "");
- /* Size 1 should keep buffer terminated, report size of source only. */
- KUNIT_EXPECT_EQ(test, strlcat(dest, "four", 1 + unconst), 4);
- KUNIT_EXPECT_STREQ(test, dest, "");
-
- /* 4 characters copied in. */
- KUNIT_EXPECT_EQ(test, strlcat(dest, "four", len), 4);
- KUNIT_EXPECT_STREQ(test, dest, "four");
- /* 2 characters copied in okay, gets to 6 total. */
- KUNIT_EXPECT_EQ(test, strlcat(dest, "AB", len), 6);
- KUNIT_EXPECT_STREQ(test, dest, "fourAB");
- /* 2 characters ignored if max size (7) reached. */
- KUNIT_EXPECT_EQ(test, strlcat(dest, "CD", 7 + unconst), 8);
- KUNIT_EXPECT_STREQ(test, dest, "fourAB");
- /* 1 of 2 characters skipped, now at true max size. */
- KUNIT_EXPECT_EQ(test, strlcat(dest, "EFG", len), 9);
- KUNIT_EXPECT_STREQ(test, dest, "fourABE");
- /* Everything else ignored, now at full size. */
- KUNIT_EXPECT_EQ(test, strlcat(dest, "1234", len), 11);
- KUNIT_EXPECT_STREQ(test, dest, "fourABE");
-}
-
-static struct kunit_case strcat_test_cases[] = {
- KUNIT_CASE(strcat_test),
- KUNIT_CASE(strncat_test),
- KUNIT_CASE(strlcat_test),
- {}
-};
-
-static struct kunit_suite strcat_test_suite = {
- .name = "strcat",
- .test_cases = strcat_test_cases,
-};
-
-kunit_test_suite(strcat_test_suite);
-
-MODULE_LICENSE("GPL");
diff --git a/lib/string_kunit.c b/lib/string_kunit.c
index eabf025cf77c..2a812decf14b 100644
--- a/lib/string_kunit.c
+++ b/lib/string_kunit.c
@@ -11,7 +11,13 @@
#include <linux/slab.h>
#include <linux/string.h>
-static void test_memset16(struct kunit *test)
+#define STRCMP_LARGE_BUF_LEN 2048
+#define STRCMP_CHANGE_POINT 1337
+#define STRCMP_TEST_EXPECT_EQUAL(test, fn, ...) KUNIT_EXPECT_EQ(test, fn(__VA_ARGS__), 0)
+#define STRCMP_TEST_EXPECT_LOWER(test, fn, ...) KUNIT_EXPECT_LT(test, fn(__VA_ARGS__), 0)
+#define STRCMP_TEST_EXPECT_GREATER(test, fn, ...) KUNIT_EXPECT_GT(test, fn(__VA_ARGS__), 0)
+
+static void string_test_memset16(struct kunit *test)
{
unsigned i, j, k;
u16 v, *p;
@@ -40,7 +46,7 @@ static void test_memset16(struct kunit *test)
}
}
-static void test_memset32(struct kunit *test)
+static void string_test_memset32(struct kunit *test)
{
unsigned i, j, k;
u32 v, *p;
@@ -69,7 +75,7 @@ static void test_memset32(struct kunit *test)
}
}
-static void test_memset64(struct kunit *test)
+static void string_test_memset64(struct kunit *test)
{
unsigned i, j, k;
u64 v, *p;
@@ -98,7 +104,7 @@ static void test_memset64(struct kunit *test)
}
}
-static void test_strchr(struct kunit *test)
+static void string_test_strchr(struct kunit *test)
{
const char *test_string = "abcdefghijkl";
const char *empty_string = "";
@@ -121,7 +127,7 @@ static void test_strchr(struct kunit *test)
KUNIT_ASSERT_NULL(test, result);
}
-static void test_strnchr(struct kunit *test)
+static void string_test_strnchr(struct kunit *test)
{
const char *test_string = "abcdefghijkl";
const char *empty_string = "";
@@ -154,7 +160,7 @@ static void test_strnchr(struct kunit *test)
KUNIT_ASSERT_NULL(test, result);
}
-static void test_strspn(struct kunit *test)
+static void string_test_strspn(struct kunit *test)
{
static const struct strspn_test {
const char str[16];
@@ -179,13 +185,444 @@ static void test_strspn(struct kunit *test)
}
}
+static char strcmp_buffer1[STRCMP_LARGE_BUF_LEN];
+static char strcmp_buffer2[STRCMP_LARGE_BUF_LEN];
+
+static void strcmp_fill_buffers(char fill1, char fill2)
+{
+ memset(strcmp_buffer1, fill1, STRCMP_LARGE_BUF_LEN);
+ memset(strcmp_buffer2, fill2, STRCMP_LARGE_BUF_LEN);
+ strcmp_buffer1[STRCMP_LARGE_BUF_LEN - 1] = 0;
+ strcmp_buffer2[STRCMP_LARGE_BUF_LEN - 1] = 0;
+}
+
+static void string_test_strcmp(struct kunit *test)
+{
+ /* Equal strings */
+ STRCMP_TEST_EXPECT_EQUAL(test, strcmp, "Hello, Kernel!", "Hello, Kernel!");
+ /* First string is lexicographically less than the second */
+ STRCMP_TEST_EXPECT_LOWER(test, strcmp, "Hello, KUnit!", "Hello, Kernel!");
+ /* First string is lexicographically larger than the second */
+ STRCMP_TEST_EXPECT_GREATER(test, strcmp, "Hello, Kernel!", "Hello, KUnit!");
+ /* Empty string is always lexicographically less than any non-empty string */
+ STRCMP_TEST_EXPECT_LOWER(test, strcmp, "", "Non-empty string");
+ /* Two empty strings should be equal */
+ STRCMP_TEST_EXPECT_EQUAL(test, strcmp, "", "");
+ /* Compare two strings which have only one char difference */
+ STRCMP_TEST_EXPECT_LOWER(test, strcmp, "Abacaba", "Abadaba");
+	/* Compare two strings which have the same prefix */
+ STRCMP_TEST_EXPECT_LOWER(test, strcmp, "Just a string", "Just a string and something else");
+}
+
+static void string_test_strcmp_long_strings(struct kunit *test)
+{
+ strcmp_fill_buffers('B', 'B');
+ STRCMP_TEST_EXPECT_EQUAL(test, strcmp, strcmp_buffer1, strcmp_buffer2);
+
+ strcmp_buffer1[STRCMP_CHANGE_POINT] = 'A';
+ STRCMP_TEST_EXPECT_LOWER(test, strcmp, strcmp_buffer1, strcmp_buffer2);
+
+ strcmp_buffer1[STRCMP_CHANGE_POINT] = 'C';
+ STRCMP_TEST_EXPECT_GREATER(test, strcmp, strcmp_buffer1, strcmp_buffer2);
+}
+
+static void string_test_strncmp(struct kunit *test)
+{
+ /* Equal strings */
+ STRCMP_TEST_EXPECT_EQUAL(test, strncmp, "Hello, KUnit!", "Hello, KUnit!", 13);
+ /* First string is lexicographically less than the second */
+ STRCMP_TEST_EXPECT_LOWER(test, strncmp, "Hello, KUnit!", "Hello, Kernel!", 13);
+ /* Result is always 'equal' when count = 0 */
+ STRCMP_TEST_EXPECT_EQUAL(test, strncmp, "Hello, Kernel!", "Hello, KUnit!", 0);
+ /* Strings with common prefix are equal if count = length of prefix */
+ STRCMP_TEST_EXPECT_EQUAL(test, strncmp, "Abacaba", "Abadaba", 3);
+ /* Strings with common prefix are not equal when count = length of prefix + 1 */
+ STRCMP_TEST_EXPECT_LOWER(test, strncmp, "Abacaba", "Abadaba", 4);
+ /* If one string is a prefix of another, the shorter string is lexicographically smaller */
+ STRCMP_TEST_EXPECT_LOWER(test, strncmp, "Just a string", "Just a string and something else",
+ strlen("Just a string and something else"));
+ /*
+	 * If one string is a prefix of another and we compare only the
+	 * first prefix-length characters, the result is 'equal'
+ */
+ STRCMP_TEST_EXPECT_EQUAL(test, strncmp, "Just a string", "Just a string and something else",
+ strlen("Just a string"));
+}
+
+static void string_test_strncmp_long_strings(struct kunit *test)
+{
+ strcmp_fill_buffers('B', 'B');
+ STRCMP_TEST_EXPECT_EQUAL(test, strncmp, strcmp_buffer1,
+ strcmp_buffer2, STRCMP_LARGE_BUF_LEN);
+
+ strcmp_buffer1[STRCMP_CHANGE_POINT] = 'A';
+ STRCMP_TEST_EXPECT_LOWER(test, strncmp, strcmp_buffer1,
+ strcmp_buffer2, STRCMP_LARGE_BUF_LEN);
+
+ strcmp_buffer1[STRCMP_CHANGE_POINT] = 'C';
+ STRCMP_TEST_EXPECT_GREATER(test, strncmp, strcmp_buffer1,
+ strcmp_buffer2, STRCMP_LARGE_BUF_LEN);
+ /* the strings are equal up to STRCMP_CHANGE_POINT */
+ STRCMP_TEST_EXPECT_EQUAL(test, strncmp, strcmp_buffer1,
+ strcmp_buffer2, STRCMP_CHANGE_POINT);
+ STRCMP_TEST_EXPECT_GREATER(test, strncmp, strcmp_buffer1,
+ strcmp_buffer2, STRCMP_CHANGE_POINT + 1);
+}
+
+static void string_test_strcasecmp(struct kunit *test)
+{
+ /* Same strings in different case should be equal */
+ STRCMP_TEST_EXPECT_EQUAL(test, strcasecmp, "Hello, Kernel!", "HeLLO, KErNeL!");
+ /* Empty strings should be equal */
+ STRCMP_TEST_EXPECT_EQUAL(test, strcasecmp, "", "");
+	/* Although the ASCII code for 'a' is larger than that for 'B', 'a' < 'B' */
+ STRCMP_TEST_EXPECT_LOWER(test, strcasecmp, "a", "B");
+ STRCMP_TEST_EXPECT_GREATER(test, strcasecmp, "B", "a");
+ /* Special symbols and numbers should be processed correctly */
+ STRCMP_TEST_EXPECT_EQUAL(test, strcasecmp, "-+**.1230ghTTT~^", "-+**.1230Ghttt~^");
+}
+
+static void string_test_strcasecmp_long_strings(struct kunit *test)
+{
+ strcmp_fill_buffers('b', 'B');
+ STRCMP_TEST_EXPECT_EQUAL(test, strcasecmp, strcmp_buffer1, strcmp_buffer2);
+
+ strcmp_buffer1[STRCMP_CHANGE_POINT] = 'a';
+ STRCMP_TEST_EXPECT_LOWER(test, strcasecmp, strcmp_buffer1, strcmp_buffer2);
+
+ strcmp_buffer1[STRCMP_CHANGE_POINT] = 'C';
+ STRCMP_TEST_EXPECT_GREATER(test, strcasecmp, strcmp_buffer1, strcmp_buffer2);
+}
+
+static void string_test_strncasecmp(struct kunit *test)
+{
+ /* Same strings in different case should be equal */
+ STRCMP_TEST_EXPECT_EQUAL(test, strncasecmp, "AbAcAbA", "Abacaba", strlen("Abacaba"));
+ /* strncasecmp should check 'count' chars only */
+ STRCMP_TEST_EXPECT_EQUAL(test, strncasecmp, "AbaCaBa", "abaCaDa", 5);
+ STRCMP_TEST_EXPECT_LOWER(test, strncasecmp, "a", "B", 1);
+ STRCMP_TEST_EXPECT_GREATER(test, strncasecmp, "B", "a", 1);
+ /* Result is always 'equal' when count = 0 */
+ STRCMP_TEST_EXPECT_EQUAL(test, strncasecmp, "Abacaba", "Not abacaba", 0);
+}
+
+static void string_test_strncasecmp_long_strings(struct kunit *test)
+{
+ strcmp_fill_buffers('b', 'B');
+ STRCMP_TEST_EXPECT_EQUAL(test, strncasecmp, strcmp_buffer1,
+ strcmp_buffer2, STRCMP_LARGE_BUF_LEN);
+
+ strcmp_buffer1[STRCMP_CHANGE_POINT] = 'a';
+ STRCMP_TEST_EXPECT_LOWER(test, strncasecmp, strcmp_buffer1,
+ strcmp_buffer2, STRCMP_LARGE_BUF_LEN);
+
+ strcmp_buffer1[STRCMP_CHANGE_POINT] = 'C';
+ STRCMP_TEST_EXPECT_GREATER(test, strncasecmp, strcmp_buffer1,
+ strcmp_buffer2, STRCMP_LARGE_BUF_LEN);
+
+ STRCMP_TEST_EXPECT_EQUAL(test, strncasecmp, strcmp_buffer1,
+ strcmp_buffer2, STRCMP_CHANGE_POINT);
+ STRCMP_TEST_EXPECT_GREATER(test, strncasecmp, strcmp_buffer1,
+ strcmp_buffer2, STRCMP_CHANGE_POINT + 1);
+}
+
+/**
+ * strscpy_check() - Run a specific test case.
+ * @test: KUnit test context pointer
+ * @src: Source string, argument to strscpy_pad()
+ * @count: Size of destination buffer, argument to strscpy_pad()
+ * @expected: Expected return value from call to strscpy_pad()
+ * @chars: Number of characters from the src string expected to be
+ * written to the dst buffer.
+ * @terminator: 1 if there should be a terminating null byte, 0 otherwise.
+ * @pad: Number of pad characters expected (in the tail of dst buffer).
+ * (@pad does not include the null terminator byte.)
+ *
+ * Calls strscpy_pad() and verifies the return value and state of the
+ * destination buffer after the call returns.
+ */
+static void strscpy_check(struct kunit *test, char *src, int count,
+ int expected, int chars, int terminator, int pad)
+{
+ int nr_bytes_poison;
+ int max_expected;
+ int max_count;
+ int written;
+ char buf[6];
+ int index, i;
+ const char POISON = 'z';
+
+ KUNIT_ASSERT_TRUE_MSG(test, src != NULL,
+ "null source string not supported");
+
+ memset(buf, POISON, sizeof(buf));
+ /* Future proofing test suite, validate args */
+ max_count = sizeof(buf) - 2; /* Space for null and to verify overflow */
+ max_expected = count - 1; /* Space for the null */
+
+ KUNIT_ASSERT_LE_MSG(test, count, max_count,
+ "count (%d) is too big (%d) ... aborting", count, max_count);
+ KUNIT_EXPECT_LE_MSG(test, expected, max_expected,
+ "expected (%d) is bigger than can possibly be returned (%d)",
+ expected, max_expected);
+
+ written = strscpy_pad(buf, src, count);
+ KUNIT_ASSERT_EQ(test, written, expected);
+
+ if (count && written == -E2BIG) {
+ KUNIT_ASSERT_EQ_MSG(test, 0, strncmp(buf, src, count - 1),
+ "buffer state invalid for -E2BIG");
+ KUNIT_ASSERT_EQ_MSG(test, buf[count - 1], '\0',
+ "too big string is not null terminated correctly");
+ }
+
+ for (i = 0; i < chars; i++)
+ KUNIT_ASSERT_EQ_MSG(test, buf[i], src[i],
+ "buf[i]==%c != src[i]==%c", buf[i], src[i]);
+
+ if (terminator)
+ KUNIT_ASSERT_EQ_MSG(test, buf[count - 1], '\0',
+ "string is not null terminated correctly");
+
+ for (i = 0; i < pad; i++) {
+ index = chars + terminator + i;
+ KUNIT_ASSERT_EQ_MSG(test, buf[index], '\0',
+ "padding missing at index: %d", i);
+ }
+
+ nr_bytes_poison = sizeof(buf) - chars - terminator - pad;
+ for (i = 0; i < nr_bytes_poison; i++) {
+ index = sizeof(buf) - 1 - i; /* Check from the end back */
+ KUNIT_ASSERT_EQ_MSG(test, buf[index], POISON,
+ "poison value missing at index: %d", i);
+ }
+}
+
+static void string_test_strscpy(struct kunit *test)
+{
+ char dest[8];
+
+ /*
+ * strscpy_check() uses a destination buffer of size 6 and needs at
+ * least 2 characters spare (one for null and one to check for
+	 * overflow). This means we should only call strscpy_check() with
+	 * strings up to a maximum of 4 characters long and 'count'
+	 * should not exceed 4. To test with longer strings increase
+	 * the buffer size in strscpy_check().
+ */
+
+ /* strscpy_check(test, src, count, expected, chars, terminator, pad) */
+ strscpy_check(test, "a", 0, -E2BIG, 0, 0, 0);
+ strscpy_check(test, "", 0, -E2BIG, 0, 0, 0);
+
+ strscpy_check(test, "a", 1, -E2BIG, 0, 1, 0);
+ strscpy_check(test, "", 1, 0, 0, 1, 0);
+
+ strscpy_check(test, "ab", 2, -E2BIG, 1, 1, 0);
+ strscpy_check(test, "a", 2, 1, 1, 1, 0);
+ strscpy_check(test, "", 2, 0, 0, 1, 1);
+
+ strscpy_check(test, "abc", 3, -E2BIG, 2, 1, 0);
+ strscpy_check(test, "ab", 3, 2, 2, 1, 0);
+ strscpy_check(test, "a", 3, 1, 1, 1, 1);
+ strscpy_check(test, "", 3, 0, 0, 1, 2);
+
+ strscpy_check(test, "abcd", 4, -E2BIG, 3, 1, 0);
+ strscpy_check(test, "abc", 4, 3, 3, 1, 0);
+ strscpy_check(test, "ab", 4, 2, 2, 1, 1);
+ strscpy_check(test, "a", 4, 1, 1, 1, 2);
+ strscpy_check(test, "", 4, 0, 0, 1, 3);
+
+ /* Compile-time-known source strings. */
+ KUNIT_EXPECT_EQ(test, strscpy(dest, "", ARRAY_SIZE(dest)), 0);
+ KUNIT_EXPECT_EQ(test, strscpy(dest, "", 3), 0);
+ KUNIT_EXPECT_EQ(test, strscpy(dest, "", 1), 0);
+ KUNIT_EXPECT_EQ(test, strscpy(dest, "", 0), -E2BIG);
+ KUNIT_EXPECT_EQ(test, strscpy(dest, "Fixed", ARRAY_SIZE(dest)), 5);
+ KUNIT_EXPECT_EQ(test, strscpy(dest, "Fixed", 3), -E2BIG);
+ KUNIT_EXPECT_EQ(test, strscpy(dest, "Fixed", 1), -E2BIG);
+ KUNIT_EXPECT_EQ(test, strscpy(dest, "Fixed", 0), -E2BIG);
+ KUNIT_EXPECT_EQ(test, strscpy(dest, "This is too long", ARRAY_SIZE(dest)), -E2BIG);
+}
+
+static volatile int unconst;
+
+static void string_test_strcat(struct kunit *test)
+{
+ char dest[8];
+
+ /* Destination is terminated. */
+ memset(dest, 0, sizeof(dest));
+ KUNIT_EXPECT_EQ(test, strlen(dest), 0);
+ /* Empty copy does nothing. */
+ KUNIT_EXPECT_TRUE(test, strcat(dest, "") == dest);
+ KUNIT_EXPECT_STREQ(test, dest, "");
+ /* 4 characters copied in, stops at %NUL. */
+ KUNIT_EXPECT_TRUE(test, strcat(dest, "four\000123") == dest);
+ KUNIT_EXPECT_STREQ(test, dest, "four");
+ KUNIT_EXPECT_EQ(test, dest[5], '\0');
+ /* 2 more characters copied in okay. */
+ KUNIT_EXPECT_TRUE(test, strcat(dest, "AB") == dest);
+ KUNIT_EXPECT_STREQ(test, dest, "fourAB");
+}
+
+static void string_test_strncat(struct kunit *test)
+{
+ char dest[8];
+
+ /* Destination is terminated. */
+ memset(dest, 0, sizeof(dest));
+ KUNIT_EXPECT_EQ(test, strlen(dest), 0);
+ /* Empty copy of size 0 does nothing. */
+ KUNIT_EXPECT_TRUE(test, strncat(dest, "", 0 + unconst) == dest);
+ KUNIT_EXPECT_STREQ(test, dest, "");
+ /* Empty copy of size 1 does nothing too. */
+ KUNIT_EXPECT_TRUE(test, strncat(dest, "", 1 + unconst) == dest);
+ KUNIT_EXPECT_STREQ(test, dest, "");
+ /* Copy of max 0 characters should do nothing. */
+ KUNIT_EXPECT_TRUE(test, strncat(dest, "asdf", 0 + unconst) == dest);
+ KUNIT_EXPECT_STREQ(test, dest, "");
+
+ /* 4 characters copied in, even if max is 8. */
+ KUNIT_EXPECT_TRUE(test, strncat(dest, "four\000123", 8 + unconst) == dest);
+ KUNIT_EXPECT_STREQ(test, dest, "four");
+ KUNIT_EXPECT_EQ(test, dest[5], '\0');
+ KUNIT_EXPECT_EQ(test, dest[6], '\0');
+ /* 2 characters copied in okay, 2 ignored. */
+ KUNIT_EXPECT_TRUE(test, strncat(dest, "ABCD", 2 + unconst) == dest);
+ KUNIT_EXPECT_STREQ(test, dest, "fourAB");
+}
+
+static void string_test_strlcat(struct kunit *test)
+{
+ char dest[8] = "";
+ int len = sizeof(dest) + unconst;
+
+ /* Destination is terminated. */
+ KUNIT_EXPECT_EQ(test, strlen(dest), 0);
+ /* Empty copy is size 0. */
+ KUNIT_EXPECT_EQ(test, strlcat(dest, "", len), 0);
+ KUNIT_EXPECT_STREQ(test, dest, "");
+ /* Size 1 should keep buffer terminated, report size of source only. */
+ KUNIT_EXPECT_EQ(test, strlcat(dest, "four", 1 + unconst), 4);
+ KUNIT_EXPECT_STREQ(test, dest, "");
+
+ /* 4 characters copied in. */
+ KUNIT_EXPECT_EQ(test, strlcat(dest, "four", len), 4);
+ KUNIT_EXPECT_STREQ(test, dest, "four");
+ /* 2 characters copied in okay, gets to 6 total. */
+ KUNIT_EXPECT_EQ(test, strlcat(dest, "AB", len), 6);
+ KUNIT_EXPECT_STREQ(test, dest, "fourAB");
+ /* 2 characters ignored if max size (7) reached. */
+ KUNIT_EXPECT_EQ(test, strlcat(dest, "CD", 7 + unconst), 8);
+ KUNIT_EXPECT_STREQ(test, dest, "fourAB");
+ /* 1 of 2 characters skipped, now at true max size. */
+ KUNIT_EXPECT_EQ(test, strlcat(dest, "EFG", len), 9);
+ KUNIT_EXPECT_STREQ(test, dest, "fourABE");
+ /* Everything else ignored, now at full size. */
+ KUNIT_EXPECT_EQ(test, strlcat(dest, "1234", len), 11);
+ KUNIT_EXPECT_STREQ(test, dest, "fourABE");
+}
+
+static void string_test_strtomem(struct kunit *test)
+{
+ static const char input[sizeof(unsigned long)] = "hi";
+ static const char truncate[] = "this is too long";
+ struct {
+ unsigned long canary1;
+ unsigned char output[sizeof(unsigned long)] __nonstring;
+ unsigned long canary2;
+ } wrap;
+
+ memset(&wrap, 0xFF, sizeof(wrap));
+ KUNIT_EXPECT_EQ_MSG(test, wrap.canary1, ULONG_MAX,
+ "bad initial canary value");
+ KUNIT_EXPECT_EQ_MSG(test, wrap.canary2, ULONG_MAX,
+ "bad initial canary value");
+
+ /* Check unpadded copy leaves surroundings untouched. */
+ strtomem(wrap.output, input);
+ KUNIT_EXPECT_EQ(test, wrap.canary1, ULONG_MAX);
+ KUNIT_EXPECT_EQ(test, wrap.output[0], input[0]);
+ KUNIT_EXPECT_EQ(test, wrap.output[1], input[1]);
+ for (size_t i = 2; i < sizeof(wrap.output); i++)
+ KUNIT_EXPECT_EQ(test, wrap.output[i], 0xFF);
+ KUNIT_EXPECT_EQ(test, wrap.canary2, ULONG_MAX);
+
+ /* Check truncated copy leaves surroundings untouched. */
+ memset(&wrap, 0xFF, sizeof(wrap));
+ strtomem(wrap.output, truncate);
+ KUNIT_EXPECT_EQ(test, wrap.canary1, ULONG_MAX);
+ for (size_t i = 0; i < sizeof(wrap.output); i++)
+ KUNIT_EXPECT_EQ(test, wrap.output[i], truncate[i]);
+ KUNIT_EXPECT_EQ(test, wrap.canary2, ULONG_MAX);
+
+ /* Check padded copy leaves only string padded. */
+ memset(&wrap, 0xFF, sizeof(wrap));
+ strtomem_pad(wrap.output, input, 0xAA);
+ KUNIT_EXPECT_EQ(test, wrap.canary1, ULONG_MAX);
+ KUNIT_EXPECT_EQ(test, wrap.output[0], input[0]);
+ KUNIT_EXPECT_EQ(test, wrap.output[1], input[1]);
+ for (size_t i = 2; i < sizeof(wrap.output); i++)
+ KUNIT_EXPECT_EQ(test, wrap.output[i], 0xAA);
+ KUNIT_EXPECT_EQ(test, wrap.canary2, ULONG_MAX);
+
+ /* Check truncated padded copy has no padding. */
+ memset(&wrap, 0xFF, sizeof(wrap));
+ strtomem(wrap.output, truncate);
+ KUNIT_EXPECT_EQ(test, wrap.canary1, ULONG_MAX);
+ for (size_t i = 0; i < sizeof(wrap.output); i++)
+ KUNIT_EXPECT_EQ(test, wrap.output[i], truncate[i]);
+ KUNIT_EXPECT_EQ(test, wrap.canary2, ULONG_MAX);
+}
+
+static void string_test_memtostr(struct kunit *test)
+{
+ char nonstring[7] = { 'a', 'b', 'c', 'd', 'e', 'f', 'g' };
+ char nonstring_small[3] = { 'a', 'b', 'c' };
+ char dest[sizeof(nonstring) + 1];
+
+	/* Copy a non-NUL-terminated string into an exactly right-sized dest. */
+ KUNIT_EXPECT_EQ(test, sizeof(dest), sizeof(nonstring) + 1);
+ memset(dest, 'X', sizeof(dest));
+ memtostr(dest, nonstring);
+ KUNIT_EXPECT_STREQ(test, dest, "abcdefg");
+ memset(dest, 'X', sizeof(dest));
+ memtostr(dest, nonstring_small);
+ KUNIT_EXPECT_STREQ(test, dest, "abc");
+ KUNIT_EXPECT_EQ(test, dest[7], 'X');
+
+ memset(dest, 'X', sizeof(dest));
+ memtostr_pad(dest, nonstring);
+ KUNIT_EXPECT_STREQ(test, dest, "abcdefg");
+ memset(dest, 'X', sizeof(dest));
+ memtostr_pad(dest, nonstring_small);
+ KUNIT_EXPECT_STREQ(test, dest, "abc");
+ KUNIT_EXPECT_EQ(test, dest[7], '\0');
+}
+
static struct kunit_case string_test_cases[] = {
- KUNIT_CASE(test_memset16),
- KUNIT_CASE(test_memset32),
- KUNIT_CASE(test_memset64),
- KUNIT_CASE(test_strchr),
- KUNIT_CASE(test_strnchr),
- KUNIT_CASE(test_strspn),
+ KUNIT_CASE(string_test_memset16),
+ KUNIT_CASE(string_test_memset32),
+ KUNIT_CASE(string_test_memset64),
+ KUNIT_CASE(string_test_strchr),
+ KUNIT_CASE(string_test_strnchr),
+ KUNIT_CASE(string_test_strspn),
+ KUNIT_CASE(string_test_strcmp),
+ KUNIT_CASE(string_test_strcmp_long_strings),
+ KUNIT_CASE(string_test_strncmp),
+ KUNIT_CASE(string_test_strncmp_long_strings),
+ KUNIT_CASE(string_test_strcasecmp),
+ KUNIT_CASE(string_test_strcasecmp_long_strings),
+ KUNIT_CASE(string_test_strncasecmp),
+ KUNIT_CASE(string_test_strncasecmp_long_strings),
+ KUNIT_CASE(string_test_strscpy),
+ KUNIT_CASE(string_test_strcat),
+ KUNIT_CASE(string_test_strncat),
+ KUNIT_CASE(string_test_strlcat),
+ KUNIT_CASE(string_test_strtomem),
+ KUNIT_CASE(string_test_memtostr),
{}
};
diff --git a/lib/strscpy_kunit.c b/lib/strscpy_kunit.c
deleted file mode 100644
index a6b6344354ed..000000000000
--- a/lib/strscpy_kunit.c
+++ /dev/null
@@ -1,142 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * Kernel module for testing 'strscpy' family of functions.
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <kunit/test.h>
-#include <linux/string.h>
-
-/*
- * tc() - Run a specific test case.
- * @src: Source string, argument to strscpy_pad()
- * @count: Size of destination buffer, argument to strscpy_pad()
- * @expected: Expected return value from call to strscpy_pad()
- * @terminator: 1 if there should be a terminating null byte 0 otherwise.
- * @chars: Number of characters from the src string expected to be
- * written to the dst buffer.
- * @pad: Number of pad characters expected (in the tail of dst buffer).
- * (@pad does not include the null terminator byte.)
- *
- * Calls strscpy_pad() and verifies the return value and state of the
- * destination buffer after the call returns.
- */
-static void tc(struct kunit *test, char *src, int count, int expected,
- int chars, int terminator, int pad)
-{
- int nr_bytes_poison;
- int max_expected;
- int max_count;
- int written;
- char buf[6];
- int index, i;
- const char POISON = 'z';
-
- KUNIT_ASSERT_TRUE_MSG(test, src != NULL,
- "null source string not supported");
-
- memset(buf, POISON, sizeof(buf));
- /* Future proofing test suite, validate args */
- max_count = sizeof(buf) - 2; /* Space for null and to verify overflow */
- max_expected = count - 1; /* Space for the null */
-
- KUNIT_ASSERT_LE_MSG(test, count, max_count,
- "count (%d) is too big (%d) ... aborting", count, max_count);
- KUNIT_EXPECT_LE_MSG(test, expected, max_expected,
- "expected (%d) is bigger than can possibly be returned (%d)",
- expected, max_expected);
-
- written = strscpy_pad(buf, src, count);
- KUNIT_ASSERT_EQ(test, written, expected);
-
- if (count && written == -E2BIG) {
- KUNIT_ASSERT_EQ_MSG(test, 0, strncmp(buf, src, count - 1),
- "buffer state invalid for -E2BIG");
- KUNIT_ASSERT_EQ_MSG(test, buf[count - 1], '\0',
- "too big string is not null terminated correctly");
- }
-
- for (i = 0; i < chars; i++)
- KUNIT_ASSERT_EQ_MSG(test, buf[i], src[i],
- "buf[i]==%c != src[i]==%c", buf[i], src[i]);
-
- if (terminator)
- KUNIT_ASSERT_EQ_MSG(test, buf[count - 1], '\0',
- "string is not null terminated correctly");
-
- for (i = 0; i < pad; i++) {
- index = chars + terminator + i;
- KUNIT_ASSERT_EQ_MSG(test, buf[index], '\0',
- "padding missing at index: %d", i);
- }
-
- nr_bytes_poison = sizeof(buf) - chars - terminator - pad;
- for (i = 0; i < nr_bytes_poison; i++) {
- index = sizeof(buf) - 1 - i; /* Check from the end back */
- KUNIT_ASSERT_EQ_MSG(test, buf[index], POISON,
- "poison value missing at index: %d", i);
- }
-}
-
-static void strscpy_test(struct kunit *test)
-{
- char dest[8];
-
- /*
- * tc() uses a destination buffer of size 6 and needs at
- * least 2 characters spare (one for null and one to check for
- * overflow). This means we should only call tc() with
- * strings up to a maximum of 4 characters long and 'count'
- * should not exceed 4. To test with longer strings increase
- * the buffer size in tc().
- */
-
- /* tc(test, src, count, expected, chars, terminator, pad) */
- tc(test, "a", 0, -E2BIG, 0, 0, 0);
- tc(test, "", 0, -E2BIG, 0, 0, 0);
-
- tc(test, "a", 1, -E2BIG, 0, 1, 0);
- tc(test, "", 1, 0, 0, 1, 0);
-
- tc(test, "ab", 2, -E2BIG, 1, 1, 0);
- tc(test, "a", 2, 1, 1, 1, 0);
- tc(test, "", 2, 0, 0, 1, 1);
-
- tc(test, "abc", 3, -E2BIG, 2, 1, 0);
- tc(test, "ab", 3, 2, 2, 1, 0);
- tc(test, "a", 3, 1, 1, 1, 1);
- tc(test, "", 3, 0, 0, 1, 2);
-
- tc(test, "abcd", 4, -E2BIG, 3, 1, 0);
- tc(test, "abc", 4, 3, 3, 1, 0);
- tc(test, "ab", 4, 2, 2, 1, 1);
- tc(test, "a", 4, 1, 1, 1, 2);
- tc(test, "", 4, 0, 0, 1, 3);
-
- /* Compile-time-known source strings. */
- KUNIT_EXPECT_EQ(test, strscpy(dest, "", ARRAY_SIZE(dest)), 0);
- KUNIT_EXPECT_EQ(test, strscpy(dest, "", 3), 0);
- KUNIT_EXPECT_EQ(test, strscpy(dest, "", 1), 0);
- KUNIT_EXPECT_EQ(test, strscpy(dest, "", 0), -E2BIG);
- KUNIT_EXPECT_EQ(test, strscpy(dest, "Fixed", ARRAY_SIZE(dest)), 5);
- KUNIT_EXPECT_EQ(test, strscpy(dest, "Fixed", 3), -E2BIG);
- KUNIT_EXPECT_EQ(test, strscpy(dest, "Fixed", 1), -E2BIG);
- KUNIT_EXPECT_EQ(test, strscpy(dest, "Fixed", 0), -E2BIG);
- KUNIT_EXPECT_EQ(test, strscpy(dest, "This is too long", ARRAY_SIZE(dest)), -E2BIG);
-}
-
-static struct kunit_case strscpy_test_cases[] = {
- KUNIT_CASE(strscpy_test),
- {}
-};
-
-static struct kunit_suite strscpy_test_suite = {
- .name = "strscpy",
- .test_cases = strscpy_test_cases,
-};
-
-kunit_test_suite(strscpy_test_suite);
-
-MODULE_AUTHOR("Tobin C. Harding <tobin@kernel.org>");
-MODULE_LICENSE("GPL");
diff --git a/lib/test_bitmap.c b/lib/test_bitmap.c
index 6b2b33579f56..83019beabce4 100644
--- a/lib/test_bitmap.c
+++ b/lib/test_bitmap.c
@@ -60,18 +60,17 @@ static const unsigned long exp3_1_0[] __initconst = {
};
static bool __init
-__check_eq_uint(const char *srcfile, unsigned int line,
- const unsigned int exp_uint, unsigned int x)
+__check_eq_ulong(const char *srcfile, unsigned int line,
+ const unsigned long exp_ulong, unsigned long x)
{
- if (exp_uint != x) {
- pr_err("[%s:%u] expected %u, got %u\n",
- srcfile, line, exp_uint, x);
+ if (exp_ulong != x) {
+ pr_err("[%s:%u] expected %lu, got %lu\n",
+ srcfile, line, exp_ulong, x);
return false;
}
return true;
}
-
static bool __init
__check_eq_bitmap(const char *srcfile, unsigned int line,
const unsigned long *exp_bmap, const unsigned long *bmap,
@@ -185,7 +184,8 @@ __check_eq_str(const char *srcfile, unsigned int line,
result; \
})
-#define expect_eq_uint(...) __expect_eq(uint, ##__VA_ARGS__)
+#define expect_eq_ulong(...) __expect_eq(ulong, ##__VA_ARGS__)
+#define expect_eq_uint(x, y) expect_eq_ulong((unsigned int)(x), (unsigned int)(y))
#define expect_eq_bitmap(...) __expect_eq(bitmap, ##__VA_ARGS__)
#define expect_eq_pbl(...) __expect_eq(pbl, ##__VA_ARGS__)
#define expect_eq_u32_array(...) __expect_eq(u32_array, ##__VA_ARGS__)
@@ -548,7 +548,7 @@ static void __init test_bitmap_parselist(void)
}
if (ptest.flags & PARSE_TIME)
- pr_err("parselist: %d: input is '%s' OK, Time: %llu\n",
+ pr_info("parselist: %d: input is '%s' OK, Time: %llu\n",
i, ptest.in, time);
#undef ptest
@@ -587,7 +587,7 @@ static void __init test_bitmap_printlist(void)
goto out;
}
- pr_err("bitmap_print_to_pagebuf: input is '%s', Time: %llu\n", buf, time);
+ pr_info("bitmap_print_to_pagebuf: input is '%s', Time: %llu\n", buf, time);
out:
kfree(buf);
kfree(bmap);
@@ -665,7 +665,7 @@ static void __init test_bitmap_parse(void)
}
if (test.flags & PARSE_TIME)
- pr_err("parse: %d: input is '%s' OK, Time: %llu\n",
+ pr_info("parse: %d: input is '%s' OK, Time: %llu\n",
i, test.in, time);
}
}
@@ -1245,14 +1245,7 @@ static void __init test_bitmap_const_eval(void)
* in runtime.
*/
- /*
- * Equals to `unsigned long bitmap[1] = { GENMASK(6, 5), }`.
- * Clang on s390 optimizes bitops at compile-time as intended, but at
- * the same time stops treating @bitmap and @bitopvar as compile-time
- * constants after regular test_bit() is executed, thus triggering the
- * build bugs below. So, call const_test_bit() there directly until
- * the compiler is fixed.
- */
+	/* Equivalent to `unsigned long bitmap[1] = { GENMASK(6, 5), }` */
bitmap_clear(bitmap, 0, BITS_PER_LONG);
if (!test_bit(7, bitmap))
bitmap_set(bitmap, 5, 2);
@@ -1284,8 +1277,179 @@ static void __init test_bitmap_const_eval(void)
/* ~BIT(25) */
BUILD_BUG_ON(!__builtin_constant_p(~var));
BUILD_BUG_ON(~var != ~BIT(25));
+
+ /* ~BIT(25) | BIT(25) == ~0UL */
+ bitmap_complement(&var, &var, BITS_PER_LONG);
+ __assign_bit(25, &var, true);
+
+ /* !(~(~0UL)) == 1 */
+ res = bitmap_full(&var, BITS_PER_LONG);
+ BUILD_BUG_ON(!__builtin_constant_p(res));
+ BUILD_BUG_ON(!res);
+}
+
+/*
+ * The test bitmap should be big enough to cover cases where start is not in
+ * the first word, and start+nbits lands in the following word.
+ */
+#define TEST_BIT_LEN (1000)
+
+/*
+ * Helper function to test bitmap_write() overwriting the chosen byte pattern.
+ */
+static void __init test_bitmap_write_helper(const char *pattern)
+{
+ DECLARE_BITMAP(bitmap, TEST_BIT_LEN);
+ DECLARE_BITMAP(exp_bitmap, TEST_BIT_LEN);
+ DECLARE_BITMAP(pat_bitmap, TEST_BIT_LEN);
+ unsigned long w, r, bit;
+ int i, n, nbits;
+
+ /*
+ * Only parse the pattern once and store the result in the intermediate
+ * bitmap.
+ */
+ bitmap_parselist(pattern, pat_bitmap, TEST_BIT_LEN);
+
+ /*
+ * Check that writing a single bit does not accidentally touch the
+ * adjacent bits.
+ */
+ for (i = 0; i < TEST_BIT_LEN; i++) {
+ bitmap_copy(bitmap, pat_bitmap, TEST_BIT_LEN);
+ bitmap_copy(exp_bitmap, pat_bitmap, TEST_BIT_LEN);
+ for (bit = 0; bit <= 1; bit++) {
+ bitmap_write(bitmap, bit, i, 1);
+ __assign_bit(i, exp_bitmap, bit);
+ expect_eq_bitmap(exp_bitmap, bitmap,
+ TEST_BIT_LEN);
+ }
+ }
+
+ /* Ensure writing 0 bits does not change anything. */
+ bitmap_copy(bitmap, pat_bitmap, TEST_BIT_LEN);
+ bitmap_copy(exp_bitmap, pat_bitmap, TEST_BIT_LEN);
+ for (i = 0; i < TEST_BIT_LEN; i++) {
+ bitmap_write(bitmap, ~0UL, i, 0);
+ expect_eq_bitmap(exp_bitmap, bitmap, TEST_BIT_LEN);
+ }
+
+ for (nbits = BITS_PER_LONG; nbits >= 1; nbits--) {
+ w = IS_ENABLED(CONFIG_64BIT) ? 0xdeadbeefdeadbeefUL
+ : 0xdeadbeefUL;
+ w >>= (BITS_PER_LONG - nbits);
+ for (i = 0; i <= TEST_BIT_LEN - nbits; i++) {
+ bitmap_copy(bitmap, pat_bitmap, TEST_BIT_LEN);
+ bitmap_copy(exp_bitmap, pat_bitmap, TEST_BIT_LEN);
+ for (n = 0; n < nbits; n++)
+ __assign_bit(i + n, exp_bitmap, w & BIT(n));
+ bitmap_write(bitmap, w, i, nbits);
+ expect_eq_bitmap(exp_bitmap, bitmap, TEST_BIT_LEN);
+ r = bitmap_read(bitmap, i, nbits);
+ expect_eq_ulong(r, w);
+ }
+ }
+}
+
+static void __init test_bitmap_read_write(void)
+{
+ unsigned char *pattern[3] = {"", "all:1/2", "all"};
+ DECLARE_BITMAP(bitmap, TEST_BIT_LEN);
+ unsigned long zero_bits = 0, bits_per_long = BITS_PER_LONG;
+ unsigned long val;
+ int i, pi;
+
+ /*
+ * Reading/writing zero bits should not crash the kernel.
+ * READ_ONCE() prevents constant folding.
+ */
+ bitmap_write(NULL, 0, 0, READ_ONCE(zero_bits));
+ /* Return value of bitmap_read() is undefined here. */
+ bitmap_read(NULL, 0, READ_ONCE(zero_bits));
+
+ /*
+ * Reading/writing more than BITS_PER_LONG bits should not crash the
+ * kernel. READ_ONCE() prevents constant folding.
+ */
+ bitmap_write(NULL, 0, 0, READ_ONCE(bits_per_long) + 1);
+ /* Return value of bitmap_read() is undefined here. */
+ bitmap_read(NULL, 0, READ_ONCE(bits_per_long) + 1);
+
+ /*
+ * Ensure that bitmap_read() reads the same value that was previously
+	 * written, and two consecutive values are correctly merged.
+	 * The resulting bit pattern is asymmetric to rule out possible issues
+	 * with bit numbering order.
+ */
+ for (i = 0; i < TEST_BIT_LEN - 7; i++) {
+ bitmap_zero(bitmap, TEST_BIT_LEN);
+
+ bitmap_write(bitmap, 0b10101UL, i, 5);
+ val = bitmap_read(bitmap, i, 5);
+ expect_eq_ulong(0b10101UL, val);
+
+ bitmap_write(bitmap, 0b101UL, i + 5, 3);
+ val = bitmap_read(bitmap, i + 5, 3);
+ expect_eq_ulong(0b101UL, val);
+
+ val = bitmap_read(bitmap, i, 8);
+ expect_eq_ulong(0b10110101UL, val);
+ }
+
+ for (pi = 0; pi < ARRAY_SIZE(pattern); pi++)
+ test_bitmap_write_helper(pattern[pi]);
}
+static void __init test_bitmap_read_perf(void)
+{
+ DECLARE_BITMAP(bitmap, TEST_BIT_LEN);
+ unsigned int cnt, nbits, i;
+ unsigned long val;
+ ktime_t time;
+
+ bitmap_fill(bitmap, TEST_BIT_LEN);
+ time = ktime_get();
+ for (cnt = 0; cnt < 5; cnt++) {
+ for (nbits = 1; nbits <= BITS_PER_LONG; nbits++) {
+ for (i = 0; i < TEST_BIT_LEN; i++) {
+ if (i + nbits > TEST_BIT_LEN)
+ break;
+ /*
+ * Prevent the compiler from optimizing away the
+ * bitmap_read() by using its value.
+ */
+ WRITE_ONCE(val, bitmap_read(bitmap, i, nbits));
+ }
+ }
+ }
+ time = ktime_get() - time;
+ pr_info("Time spent in %s:\t%llu\n", __func__, time);
+}
+
+static void __init test_bitmap_write_perf(void)
+{
+ DECLARE_BITMAP(bitmap, TEST_BIT_LEN);
+ unsigned int cnt, nbits, i;
+ unsigned long val = 0xfeedface;
+ ktime_t time;
+
+ bitmap_zero(bitmap, TEST_BIT_LEN);
+ time = ktime_get();
+ for (cnt = 0; cnt < 5; cnt++) {
+ for (nbits = 1; nbits <= BITS_PER_LONG; nbits++) {
+ for (i = 0; i < TEST_BIT_LEN; i++) {
+ if (i + nbits > TEST_BIT_LEN)
+ break;
+ bitmap_write(bitmap, val, i, nbits);
+ }
+ }
+ }
+ time = ktime_get() - time;
+ pr_info("Time spent in %s:\t%llu\n", __func__, time);
+}
+
+#undef TEST_BIT_LEN
+
static void __init selftest(void)
{
test_zero_clear();
@@ -1303,6 +1467,9 @@ static void __init selftest(void)
test_bitmap_cut();
test_bitmap_print_buf();
test_bitmap_const_eval();
+ test_bitmap_read_write();
+ test_bitmap_read_perf();
+ test_bitmap_write_perf();
test_find_nth_bit();
test_for_each_set_bit();
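
As a compact restatement of the semantics the new tests exercise: bitmap_write(map, val, start, nbits) copies the low nbits of val into the bitmap at bit offset start, least-significant bit first, and bitmap_read() performs the inverse. A sketch mirroring the 8-bit merge case from test_bitmap_read_write():

	DECLARE_BITMAP(map, 64);

	bitmap_zero(map, 64);
	bitmap_write(map, 0b10101UL, 3, 5);	/* five bits at offset 3 */
	bitmap_write(map, 0b101UL, 8, 3);	/* three more at offset 8 */
	/* the two writes merge into one 8-bit value whose LSB sits at bit 3 */
	WARN_ON(bitmap_read(map, 3, 8) != 0b10110101UL);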
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 569e6d2dc55c..207ff87194db 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -13431,7 +13431,7 @@ static struct bpf_test tests[] = {
.stack_depth = 8,
.nr_testruns = NR_PATTERN_RUNS,
},
- /* 64-bit atomic magnitudes */
+ /* 32-bit atomic magnitudes */
{
"ATOMIC_W_ADD: all operand magnitudes",
{ },
diff --git a/lib/test_hmm.c b/lib/test_hmm.c
index 717dcb830127..b823ba7cb6a1 100644
--- a/lib/test_hmm.c
+++ b/lib/test_hmm.c
@@ -1226,8 +1226,8 @@ static void dmirror_device_evict_chunk(struct dmirror_chunk *chunk)
unsigned long *src_pfns;
unsigned long *dst_pfns;
- src_pfns = kcalloc(npages, sizeof(*src_pfns), GFP_KERNEL);
- dst_pfns = kcalloc(npages, sizeof(*dst_pfns), GFP_KERNEL);
+ src_pfns = kvcalloc(npages, sizeof(*src_pfns), GFP_KERNEL | __GFP_NOFAIL);
+ dst_pfns = kvcalloc(npages, sizeof(*dst_pfns), GFP_KERNEL | __GFP_NOFAIL);
migrate_device_range(src_pfns, start_pfn, npages);
for (i = 0; i < npages; i++) {
@@ -1250,8 +1250,8 @@ static void dmirror_device_evict_chunk(struct dmirror_chunk *chunk)
}
migrate_device_pages(src_pfns, dst_pfns, npages);
migrate_device_finalize(src_pfns, dst_pfns, npages);
- kfree(src_pfns);
- kfree(dst_pfns);
+ kvfree(src_pfns);
+ kvfree(dst_pfns);
}
/* Removes free pages from the free list so they can't be re-allocated */
diff --git a/lib/test_xarray.c b/lib/test_xarray.c
index ebe2af2e072d..ab9cc42a0d74 100644
--- a/lib/test_xarray.c
+++ b/lib/test_xarray.c
@@ -744,15 +744,20 @@ static noinline void check_xa_multi_store_adv_add(struct xarray *xa,
do {
xas_lock_irq(&xas);
-
xas_store(&xas, p);
- XA_BUG_ON(xa, xas_error(&xas));
- XA_BUG_ON(xa, xa_load(xa, index) != p);
-
xas_unlock_irq(&xas);
+ /*
+ * In our selftest case the only failure we can expect is for
+ * there not to be enough memory as we're not mimicking the
+ * entire page cache, so verify that's the only error we can run
+	 * into here. The xas_nomem() that follows will fix that
+	 * condition for us so the loop can carry on.
+ */
+ XA_BUG_ON(xa, xas_error(&xas) && xas_error(&xas) != -ENOMEM);
} while (xas_nomem(&xas, GFP_KERNEL));
XA_BUG_ON(xa, xas_error(&xas));
+ XA_BUG_ON(xa, xa_load(xa, index) != p);
}
/* mimics page_cache_delete() */
@@ -1783,9 +1788,11 @@ static void check_split_1(struct xarray *xa, unsigned long index,
unsigned int order, unsigned int new_order)
{
XA_STATE_ORDER(xas, xa, index, new_order);
- unsigned int i;
+ unsigned int i, found;
+ void *entry;
xa_store_order(xa, index, order, xa, GFP_KERNEL);
+ xa_set_mark(xa, index, XA_MARK_1);
xas_split_alloc(&xas, xa, order, GFP_KERNEL);
xas_lock(&xas);
@@ -1802,6 +1809,16 @@ static void check_split_1(struct xarray *xa, unsigned long index,
xa_set_mark(xa, index, XA_MARK_0);
XA_BUG_ON(xa, !xa_get_mark(xa, index, XA_MARK_0));
+ xas_set_order(&xas, index, 0);
+ found = 0;
+ rcu_read_lock();
+ xas_for_each_marked(&xas, entry, ULONG_MAX, XA_MARK_1) {
+ found++;
+ XA_BUG_ON(xa, xa_is_internal(entry));
+ }
+ rcu_read_unlock();
+ XA_BUG_ON(xa, found != 1 << (order - new_order));
+
xa_destroy(xa);
}
@@ -1984,6 +2001,97 @@ static noinline void check_get_order(struct xarray *xa)
}
}
+static noinline void check_xas_get_order(struct xarray *xa)
+{
+ XA_STATE(xas, xa, 0);
+
+ unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1;
+ unsigned int order;
+ unsigned long i, j;
+
+ for (order = 0; order < max_order; order++) {
+ for (i = 0; i < 10; i++) {
+ xas_set_order(&xas, i << order, order);
+ do {
+ xas_lock(&xas);
+ xas_store(&xas, xa_mk_value(i));
+ xas_unlock(&xas);
+ } while (xas_nomem(&xas, GFP_KERNEL));
+
+ for (j = i << order; j < (i + 1) << order; j++) {
+ xas_set_order(&xas, j, 0);
+ rcu_read_lock();
+ xas_load(&xas);
+ XA_BUG_ON(xa, xas_get_order(&xas) != order);
+ rcu_read_unlock();
+ }
+
+ xas_lock(&xas);
+ xas_set_order(&xas, i << order, order);
+ xas_store(&xas, NULL);
+ xas_unlock(&xas);
+ }
+ }
+}
+
+static noinline void check_xas_conflict_get_order(struct xarray *xa)
+{
+ XA_STATE(xas, xa, 0);
+
+ void *entry;
+ int only_once;
+ unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1;
+ unsigned int order;
+ unsigned long i, j, k;
+
+ for (order = 0; order < max_order; order++) {
+ for (i = 0; i < 10; i++) {
+ xas_set_order(&xas, i << order, order);
+ do {
+ xas_lock(&xas);
+ xas_store(&xas, xa_mk_value(i));
+ xas_unlock(&xas);
+ } while (xas_nomem(&xas, GFP_KERNEL));
+
+ /*
+ * Ensure xas_get_order works with xas_for_each_conflict.
+ */
+ j = i << order;
+ for (k = 0; k < order; k++) {
+ only_once = 0;
+ xas_set_order(&xas, j + (1 << k), k);
+ xas_lock(&xas);
+ xas_for_each_conflict(&xas, entry) {
+ XA_BUG_ON(xa, entry != xa_mk_value(i));
+ XA_BUG_ON(xa, xas_get_order(&xas) != order);
+ only_once++;
+ }
+ XA_BUG_ON(xa, only_once != 1);
+ xas_unlock(&xas);
+ }
+
+ if (order < max_order - 1) {
+ only_once = 0;
+ xas_set_order(&xas, (i & ~1UL) << order, order + 1);
+ xas_lock(&xas);
+ xas_for_each_conflict(&xas, entry) {
+ XA_BUG_ON(xa, entry != xa_mk_value(i));
+ XA_BUG_ON(xa, xas_get_order(&xas) != order);
+ only_once++;
+ }
+ XA_BUG_ON(xa, only_once != 1);
+ xas_unlock(&xas);
+ }
+
+ xas_set_order(&xas, i << order, order);
+ xas_lock(&xas);
+ xas_store(&xas, NULL);
+ xas_unlock(&xas);
+ }
+ }
+}
+
static noinline void check_destroy(struct xarray *xa)
{
unsigned long index;
@@ -2035,6 +2143,8 @@ static int xarray_checks(void)
check_multi_store(&array);
check_multi_store_advanced(&array);
check_get_order(&array);
+ check_xas_get_order(&array);
+ check_xas_conflict_get_order(&array);
check_xa_alloc();
check_find(&array);
check_find_entry(&array);
diff --git a/lib/ubsan.h b/lib/ubsan.h
index 0abbbac8700d..07e37d4429b4 100644
--- a/lib/ubsan.h
+++ b/lib/ubsan.h
@@ -43,7 +43,7 @@ enum {
struct type_descriptor {
u16 type_kind;
u16 type_info;
- char type_name[1];
+ char type_name[];
};
struct source_location {
@@ -124,19 +124,32 @@ typedef s64 s_max;
typedef u64 u_max;
#endif
-void __ubsan_handle_add_overflow(void *data, void *lhs, void *rhs);
-void __ubsan_handle_sub_overflow(void *data, void *lhs, void *rhs);
-void __ubsan_handle_mul_overflow(void *data, void *lhs, void *rhs);
-void __ubsan_handle_negate_overflow(void *_data, void *old_val);
-void __ubsan_handle_divrem_overflow(void *_data, void *lhs, void *rhs);
-void __ubsan_handle_type_mismatch(struct type_mismatch_data *data, void *ptr);
-void __ubsan_handle_type_mismatch_v1(void *_data, void *ptr);
-void __ubsan_handle_out_of_bounds(void *_data, void *index);
-void __ubsan_handle_shift_out_of_bounds(void *_data, void *lhs, void *rhs);
-void __ubsan_handle_builtin_unreachable(void *_data);
-void __ubsan_handle_load_invalid_value(void *_data, void *val);
-void __ubsan_handle_alignment_assumption(void *_data, unsigned long ptr,
- unsigned long align,
- unsigned long offset);
+/*
+ * When generating Runtime Calls, Clang doesn't respect the -mregparm=3
+ * option used on i386: https://github.com/llvm/llvm-project/issues/89670
+ * Fix this for earlier Clang versions by forcing the calling convention
+ * to use non-register arguments.
+ */
+#if defined(CONFIG_X86_32) && \
+ defined(CONFIG_CC_IS_CLANG) && CONFIG_CLANG_VERSION < 190000
+# define ubsan_linkage asmlinkage
+#else
+# define ubsan_linkage
+#endif
+
+void ubsan_linkage __ubsan_handle_add_overflow(void *data, void *lhs, void *rhs);
+void ubsan_linkage __ubsan_handle_sub_overflow(void *data, void *lhs, void *rhs);
+void ubsan_linkage __ubsan_handle_mul_overflow(void *data, void *lhs, void *rhs);
+void ubsan_linkage __ubsan_handle_negate_overflow(void *_data, void *old_val);
+void ubsan_linkage __ubsan_handle_divrem_overflow(void *_data, void *lhs, void *rhs);
+void ubsan_linkage __ubsan_handle_type_mismatch(struct type_mismatch_data *data, void *ptr);
+void ubsan_linkage __ubsan_handle_type_mismatch_v1(void *_data, void *ptr);
+void ubsan_linkage __ubsan_handle_out_of_bounds(void *_data, void *index);
+void ubsan_linkage __ubsan_handle_shift_out_of_bounds(void *_data, void *lhs, void *rhs);
+void ubsan_linkage __ubsan_handle_builtin_unreachable(void *_data);
+void ubsan_linkage __ubsan_handle_load_invalid_value(void *_data, void *val);
+void ubsan_linkage __ubsan_handle_alignment_assumption(void *_data, unsigned long ptr,
+ unsigned long align,
+ unsigned long offset);
#endif
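The effect of the ubsan_linkage annotation can be reproduced outside the kernel: on x86-32 the kernel's asmlinkage expands to __attribute__((regparm(0))), forcing all arguments onto the stack regardless of -mregparm=3. A standalone sketch, with demo_linkage as a local stand-in for the macro above:

#include <stdio.h>

/* Force stack-based argument passing on 32-bit x86 with affected
 * Clang versions; expand to nothing everywhere else. */
#if defined(__i386__) && defined(__clang__) && __clang_major__ < 19
# define demo_linkage __attribute__((regparm(0)))
#else
# define demo_linkage
#endif

void demo_linkage report_overflow(void *data, void *lhs, void *rhs)
{
	printf("data=%p lhs=%p rhs=%p\n", data, lhs, rhs);
}

int main(void)
{
	int a = 1, b = 2;

	/* Built with -mregparm=3, a caller would otherwise pass these
	 * in registers, mismatching Clang's stack-based runtime calls. */
	report_overflow(NULL, &a, &b);
	return 0;
}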
diff --git a/lib/vdso/Kconfig b/lib/vdso/Kconfig
index d883ac299508..c46c2300517c 100644
--- a/lib/vdso/Kconfig
+++ b/lib/vdso/Kconfig
@@ -30,4 +30,11 @@ config GENERIC_VDSO_TIME_NS
Selected by architectures which support time namespaces in the
VDSO
+config GENERIC_VDSO_OVERFLOW_PROTECT
+ bool
+ help
+ Select to add multiplication overflow protection to the VDSO
+ time getter functions at the price of an extra conditional
+ in the hot path.
+
endif
diff --git a/lib/vdso/gettimeofday.c b/lib/vdso/gettimeofday.c
index ce2f69552003..899850bd6f0b 100644
--- a/lib/vdso/gettimeofday.c
+++ b/lib/vdso/gettimeofday.c
@@ -5,15 +5,23 @@
#include <vdso/datapage.h>
#include <vdso/helpers.h>
-#ifndef vdso_calc_delta
-/*
- * Default implementation which works for all sane clocksources. That
- * obviously excludes x86/TSC.
- */
-static __always_inline
-u64 vdso_calc_delta(u64 cycles, u64 last, u64 mask, u32 mult)
+#ifndef vdso_calc_ns
+
+#ifdef VDSO_DELTA_NOMASK
+# define VDSO_DELTA_MASK(vd) ULLONG_MAX
+#else
+# define VDSO_DELTA_MASK(vd) (vd->mask)
+#endif
+
+#ifdef CONFIG_GENERIC_VDSO_OVERFLOW_PROTECT
+static __always_inline bool vdso_delta_ok(const struct vdso_data *vd, u64 delta)
+{
+ return delta < vd->max_cycles;
+}
+#else
+static __always_inline bool vdso_delta_ok(const struct vdso_data *vd, u64 delta)
{
- return ((cycles - last) & mask) * mult;
+ return true;
}
#endif
@@ -24,6 +32,21 @@ static __always_inline u64 vdso_shift_ns(u64 ns, u32 shift)
}
#endif
+/*
+ * Default implementation which works for all sane clocksources. That
+ * obviously excludes x86/TSC.
+ */
+static __always_inline u64 vdso_calc_ns(const struct vdso_data *vd, u64 cycles, u64 base)
+{
+ u64 delta = (cycles - vd->cycle_last) & VDSO_DELTA_MASK(vd);
+
+ if (likely(vdso_delta_ok(vd, delta)))
+ return vdso_shift_ns((delta * vd->mult) + base, vd->shift);
+
+ return mul_u64_u32_add_u64_shr(delta, vd->mult, base, vd->shift);
+}
+#endif /* vdso_calc_ns */
+
#ifndef __arch_vdso_hres_capable
static inline bool __arch_vdso_hres_capable(void)
{
@@ -49,10 +72,10 @@ static inline bool vdso_cycles_ok(u64 cycles)
static __always_inline int do_hres_timens(const struct vdso_data *vdns, clockid_t clk,
struct __kernel_timespec *ts)
{
- const struct vdso_data *vd;
const struct timens_offset *offs = &vdns->offset[clk];
const struct vdso_timestamp *vdso_ts;
- u64 cycles, last, ns;
+ const struct vdso_data *vd;
+ u64 cycles, ns;
u32 seq;
s64 sec;
@@ -73,10 +96,7 @@ static __always_inline int do_hres_timens(const struct vdso_data *vdns, clockid_
cycles = __arch_get_hw_counter(vd->clock_mode, vd);
if (unlikely(!vdso_cycles_ok(cycles)))
return -1;
- ns = vdso_ts->nsec;
- last = vd->cycle_last;
- ns += vdso_calc_delta(cycles, last, vd->mask, vd->mult);
- ns = vdso_shift_ns(ns, vd->shift);
+ ns = vdso_calc_ns(vd, cycles, vdso_ts->nsec);
sec = vdso_ts->sec;
} while (unlikely(vdso_read_retry(vd, seq)));
@@ -111,7 +131,7 @@ static __always_inline int do_hres(const struct vdso_data *vd, clockid_t clk,
struct __kernel_timespec *ts)
{
const struct vdso_timestamp *vdso_ts = &vd->basetime[clk];
- u64 cycles, last, sec, ns;
+ u64 cycles, sec, ns;
u32 seq;
/* Allows to compile the high resolution parts out */
@@ -144,10 +164,7 @@ static __always_inline int do_hres(const struct vdso_data *vd, clockid_t clk,
cycles = __arch_get_hw_counter(vd->clock_mode, vd);
if (unlikely(!vdso_cycles_ok(cycles)))
return -1;
- ns = vdso_ts->nsec;
- last = vd->cycle_last;
- ns += vdso_calc_delta(cycles, last, vd->mask, vd->mult);
- ns = vdso_shift_ns(ns, vd->shift);
+ ns = vdso_calc_ns(vd, cycles, vdso_ts->nsec);
sec = vdso_ts->sec;
} while (unlikely(vdso_read_retry(vd, seq)));
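The arithmetic of the new helper is easiest to see in isolation: the fast path multiplies in 64 bits, and once delta can reach max_cycles the product delta * mult might overflow, so the slow path widens to 128 bits (the job mul_u64_u32_add_u64_shr() does above). A standalone sketch, assuming a compiler with unsigned __int128 and illustrative clock parameters:

#include <stdint.h>
#include <stdio.h>

/* Local stand-in for the relevant struct vdso_data fields. */
struct demo_vd {
	uint64_t cycle_last;
	uint64_t mask;
	uint64_t max_cycles;
	uint32_t mult;
	uint32_t shift;
};

static uint64_t demo_calc_ns(const struct demo_vd *vd, uint64_t cycles, uint64_t base)
{
	uint64_t delta = (cycles - vd->cycle_last) & vd->mask;

	if (delta < vd->max_cycles)	/* delta * mult cannot overflow */
		return (delta * vd->mult + base) >> vd->shift;

	/* Widening multiply, as mul_u64_u32_add_u64_shr() does. */
	return (uint64_t)(((unsigned __int128)delta * vd->mult + base) >> vd->shift);
}

int main(void)
{
	/* mult = 2^22 with shift = 22 maps one cycle to one nanosecond. */
	struct demo_vd vd = {
		.cycle_last = 1000, .mask = ~0ULL,
		.max_cycles = 1ULL << 40, .mult = 1u << 22, .shift = 22,
	};

	printf("%llu ns\n", (unsigned long long)demo_calc_ns(&vd, 2000, 0));
	return 0;
}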
diff --git a/lib/xarray.c b/lib/xarray.c
index 39f07bfc4dcc..32d4bac8c94c 100644
--- a/lib/xarray.c
+++ b/lib/xarray.c
@@ -200,7 +200,8 @@ static void *xas_start(struct xa_state *xas)
return entry;
}
-static void *xas_descend(struct xa_state *xas, struct xa_node *node)
+static __always_inline void *xas_descend(struct xa_state *xas,
+ struct xa_node *node)
{
unsigned int offset = get_offset(xas->xa_index, node);
void *entry = xa_entry(xas->xa, node, offset);
@@ -969,8 +970,22 @@ static unsigned int node_get_marks(struct xa_node *node, unsigned int offset)
return marks;
}
+static inline void node_mark_slots(struct xa_node *node, unsigned int sibs,
+ xa_mark_t mark)
+{
+ int i;
+
+ if (sibs == 0)
+ node_mark_all(node, mark);
+ else {
+ for (i = 0; i < XA_CHUNK_SIZE; i += sibs + 1)
+ node_set_mark(node, i, mark);
+ }
+}
+
static void node_set_marks(struct xa_node *node, unsigned int offset,
- struct xa_node *child, unsigned int marks)
+ struct xa_node *child, unsigned int sibs,
+ unsigned int marks)
{
xa_mark_t mark = XA_MARK_0;
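Why the stride of sibs + 1 in node_mark_slots(): after a split, a child node holds one canonical entry followed by sibs sibling slots, repeating across the chunk, and only canonical slots may carry a mark. A standalone sketch of the resulting slot pattern (assumes a 64-bit unsigned long, mirroring the default XA_CHUNK_SIZE of 64):

#include <stdio.h>

#define CHUNK_SIZE 64	/* mirrors the kernel's default XA_CHUNK_SIZE */

/* Build a bitmap of the slots that would receive the mark. */
static unsigned long mark_slots(unsigned int sibs)
{
	unsigned long marks = 0;
	unsigned int i;

	if (sibs == 0)
		return ~0UL;	/* node_mark_all(): every slot is canonical */
	for (i = 0; i < CHUNK_SIZE; i += sibs + 1)
		marks |= 1UL << i;
	return marks;
}

int main(void)
{
	/* sibs = 3, i.e. order-2 children: slots 0, 4, 8, ..., 60. */
	printf("mark bitmap: %016lx\n", mark_slots(3));
	return 0;
}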
@@ -978,7 +993,7 @@ static void node_set_marks(struct xa_node *node, unsigned int offset,
if (marks & (1 << (__force unsigned int)mark)) {
node_set_mark(node, offset, mark);
if (child)
- node_mark_all(child, mark);
+ node_mark_slots(child, sibs, mark);
}
if (mark == XA_MARK_MAX)
break;
@@ -1077,7 +1092,8 @@ void xas_split(struct xa_state *xas, void *entry, unsigned int order)
child->nr_values = xa_is_value(entry) ?
XA_CHUNK_SIZE : 0;
RCU_INIT_POINTER(child->parent, node);
- node_set_marks(node, offset, child, marks);
+ node_set_marks(node, offset, child, xas->xa_sibs,
+ marks);
rcu_assign_pointer(node->slots[offset],
xa_mk_node(child));
if (xa_is_value(curr))
@@ -1086,7 +1102,7 @@ void xas_split(struct xa_state *xas, void *entry, unsigned int order)
} else {
unsigned int canon = offset - xas->xa_sibs;
- node_set_marks(node, canon, NULL, marks);
+ node_set_marks(node, canon, NULL, 0, marks);
rcu_assign_pointer(node->slots[canon], entry);
while (offset > canon)
rcu_assign_pointer(node->slots[offset--],
@@ -1750,39 +1766,52 @@ unlock:
EXPORT_SYMBOL(xa_store_range);
/**
- * xa_get_order() - Get the order of an entry.
- * @xa: XArray.
- * @index: Index of the entry.
+ * xas_get_order() - Get the order of an entry.
+ * @xas: XArray operation state.
+ *
+ * Called after xas_load(). The xas must not be in an error state.
*
* Return: A number between 0 and 63 indicating the order of the entry.
*/
-int xa_get_order(struct xarray *xa, unsigned long index)
+int xas_get_order(struct xa_state *xas)
{
- XA_STATE(xas, xa, index);
- void *entry;
int order = 0;
- rcu_read_lock();
- entry = xas_load(&xas);
-
- if (!entry)
- goto unlock;
-
- if (!xas.xa_node)
- goto unlock;
+ if (!xas->xa_node)
+ return 0;
for (;;) {
- unsigned int slot = xas.xa_offset + (1 << order);
+ unsigned int slot = xas->xa_offset + (1 << order);
if (slot >= XA_CHUNK_SIZE)
break;
- if (!xa_is_sibling(xas.xa_node->slots[slot]))
+ if (!xa_is_sibling(xa_entry(xas->xa, xas->xa_node, slot)))
break;
order++;
}
- order += xas.xa_node->shift;
-unlock:
+ order += xas->xa_node->shift;
+ return order;
+}
+EXPORT_SYMBOL_GPL(xas_get_order);
+
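With the split in place, a caller that already holds the RCU read lock and has walked to an entry can query its order without the extra lookup that xa_get_order() performs; this is exactly how xa_get_order() is reimplemented below. A minimal caller-side sketch, with an illustrative index and expected order:

XA_STATE(xas, &array, 16);
void *entry;
int order = 0;

rcu_read_lock();
entry = xas_load(&xas);
if (entry)
	order = xas_get_order(&xas);	/* e.g. 2 if the entry spans 16..19 */
rcu_read_unlock();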
+/**
+ * xa_get_order() - Get the order of an entry.
+ * @xa: XArray.
+ * @index: Index of the entry.
+ *
+ * Return: A number between 0 and 63 indicating the order of the entry.
+ */
+int xa_get_order(struct xarray *xa, unsigned long index)
+{
+ XA_STATE(xas, xa, index);
+ int order = 0;
+ void *entry;
+
+ rcu_read_lock();
+ entry = xas_load(&xas);
+ if (entry)
+ order = xas_get_order(&xas);
rcu_read_unlock();
return order;