Diffstat (limited to 'lib')
34 files changed, 2593 insertions, 1525 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 90c88d752259..781f061ec0fa 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -1194,6 +1194,19 @@ config WQ_WATCHDOG state. This can be configured through kernel parameter "workqueue.watchdog_thresh" and its sysfs counterpart. +config WQ_CPU_INTENSIVE_REPORT + bool "Report per-cpu work items which hog CPU for too long" + depends on DEBUG_KERNEL + help + Say Y here to enable reporting of concurrency-managed per-cpu work + items that hog CPUs for longer than + workqueue.cpu_intensive_threshold_us. Workqueue automatically + detects and excludes them from concurrency management to prevent + them from stalling other per-cpu work items. Occassional + triggering may not necessarily indicate a problem. Repeated + triggering likely indicates that the work item should be switched + to use an unbound workqueue. + config TEST_LOCKUP tristate "Test module to generate lockups" depends on m @@ -2362,9 +2375,13 @@ config TEST_XARRAY tristate "Test the XArray code at runtime" config TEST_MAPLE_TREE - depends on DEBUG_KERNEL - select DEBUG_MAPLE_TREE - tristate "Test the Maple Tree code at runtime" + tristate "Test the Maple Tree code at runtime or module load" + help + Enable this option to test the maple tree code functions at boot, or + when the module is loaded. Enable "Debug Maple Trees" will enable + more verbose output on failures. + + If unsure, say N. config TEST_RHASHTABLE tristate "Perform selftest on resizable hash table" @@ -2513,6 +2530,23 @@ config BITFIELD_KUNIT If unsure, say N. +config CHECKSUM_KUNIT + tristate "KUnit test checksum functions at runtime" if !KUNIT_ALL_TESTS + depends on KUNIT + default KUNIT_ALL_TESTS + help + Enable this option to test the checksum functions at boot. + + KUnit tests run during boot and output the results to the debug log + in TAP format (http://testanything.org/). Only useful for kernel devs + running the KUnit test harness, and not intended for inclusion into a + production build. + + For more information on KUnit and unit tests in general please refer + to the KUnit documentation in Documentation/dev-tools/kunit/. + + If unsure, say N. + config HASH_KUNIT_TEST tristate "KUnit Test for integer hash functions" if !KUNIT_ALL_TESTS depends on KUNIT @@ -2705,7 +2739,7 @@ config STACKINIT_KUNIT_TEST config FORTIFY_KUNIT_TEST tristate "Test fortified str*() and mem*() function internals at runtime" if !KUNIT_ALL_TESTS - depends on KUNIT && FORTIFY_SOURCE + depends on KUNIT default KUNIT_ALL_TESTS help Builds unit tests for checking internals of FORTIFY_SOURCE as used @@ -2722,6 +2756,11 @@ config HW_BREAKPOINT_KUNIT_TEST If unsure, say N. +config STRCAT_KUNIT_TEST + tristate "Test strcat() family of functions at runtime" if !KUNIT_ALL_TESTS + depends on KUNIT + default KUNIT_ALL_TESTS + config STRSCPY_KUNIT_TEST tristate "Test strscpy*() family of functions at runtime" if !KUNIT_ALL_TESTS depends on KUNIT diff --git a/lib/Kconfig.ubsan b/lib/Kconfig.ubsan index fd15230a703b..efae7e011956 100644 --- a/lib/Kconfig.ubsan +++ b/lib/Kconfig.ubsan @@ -15,7 +15,6 @@ if UBSAN config UBSAN_TRAP bool "On Sanitizer warnings, abort the running kernel code" depends on !COMPILE_TEST - depends on $(cc-option, -fsanitize-undefined-trap-on-error) help Building kernels with Sanitizer features enabled tends to grow the kernel size by around 5%, due to adding all the debugging @@ -27,16 +26,29 @@ config UBSAN_TRAP the system. For some system builders this is an acceptable trade-off. 
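The WQ_CPU_INTENSIVE_REPORT entry added to lib/Kconfig.debug above describes the mechanism being reported on: a concurrency-managed per-cpu work item that keeps its CPU busy for longer than workqueue.cpu_intensive_threshold_us is automatically excluded from concurrency management, and with this option set the offending work function is also reported. As a rough illustration only (hog_fn, hog_work and queue_hog are invented names, not code from this series), such a work item and the remedy the help text suggests might look like:

#include <linux/workqueue.h>
#include <linux/delay.h>

/*
 * Busy-waits on the CPU it was queued on.  Once this exceeds
 * workqueue.cpu_intensive_threshold_us the workqueue code stops treating
 * it as concurrency managed; with WQ_CPU_INTENSIVE_REPORT=y it is reported.
 */
static void hog_fn(struct work_struct *work)
{
	mdelay(50);
}
static DECLARE_WORK(hog_work, hog_fn);

static void queue_hog(void)
{
	/* On the concurrency-managed, per-cpu system_wq this can be reported: */
	schedule_work(&hog_work);

	/*
	 * The remedy the help text suggests for repeat offenders is an
	 * unbound workqueue, e.g.:
	 *	queue_work(system_unbound_wq, &hog_work);
	 */
}

Occasional reports are tolerable; repeated reports are the cue to move the work item to an unbound workqueue as in the commented alternative.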
-config CC_HAS_UBSAN_BOUNDS - def_bool $(cc-option,-fsanitize=bounds) +config CC_HAS_UBSAN_BOUNDS_STRICT + def_bool $(cc-option,-fsanitize=bounds-strict) + help + The -fsanitize=bounds-strict option is only available on GCC, + but uses the more strict handling of arrays that includes knowledge + of flexible arrays, which is comparable to Clang's regular + -fsanitize=bounds. config CC_HAS_UBSAN_ARRAY_BOUNDS def_bool $(cc-option,-fsanitize=array-bounds) + help + Under Clang, the -fsanitize=bounds option is actually composed + of two more specific options, -fsanitize=array-bounds and + -fsanitize=local-bounds. However, -fsanitize=local-bounds can + only be used when trap mode is enabled. (See also the help for + CONFIG_LOCAL_BOUNDS.) Explicitly check for -fsanitize=array-bounds + so that we can build up the options needed for UBSAN_BOUNDS + with or without UBSAN_TRAP. config UBSAN_BOUNDS bool "Perform array index bounds checking" default UBSAN - depends on CC_HAS_UBSAN_ARRAY_BOUNDS || CC_HAS_UBSAN_BOUNDS + depends on CC_HAS_UBSAN_ARRAY_BOUNDS || CC_HAS_UBSAN_BOUNDS_STRICT help This option enables detection of directly indexed out of bounds array accesses, where the array size is known at compile time. @@ -44,33 +56,26 @@ config UBSAN_BOUNDS to the {str,mem}*cpy() family of functions (that is addressed by CONFIG_FORTIFY_SOURCE). -config UBSAN_ONLY_BOUNDS - def_bool CC_HAS_UBSAN_BOUNDS && !CC_HAS_UBSAN_ARRAY_BOUNDS - depends on UBSAN_BOUNDS +config UBSAN_BOUNDS_STRICT + def_bool UBSAN_BOUNDS && CC_HAS_UBSAN_BOUNDS_STRICT help - This is a weird case: Clang's -fsanitize=bounds includes - -fsanitize=local-bounds, but it's trapping-only, so for - Clang, we must use -fsanitize=array-bounds when we want - traditional array bounds checking enabled. For GCC, we - want -fsanitize=bounds. + GCC's bounds sanitizer. This option is used to select the + correct options in Makefile.ubsan. config UBSAN_ARRAY_BOUNDS - def_bool CC_HAS_UBSAN_ARRAY_BOUNDS - depends on UBSAN_BOUNDS + def_bool UBSAN_BOUNDS && CC_HAS_UBSAN_ARRAY_BOUNDS + help + Clang's array bounds sanitizer. This option is used to select + the correct options in Makefile.ubsan. config UBSAN_LOCAL_BOUNDS - bool "Perform array local bounds checking" - depends on UBSAN_TRAP - depends on $(cc-option,-fsanitize=local-bounds) - help - This option enables -fsanitize=local-bounds which traps when an - exception/error is detected. Therefore, it may only be enabled - with CONFIG_UBSAN_TRAP. - - Enabling this option detects errors due to accesses through a - pointer that is derived from an object of a statically-known size, - where an added offset (which may not be known statically) is - out-of-bounds. + def_bool UBSAN_ARRAY_BOUNDS && UBSAN_TRAP + help + This option enables Clang's -fsanitize=local-bounds which traps + when an access through a pointer that is derived from an object + of a statically-known size, where an added offset (which may not + be known statically) is out-of-bounds. Since this option is + trap-only, it depends on CONFIG_UBSAN_TRAP. 
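A minimal sketch of the two access patterns the reworked help texts distinguish (read_table, read_nearby and their variables are invented for illustration, not part of the patch): direct indexing of a compile-time-sized array is what UBSAN_BOUNDS covers through GCC's -fsanitize=bounds-strict or Clang's -fsanitize=array-bounds, while an access through a pointer derived from an object of statically-known size plus a runtime offset is the extra case Clang's trap-only -fsanitize=local-bounds (UBSAN_LOCAL_BOUNDS) instruments:

static int table[16];

/*
 * Directly indexed, compile-time-sized array: an out-of-range idx is what
 * -fsanitize=bounds-strict (GCC) / -fsanitize=array-bounds (Clang) detect.
 */
int read_table(int idx)
{
	return table[idx];
}

/*
 * Pointer derived from an object of statically-known size plus a runtime
 * offset: the additional case -fsanitize=local-bounds covers, and since it
 * can only trap, UBSAN_LOCAL_BOUNDS now selects itself only with UBSAN_TRAP.
 */
int read_nearby(int off)
{
	int buf[8] = { 0 };
	int *p = buf;

	return *(p + off);
}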
config UBSAN_SHIFT bool "Perform checking for bit-shift overflows" diff --git a/lib/Makefile b/lib/Makefile index 876fcdeae34e..42d307ade225 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -30,7 +30,7 @@ endif lib-y := ctype.o string.o vsprintf.o cmdline.o \ rbtree.o radix-tree.o timerqueue.o xarray.o \ maple_tree.o idr.o extable.o irq_regs.o argv_split.o \ - flex_proportions.o ratelimit.o show_mem.o \ + flex_proportions.o ratelimit.o \ is_single_threaded.o plist.o decompress.o kobject_uevent.o \ earlycpio.o seq_buf.o siphash.o dec_and_lock.o \ nmi_backtrace.o win_minmax.o memcat_p.o \ @@ -377,6 +377,7 @@ obj-$(CONFIG_PLDMFW) += pldmfw/ # KUnit tests CFLAGS_bitfield_kunit.o := $(DISABLE_STRUCTLEAK_PLUGIN) obj-$(CONFIG_BITFIELD_KUNIT) += bitfield_kunit.o +obj-$(CONFIG_CHECKSUM_KUNIT) += checksum_kunit.o obj-$(CONFIG_LIST_KUNIT_TEST) += list-test.o obj-$(CONFIG_HASHTABLE_KUNIT_TEST) += hashtable_test.o obj-$(CONFIG_LINEAR_RANGES_TEST) += test_linear_ranges.o @@ -392,6 +393,7 @@ obj-$(CONFIG_STACKINIT_KUNIT_TEST) += stackinit_kunit.o CFLAGS_fortify_kunit.o += $(call cc-disable-warning, unsequenced) CFLAGS_fortify_kunit.o += $(DISABLE_STRUCTLEAK_PLUGIN) obj-$(CONFIG_FORTIFY_KUNIT_TEST) += fortify_kunit.o +obj-$(CONFIG_STRCAT_KUNIT_TEST) += strcat_kunit.o obj-$(CONFIG_STRSCPY_KUNIT_TEST) += strscpy_kunit.o obj-$(CONFIG_SIPHASH_KUNIT_TEST) += siphash_kunit.o diff --git a/lib/checksum_kunit.c b/lib/checksum_kunit.c new file mode 100644 index 000000000000..ace3c4799fe1 --- /dev/null +++ b/lib/checksum_kunit.c @@ -0,0 +1,334 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Test cases csum_partial and csum_fold + */ + +#include <kunit/test.h> +#include <asm/checksum.h> + +#define MAX_LEN 512 +#define MAX_ALIGN 64 +#define TEST_BUFLEN (MAX_LEN + MAX_ALIGN) + +static const __wsum random_init_sum = 0x2847aab; +static const u8 random_buf[] = { + 0xac, 0xd7, 0x76, 0x69, 0x6e, 0xf2, 0x93, 0x2c, 0x1f, 0xe0, 0xde, 0x86, + 0x8f, 0x54, 0x33, 0x90, 0x95, 0xbf, 0xff, 0xb9, 0xea, 0x62, 0x6e, 0xb5, + 0xd3, 0x4f, 0xf5, 0x60, 0x50, 0x5c, 0xc7, 0xfa, 0x6d, 0x1a, 0xc7, 0xf0, + 0xd2, 0x2c, 0x12, 0x3d, 0x88, 0xe3, 0x14, 0x21, 0xb1, 0x5e, 0x45, 0x31, + 0xa2, 0x85, 0x36, 0x76, 0xba, 0xd8, 0xad, 0xbb, 0x9e, 0x49, 0x8f, 0xf7, + 0xce, 0xea, 0xef, 0xca, 0x2c, 0x29, 0xf7, 0x15, 0x5c, 0x1d, 0x4d, 0x09, + 0x1f, 0xe2, 0x14, 0x31, 0x8c, 0x07, 0x57, 0x23, 0x1f, 0x6f, 0x03, 0xe1, + 0x93, 0x19, 0x53, 0x03, 0x45, 0x49, 0x9a, 0x3b, 0x8e, 0x0c, 0x12, 0x5d, + 0x8a, 0xb8, 0x9b, 0x8c, 0x9a, 0x03, 0xe5, 0xa2, 0x43, 0xd2, 0x3b, 0x4e, + 0x7e, 0x30, 0x3c, 0x22, 0x2d, 0xc5, 0xfc, 0x9e, 0xdb, 0xc6, 0xf9, 0x69, + 0x12, 0x39, 0x1f, 0xa0, 0x11, 0x0c, 0x3f, 0xf5, 0x53, 0xc9, 0x30, 0xfb, + 0xb0, 0xdd, 0x21, 0x1d, 0x34, 0xe2, 0x65, 0x30, 0xf1, 0xe8, 0x1b, 0xe7, + 0x55, 0x0d, 0xeb, 0xbd, 0xcc, 0x9d, 0x24, 0xa4, 0xad, 0xa7, 0x93, 0x47, + 0x19, 0x2e, 0xc4, 0x5c, 0x3b, 0xc7, 0x6d, 0x95, 0x0c, 0x47, 0x60, 0xaf, + 0x5b, 0x47, 0xee, 0xdc, 0x31, 0x31, 0x14, 0x12, 0x7e, 0x9e, 0x45, 0xb1, + 0xc1, 0x69, 0x4b, 0x84, 0xfc, 0x88, 0xc1, 0x9e, 0x46, 0xb4, 0xc2, 0x25, + 0xc5, 0x6c, 0x4c, 0x22, 0x58, 0x5c, 0xbe, 0xff, 0xea, 0x88, 0x88, 0x7a, + 0xcb, 0x1c, 0x5d, 0x63, 0xa1, 0xf2, 0x33, 0x0c, 0xa2, 0x16, 0x0b, 0x6e, + 0x2b, 0x79, 0x58, 0xf7, 0xac, 0xd3, 0x6a, 0x3f, 0x81, 0x57, 0x48, 0x45, + 0xe3, 0x7c, 0xdc, 0xd6, 0x34, 0x7e, 0xe6, 0x73, 0xfa, 0xcb, 0x31, 0x18, + 0xa9, 0x0b, 0xee, 0x6b, 0x99, 0xb9, 0x2d, 0xde, 0x22, 0x0e, 0x71, 0x57, + 0x0e, 0x9b, 0x11, 0xd1, 0x15, 0x41, 0xd0, 0x6b, 0x50, 0x8a, 0x23, 0x64, + 0xe3, 0x9c, 0xb3, 0x55, 0x09, 0xe9, 0x32, 0x67, 0xf9, 0xe0, 
0x73, 0xf1, + 0x60, 0x66, 0x0b, 0x88, 0x79, 0x8d, 0x4b, 0x52, 0x83, 0x20, 0x26, 0x78, + 0x49, 0x27, 0xe7, 0x3e, 0x29, 0xa8, 0x18, 0x82, 0x41, 0xdd, 0x1e, 0xcc, + 0x3b, 0xc4, 0x65, 0xd1, 0x21, 0x40, 0x72, 0xb2, 0x87, 0x5e, 0x16, 0x10, + 0x80, 0x3f, 0x4b, 0x58, 0x1c, 0xc2, 0x79, 0x20, 0xf0, 0xe0, 0x80, 0xd3, + 0x52, 0xa5, 0x19, 0x6e, 0x47, 0x90, 0x08, 0xf5, 0x50, 0xe2, 0xd6, 0xae, + 0xe9, 0x2e, 0xdc, 0xd5, 0xb4, 0x90, 0x1f, 0x79, 0x49, 0x82, 0x21, 0x84, + 0xa0, 0xb5, 0x2f, 0xff, 0x30, 0x71, 0xed, 0x80, 0x68, 0xb1, 0x6d, 0xef, + 0xf6, 0xcf, 0xb8, 0x41, 0x79, 0xf5, 0x01, 0xbc, 0x0c, 0x9b, 0x0e, 0x06, + 0xf3, 0xb0, 0xbb, 0x97, 0xb8, 0xb1, 0xfd, 0x51, 0x4e, 0xef, 0x0a, 0x3d, + 0x7a, 0x3d, 0xbd, 0x61, 0x00, 0xa2, 0xb3, 0xf0, 0x1d, 0x77, 0x7b, 0x6c, + 0x01, 0x61, 0xa5, 0xa3, 0xdb, 0xd5, 0xd5, 0xf4, 0xb5, 0x28, 0x9f, 0x0a, + 0xa3, 0x82, 0x5f, 0x4b, 0x40, 0x0f, 0x05, 0x0e, 0x78, 0xed, 0xbf, 0x17, + 0xf6, 0x5a, 0x8a, 0x7d, 0xf9, 0x45, 0xc1, 0xd7, 0x1b, 0x9d, 0x6c, 0x07, + 0x88, 0xf3, 0xbc, 0xf1, 0xea, 0x28, 0x1f, 0xb8, 0x7a, 0x60, 0x3c, 0xce, + 0x3e, 0x50, 0xb2, 0x0b, 0xcf, 0xe5, 0x08, 0x1f, 0x48, 0x04, 0xf9, 0x35, + 0x29, 0x15, 0xbe, 0x82, 0x96, 0xc2, 0x55, 0x04, 0x6c, 0x19, 0x45, 0x29, + 0x0b, 0xb6, 0x49, 0x12, 0xfb, 0x8d, 0x1b, 0x75, 0x8b, 0xd9, 0x6a, 0x5c, + 0xbe, 0x46, 0x2b, 0x41, 0xfe, 0x21, 0xad, 0x1f, 0x75, 0xe7, 0x90, 0x3d, + 0xe1, 0xdf, 0x4b, 0xe1, 0x81, 0xe2, 0x17, 0x02, 0x7b, 0x58, 0x8b, 0x92, + 0x1a, 0xac, 0x46, 0xdd, 0x2e, 0xce, 0x40, 0x09 +}; +static const __sum16 expected_results[] = { + 0x82d0, 0x8224, 0xab23, 0xaaad, 0x41ad, 0x413f, 0x4f3e, 0x4eab, 0x22ab, + 0x228c, 0x428b, 0x41ad, 0xbbac, 0xbb1d, 0x671d, 0x66ea, 0xd6e9, 0xd654, + 0x1754, 0x1655, 0x5d54, 0x5c6a, 0xfa69, 0xf9fb, 0x44fb, 0x4428, 0xf527, + 0xf432, 0x9432, 0x93e2, 0x37e2, 0x371b, 0x3d1a, 0x3cad, 0x22ad, 0x21e6, + 0x31e5, 0x3113, 0x0513, 0x0501, 0xc800, 0xc778, 0xe477, 0xe463, 0xc363, + 0xc2b2, 0x64b2, 0x646d, 0x336d, 0x32cb, 0xadca, 0xad94, 0x3794, 0x36da, + 0x5ed9, 0x5e2c, 0xa32b, 0xa28d, 0x598d, 0x58fe, 0x61fd, 0x612f, 0x772e, + 0x763f, 0xac3e, 0xac12, 0x8312, 0x821b, 0x6d1b, 0x6cbf, 0x4fbf, 0x4f72, + 0x4672, 0x4653, 0x6452, 0x643e, 0x333e, 0x32b2, 0x2bb2, 0x2b5b, 0x085b, + 0x083c, 0x993b, 0x9938, 0xb837, 0xb7a4, 0x9ea4, 0x9e51, 0x9b51, 0x9b0c, + 0x520c, 0x5172, 0x1672, 0x15e4, 0x09e4, 0x09d2, 0xacd1, 0xac47, 0xf446, + 0xf3ab, 0x67ab, 0x6711, 0x6411, 0x632c, 0xc12b, 0xc0e8, 0xeee7, 0xeeac, + 0xa0ac, 0xa02e, 0x702e, 0x6ff2, 0x4df2, 0x4dc5, 0x88c4, 0x87c8, 0xe9c7, + 0xe8ec, 0x22ec, 0x21f3, 0xb8f2, 0xb8e0, 0x7fe0, 0x7fc1, 0xdfc0, 0xdfaf, + 0xd3af, 0xd370, 0xde6f, 0xde1c, 0x151c, 0x14ec, 0x19eb, 0x193b, 0x3c3a, + 0x3c19, 0x1f19, 0x1ee5, 0x3ce4, 0x3c7f, 0x0c7f, 0x0b8e, 0x238d, 0x2372, + 0x3c71, 0x3c1c, 0x2f1c, 0x2e31, 0x7130, 0x7064, 0xd363, 0xd33f, 0x2f3f, + 0x2e92, 0x8791, 0x86fe, 0x3ffe, 0x3fe5, 0x11e5, 0x1121, 0xb520, 0xb4e5, + 0xede4, 0xed77, 0x5877, 0x586b, 0x116b, 0x110b, 0x620a, 0x61af, 0x1aaf, + 0x19c1, 0x3dc0, 0x3d8f, 0x0c8f, 0x0c7b, 0xfa7a, 0xf9fc, 0x5bfc, 0x5bb7, + 0xaab6, 0xa9f5, 0x40f5, 0x40aa, 0xbca9, 0xbbad, 0x33ad, 0x32ec, 0x94eb, + 0x94a5, 0xe0a4, 0xdfe2, 0xbae2, 0xba1d, 0x4e1d, 0x4dd1, 0x2bd1, 0x2b79, + 0xcf78, 0xceba, 0xcfb9, 0xcecf, 0x46cf, 0x4647, 0xcc46, 0xcb7b, 0xaf7b, + 0xaf1e, 0x4c1e, 0x4b7d, 0x597c, 0x5949, 0x4d49, 0x4ca7, 0x36a7, 0x369c, + 0xc89b, 0xc870, 0x4f70, 0x4f18, 0x5817, 0x576b, 0x846a, 0x8400, 0x4500, + 0x447f, 0xed7e, 0xed36, 0xa836, 0xa753, 0x2b53, 0x2a77, 0x5476, 0x5442, + 0xd641, 0xd55b, 0x625b, 0x6161, 0x9660, 0x962f, 0x7e2f, 0x7d86, 0x7286, + 0x7198, 0x0698, 0x05ff, 0x4cfe, 
0x4cd1, 0x6ed0, 0x6eae, 0x60ae, 0x603d, + 0x093d, 0x092f, 0x6e2e, 0x6e1d, 0x9d1c, 0x9d07, 0x5c07, 0x5b37, 0xf036, + 0xefe6, 0x65e6, 0x65c3, 0x01c3, 0x00e0, 0x64df, 0x642c, 0x0f2c, 0x0f23, + 0x2622, 0x25f0, 0xbeef, 0xbdf6, 0xddf5, 0xdd82, 0xec81, 0xec21, 0x8621, + 0x8616, 0xfe15, 0xfd9c, 0x709c, 0x7051, 0x1e51, 0x1dce, 0xfdcd, 0xfda7, + 0x85a7, 0x855e, 0x5e5e, 0x5d77, 0x1f77, 0x1f4e, 0x774d, 0x7735, 0xf534, + 0xf4f3, 0x17f3, 0x17d5, 0x4bd4, 0x4b99, 0x8798, 0x8733, 0xb632, 0xb611, + 0x7611, 0x759f, 0xc39e, 0xc317, 0x6517, 0x6501, 0x5501, 0x5481, 0x1581, + 0x1536, 0xbd35, 0xbd19, 0xfb18, 0xfa9f, 0xda9f, 0xd9af, 0xf9ae, 0xf92e, + 0x262e, 0x25dc, 0x80db, 0x80c2, 0x12c2, 0x127b, 0x827a, 0x8272, 0x8d71, + 0x8d21, 0xab20, 0xaa4a, 0xfc49, 0xfb60, 0xcd60, 0xcc84, 0xf783, 0xf6cf, + 0x66cf, 0x66b0, 0xedaf, 0xed66, 0x6b66, 0x6b45, 0xe744, 0xe6a4, 0x31a4, + 0x3175, 0x3274, 0x3244, 0xc143, 0xc056, 0x4056, 0x3fee, 0x8eed, 0x8e80, + 0x9f7f, 0x9e89, 0xcf88, 0xced0, 0x8dd0, 0x8d57, 0x9856, 0x9855, 0xdc54, + 0xdc48, 0x4148, 0x413a, 0x3b3a, 0x3a47, 0x8a46, 0x898b, 0xf28a, 0xf1d2, + 0x40d2, 0x3fd5, 0xeed4, 0xee86, 0xff85, 0xff7b, 0xc27b, 0xc201, 0x8501, + 0x8444, 0x2344, 0x2344, 0x8143, 0x8090, 0x908f, 0x9072, 0x1972, 0x18f7, + 0xacf6, 0xacf5, 0x4bf5, 0x4b50, 0xa84f, 0xa774, 0xd273, 0xd19e, 0xdd9d, + 0xdce8, 0xb4e8, 0xb449, 0xaa49, 0xa9a6, 0x27a6, 0x2747, 0xdc46, 0xdc06, + 0xcd06, 0xcd01, 0xbf01, 0xbe89, 0xd188, 0xd0c9, 0xb9c9, 0xb8d3, 0x5ed3, + 0x5e49, 0xe148, 0xe04f, 0x9b4f, 0x9a8e, 0xc38d, 0xc372, 0x2672, 0x2606, + 0x1f06, 0x1e7e, 0x2b7d, 0x2ac1, 0x39c0, 0x38d6, 0x10d6, 0x10b7, 0x58b6, + 0x583c, 0xf83b, 0xf7ff, 0x29ff, 0x29c1, 0xd9c0, 0xd90e, 0xce0e, 0xcd3f, + 0xe83e, 0xe836, 0xc936, 0xc8ee, 0xc4ee, 0xc3f5, 0x8ef5, 0x8ecc, 0x79cc, + 0x790e, 0xf70d, 0xf677, 0x3477, 0x3422, 0x3022, 0x2fb6, 0x16b6, 0x1671, + 0xed70, 0xed65, 0x3765, 0x371c, 0x251c, 0x2421, 0x9720, 0x9705, 0x2205, + 0x217a, 0x4879, 0x480f, 0xec0e, 0xeb50, 0xa550, 0xa525, 0x6425, 0x6327, + 0x4227, 0x417a, 0x227a, 0x2205, 0x3b04, 0x3a74, 0xfd73, 0xfc92, 0x1d92, + 0x1d47, 0x3c46, 0x3bc5, 0x59c4, 0x59ad, 0x57ad, 0x5732, 0xff31, 0xfea6, + 0x6ca6, 0x6c8c, 0xc08b, 0xc045, 0xe344, 0xe316, 0x1516, 0x14d6, +}; +static const __wsum init_sums_no_overflow[] = { + 0xffffffff, 0xfffffffb, 0xfffffbfb, 0xfffffbf7, 0xfffff7f7, 0xfffff7f3, + 0xfffff3f3, 0xfffff3ef, 0xffffefef, 0xffffefeb, 0xffffebeb, 0xffffebe7, + 0xffffe7e7, 0xffffe7e3, 0xffffe3e3, 0xffffe3df, 0xffffdfdf, 0xffffdfdb, + 0xffffdbdb, 0xffffdbd7, 0xffffd7d7, 0xffffd7d3, 0xffffd3d3, 0xffffd3cf, + 0xffffcfcf, 0xffffcfcb, 0xffffcbcb, 0xffffcbc7, 0xffffc7c7, 0xffffc7c3, + 0xffffc3c3, 0xffffc3bf, 0xffffbfbf, 0xffffbfbb, 0xffffbbbb, 0xffffbbb7, + 0xffffb7b7, 0xffffb7b3, 0xffffb3b3, 0xffffb3af, 0xffffafaf, 0xffffafab, + 0xffffabab, 0xffffaba7, 0xffffa7a7, 0xffffa7a3, 0xffffa3a3, 0xffffa39f, + 0xffff9f9f, 0xffff9f9b, 0xffff9b9b, 0xffff9b97, 0xffff9797, 0xffff9793, + 0xffff9393, 0xffff938f, 0xffff8f8f, 0xffff8f8b, 0xffff8b8b, 0xffff8b87, + 0xffff8787, 0xffff8783, 0xffff8383, 0xffff837f, 0xffff7f7f, 0xffff7f7b, + 0xffff7b7b, 0xffff7b77, 0xffff7777, 0xffff7773, 0xffff7373, 0xffff736f, + 0xffff6f6f, 0xffff6f6b, 0xffff6b6b, 0xffff6b67, 0xffff6767, 0xffff6763, + 0xffff6363, 0xffff635f, 0xffff5f5f, 0xffff5f5b, 0xffff5b5b, 0xffff5b57, + 0xffff5757, 0xffff5753, 0xffff5353, 0xffff534f, 0xffff4f4f, 0xffff4f4b, + 0xffff4b4b, 0xffff4b47, 0xffff4747, 0xffff4743, 0xffff4343, 0xffff433f, + 0xffff3f3f, 0xffff3f3b, 0xffff3b3b, 0xffff3b37, 0xffff3737, 0xffff3733, + 0xffff3333, 0xffff332f, 0xffff2f2f, 0xffff2f2b, 0xffff2b2b, 
0xffff2b27, + 0xffff2727, 0xffff2723, 0xffff2323, 0xffff231f, 0xffff1f1f, 0xffff1f1b, + 0xffff1b1b, 0xffff1b17, 0xffff1717, 0xffff1713, 0xffff1313, 0xffff130f, + 0xffff0f0f, 0xffff0f0b, 0xffff0b0b, 0xffff0b07, 0xffff0707, 0xffff0703, + 0xffff0303, 0xffff02ff, 0xfffffefe, 0xfffffefa, 0xfffffafa, 0xfffffaf6, + 0xfffff6f6, 0xfffff6f2, 0xfffff2f2, 0xfffff2ee, 0xffffeeee, 0xffffeeea, + 0xffffeaea, 0xffffeae6, 0xffffe6e6, 0xffffe6e2, 0xffffe2e2, 0xffffe2de, + 0xffffdede, 0xffffdeda, 0xffffdada, 0xffffdad6, 0xffffd6d6, 0xffffd6d2, + 0xffffd2d2, 0xffffd2ce, 0xffffcece, 0xffffceca, 0xffffcaca, 0xffffcac6, + 0xffffc6c6, 0xffffc6c2, 0xffffc2c2, 0xffffc2be, 0xffffbebe, 0xffffbeba, + 0xffffbaba, 0xffffbab6, 0xffffb6b6, 0xffffb6b2, 0xffffb2b2, 0xffffb2ae, + 0xffffaeae, 0xffffaeaa, 0xffffaaaa, 0xffffaaa6, 0xffffa6a6, 0xffffa6a2, + 0xffffa2a2, 0xffffa29e, 0xffff9e9e, 0xffff9e9a, 0xffff9a9a, 0xffff9a96, + 0xffff9696, 0xffff9692, 0xffff9292, 0xffff928e, 0xffff8e8e, 0xffff8e8a, + 0xffff8a8a, 0xffff8a86, 0xffff8686, 0xffff8682, 0xffff8282, 0xffff827e, + 0xffff7e7e, 0xffff7e7a, 0xffff7a7a, 0xffff7a76, 0xffff7676, 0xffff7672, + 0xffff7272, 0xffff726e, 0xffff6e6e, 0xffff6e6a, 0xffff6a6a, 0xffff6a66, + 0xffff6666, 0xffff6662, 0xffff6262, 0xffff625e, 0xffff5e5e, 0xffff5e5a, + 0xffff5a5a, 0xffff5a56, 0xffff5656, 0xffff5652, 0xffff5252, 0xffff524e, + 0xffff4e4e, 0xffff4e4a, 0xffff4a4a, 0xffff4a46, 0xffff4646, 0xffff4642, + 0xffff4242, 0xffff423e, 0xffff3e3e, 0xffff3e3a, 0xffff3a3a, 0xffff3a36, + 0xffff3636, 0xffff3632, 0xffff3232, 0xffff322e, 0xffff2e2e, 0xffff2e2a, + 0xffff2a2a, 0xffff2a26, 0xffff2626, 0xffff2622, 0xffff2222, 0xffff221e, + 0xffff1e1e, 0xffff1e1a, 0xffff1a1a, 0xffff1a16, 0xffff1616, 0xffff1612, + 0xffff1212, 0xffff120e, 0xffff0e0e, 0xffff0e0a, 0xffff0a0a, 0xffff0a06, + 0xffff0606, 0xffff0602, 0xffff0202, 0xffff01fe, 0xfffffdfd, 0xfffffdf9, + 0xfffff9f9, 0xfffff9f5, 0xfffff5f5, 0xfffff5f1, 0xfffff1f1, 0xfffff1ed, + 0xffffeded, 0xffffede9, 0xffffe9e9, 0xffffe9e5, 0xffffe5e5, 0xffffe5e1, + 0xffffe1e1, 0xffffe1dd, 0xffffdddd, 0xffffddd9, 0xffffd9d9, 0xffffd9d5, + 0xffffd5d5, 0xffffd5d1, 0xffffd1d1, 0xffffd1cd, 0xffffcdcd, 0xffffcdc9, + 0xffffc9c9, 0xffffc9c5, 0xffffc5c5, 0xffffc5c1, 0xffffc1c1, 0xffffc1bd, + 0xffffbdbd, 0xffffbdb9, 0xffffb9b9, 0xffffb9b5, 0xffffb5b5, 0xffffb5b1, + 0xffffb1b1, 0xffffb1ad, 0xffffadad, 0xffffada9, 0xffffa9a9, 0xffffa9a5, + 0xffffa5a5, 0xffffa5a1, 0xffffa1a1, 0xffffa19d, 0xffff9d9d, 0xffff9d99, + 0xffff9999, 0xffff9995, 0xffff9595, 0xffff9591, 0xffff9191, 0xffff918d, + 0xffff8d8d, 0xffff8d89, 0xffff8989, 0xffff8985, 0xffff8585, 0xffff8581, + 0xffff8181, 0xffff817d, 0xffff7d7d, 0xffff7d79, 0xffff7979, 0xffff7975, + 0xffff7575, 0xffff7571, 0xffff7171, 0xffff716d, 0xffff6d6d, 0xffff6d69, + 0xffff6969, 0xffff6965, 0xffff6565, 0xffff6561, 0xffff6161, 0xffff615d, + 0xffff5d5d, 0xffff5d59, 0xffff5959, 0xffff5955, 0xffff5555, 0xffff5551, + 0xffff5151, 0xffff514d, 0xffff4d4d, 0xffff4d49, 0xffff4949, 0xffff4945, + 0xffff4545, 0xffff4541, 0xffff4141, 0xffff413d, 0xffff3d3d, 0xffff3d39, + 0xffff3939, 0xffff3935, 0xffff3535, 0xffff3531, 0xffff3131, 0xffff312d, + 0xffff2d2d, 0xffff2d29, 0xffff2929, 0xffff2925, 0xffff2525, 0xffff2521, + 0xffff2121, 0xffff211d, 0xffff1d1d, 0xffff1d19, 0xffff1919, 0xffff1915, + 0xffff1515, 0xffff1511, 0xffff1111, 0xffff110d, 0xffff0d0d, 0xffff0d09, + 0xffff0909, 0xffff0905, 0xffff0505, 0xffff0501, 0xffff0101, 0xffff00fd, + 0xfffffcfc, 0xfffffcf8, 0xfffff8f8, 0xfffff8f4, 0xfffff4f4, 0xfffff4f0, + 0xfffff0f0, 0xfffff0ec, 0xffffecec, 0xffffece8, 0xffffe8e8, 
0xffffe8e4, + 0xffffe4e4, 0xffffe4e0, 0xffffe0e0, 0xffffe0dc, 0xffffdcdc, 0xffffdcd8, + 0xffffd8d8, 0xffffd8d4, 0xffffd4d4, 0xffffd4d0, 0xffffd0d0, 0xffffd0cc, + 0xffffcccc, 0xffffccc8, 0xffffc8c8, 0xffffc8c4, 0xffffc4c4, 0xffffc4c0, + 0xffffc0c0, 0xffffc0bc, 0xffffbcbc, 0xffffbcb8, 0xffffb8b8, 0xffffb8b4, + 0xffffb4b4, 0xffffb4b0, 0xffffb0b0, 0xffffb0ac, 0xffffacac, 0xffffaca8, + 0xffffa8a8, 0xffffa8a4, 0xffffa4a4, 0xffffa4a0, 0xffffa0a0, 0xffffa09c, + 0xffff9c9c, 0xffff9c98, 0xffff9898, 0xffff9894, 0xffff9494, 0xffff9490, + 0xffff9090, 0xffff908c, 0xffff8c8c, 0xffff8c88, 0xffff8888, 0xffff8884, + 0xffff8484, 0xffff8480, 0xffff8080, 0xffff807c, 0xffff7c7c, 0xffff7c78, + 0xffff7878, 0xffff7874, 0xffff7474, 0xffff7470, 0xffff7070, 0xffff706c, + 0xffff6c6c, 0xffff6c68, 0xffff6868, 0xffff6864, 0xffff6464, 0xffff6460, + 0xffff6060, 0xffff605c, 0xffff5c5c, 0xffff5c58, 0xffff5858, 0xffff5854, + 0xffff5454, 0xffff5450, 0xffff5050, 0xffff504c, 0xffff4c4c, 0xffff4c48, + 0xffff4848, 0xffff4844, 0xffff4444, 0xffff4440, 0xffff4040, 0xffff403c, + 0xffff3c3c, 0xffff3c38, 0xffff3838, 0xffff3834, 0xffff3434, 0xffff3430, + 0xffff3030, 0xffff302c, 0xffff2c2c, 0xffff2c28, 0xffff2828, 0xffff2824, + 0xffff2424, 0xffff2420, 0xffff2020, 0xffff201c, 0xffff1c1c, 0xffff1c18, + 0xffff1818, 0xffff1814, 0xffff1414, 0xffff1410, 0xffff1010, 0xffff100c, + 0xffff0c0c, 0xffff0c08, 0xffff0808, 0xffff0804, 0xffff0404, 0xffff0400, + 0xffff0000, 0xfffffffb, +}; + +static u8 tmp_buf[TEST_BUFLEN]; + +#define full_csum(buff, len, sum) csum_fold(csum_partial(buff, len, sum)) + +#define CHECK_EQ(lhs, rhs) KUNIT_ASSERT_EQ(test, lhs, rhs) + +static void assert_setup_correct(struct kunit *test) +{ + CHECK_EQ(sizeof(random_buf) / sizeof(random_buf[0]), MAX_LEN); + CHECK_EQ(sizeof(expected_results) / sizeof(expected_results[0]), + MAX_LEN); + CHECK_EQ(sizeof(init_sums_no_overflow) / + sizeof(init_sums_no_overflow[0]), + MAX_LEN); +} + +/* + * Test with randomized input (pre determined random with known results). + */ +static void test_csum_fixed_random_inputs(struct kunit *test) +{ + int len, align; + __wsum result, expec, sum; + + assert_setup_correct(test); + for (align = 0; align < TEST_BUFLEN; ++align) { + memcpy(&tmp_buf[align], random_buf, + min(MAX_LEN, TEST_BUFLEN - align)); + for (len = 0; len < MAX_LEN && (align + len) < TEST_BUFLEN; + ++len) { + /* + * Test the precomputed random input. + */ + sum = random_init_sum; + result = full_csum(&tmp_buf[align], len, sum); + expec = expected_results[len]; + CHECK_EQ(result, expec); + } + } +} + +/* + * All ones input test. If there are any missing carry operations, it fails. + */ +static void test_csum_all_carry_inputs(struct kunit *test) +{ + int len, align; + __wsum result, expec, sum; + + assert_setup_correct(test); + memset(tmp_buf, 0xff, TEST_BUFLEN); + for (align = 0; align < TEST_BUFLEN; ++align) { + for (len = 0; len < MAX_LEN && (align + len) < TEST_BUFLEN; + ++len) { + /* + * All carries from input and initial sum. + */ + sum = 0xffffffff; + result = full_csum(&tmp_buf[align], len, sum); + expec = (len & 1) ? 0xff00 : 0; + CHECK_EQ(result, expec); + + /* + * All carries from input. + */ + sum = 0; + result = full_csum(&tmp_buf[align], len, sum); + if (len & 1) + expec = 0xff00; + else if (len) + expec = 0; + else + expec = 0xffff; + CHECK_EQ(result, expec); + } + } +} + +/* + * Test with input that alone doesn't cause any carries. By selecting the + * maximum initial sum, this allows us to test that there are no carries + * where there shouldn't be. 
+ */ +static void test_csum_no_carry_inputs(struct kunit *test) +{ + int len, align; + __wsum result, expec, sum; + + assert_setup_correct(test); + memset(tmp_buf, 0x4, TEST_BUFLEN); + for (align = 0; align < TEST_BUFLEN; ++align) { + for (len = 0; len < MAX_LEN && (align + len) < TEST_BUFLEN; + ++len) { + /* + * Expect no carries. + */ + sum = init_sums_no_overflow[len]; + result = full_csum(&tmp_buf[align], len, sum); + expec = 0; + CHECK_EQ(result, expec); + + /* + * Expect one carry. + */ + sum = init_sums_no_overflow[len] + 1; + result = full_csum(&tmp_buf[align], len, sum); + expec = len ? 0xfffe : 0xffff; + CHECK_EQ(result, expec); + } + } +} + +static struct kunit_case __refdata checksum_test_cases[] = { + KUNIT_CASE(test_csum_fixed_random_inputs), + KUNIT_CASE(test_csum_all_carry_inputs), + KUNIT_CASE(test_csum_no_carry_inputs), + {} +}; + +static struct kunit_suite checksum_test_suite = { + .name = "checksum", + .test_cases = checksum_test_cases, +}; + +kunit_test_suites(&checksum_test_suite); + +MODULE_AUTHOR("Noah Goldstein <goldstein.w.n@gmail.com>"); +MODULE_LICENSE("GPL"); diff --git a/lib/cpu_rmap.c b/lib/cpu_rmap.c index 73c1636b927b..4c348670da31 100644 --- a/lib/cpu_rmap.c +++ b/lib/cpu_rmap.c @@ -280,8 +280,8 @@ static void irq_cpu_rmap_release(struct kref *ref) struct irq_glue *glue = container_of(ref, struct irq_glue, notify.kref); - cpu_rmap_put(glue->rmap); glue->rmap->obj[glue->index] = NULL; + cpu_rmap_put(glue->rmap); kfree(glue); } diff --git a/lib/crypto/curve25519-hacl64.c b/lib/crypto/curve25519-hacl64.c index 771d82dc5f14..c40e5d913234 100644 --- a/lib/crypto/curve25519-hacl64.c +++ b/lib/crypto/curve25519-hacl64.c @@ -14,8 +14,6 @@ #include <crypto/curve25519.h> #include <linux/string.h> -typedef __uint128_t u128; - static __always_inline u64 u64_eq_mask(u64 a, u64 b) { u64 x = a ^ b; diff --git a/lib/crypto/poly1305-donna64.c b/lib/crypto/poly1305-donna64.c index d34cf4053668..988702c9b3b2 100644 --- a/lib/crypto/poly1305-donna64.c +++ b/lib/crypto/poly1305-donna64.c @@ -10,8 +10,6 @@ #include <asm/unaligned.h> #include <crypto/internal/poly1305.h> -typedef __uint128_t u128; - void poly1305_core_setkey(struct poly1305_core_key *key, const u8 raw_key[POLY1305_BLOCK_SIZE]) { diff --git a/lib/debugobjects.c b/lib/debugobjects.c index 984985c39c9b..a517256a270b 100644 --- a/lib/debugobjects.c +++ b/lib/debugobjects.c @@ -498,6 +498,15 @@ static void debug_print_object(struct debug_obj *obj, char *msg) const struct debug_obj_descr *descr = obj->descr; static int limit; + /* + * Don't report if lookup_object_or_alloc() by the current thread + * failed because lookup_object_or_alloc()/debug_objects_oom() by a + * concurrent thread turned off debug_objects_enabled and cleared + * the hash buckets. + */ + if (!debug_objects_enabled) + return; + if (limit < 5 && descr != descr_test) { void *hint = descr->debug_hint ? 
descr->debug_hint(obj->object) : NULL; diff --git a/lib/fortify_kunit.c b/lib/fortify_kunit.c index c8c33cbaae9e..524132f33cf0 100644 --- a/lib/fortify_kunit.c +++ b/lib/fortify_kunit.c @@ -25,6 +25,11 @@ static const char array_of_10[] = "this is 10"; static const char *ptr_of_11 = "this is 11!"; static char array_unknown[] = "compiler thinks I might change"; +/* Handle being built without CONFIG_FORTIFY_SOURCE */ +#ifndef __compiletime_strlen +# define __compiletime_strlen __builtin_strlen +#endif + static void known_sizes_test(struct kunit *test) { KUNIT_EXPECT_EQ(test, __compiletime_strlen("88888888"), 8); @@ -307,6 +312,14 @@ DEFINE_ALLOC_SIZE_TEST_PAIR(kvmalloc) } while (0) DEFINE_ALLOC_SIZE_TEST_PAIR(devm_kmalloc) +static int fortify_test_init(struct kunit *test) +{ + if (!IS_ENABLED(CONFIG_FORTIFY_SOURCE)) + kunit_skip(test, "Not built with CONFIG_FORTIFY_SOURCE=y"); + + return 0; +} + static struct kunit_case fortify_test_cases[] = { KUNIT_CASE(known_sizes_test), KUNIT_CASE(control_flow_split_test), @@ -323,6 +336,7 @@ static struct kunit_case fortify_test_cases[] = { static struct kunit_suite fortify_test_suite = { .name = "fortify", + .init = fortify_test_init, .test_cases = fortify_test_cases, }; diff --git a/lib/iov_iter.c b/lib/iov_iter.c index 960223ed9199..b667b1e2f688 100644 --- a/lib/iov_iter.c +++ b/lib/iov_iter.c @@ -14,8 +14,6 @@ #include <linux/scatterlist.h> #include <linux/instrumented.h> -#define PIPE_PARANOIA /* for now */ - /* covers ubuf and kbuf alike */ #define iterate_buf(i, n, base, len, off, __p, STEP) { \ size_t __maybe_unused off = 0; \ @@ -198,150 +196,6 @@ static int copyin(void *to, const void __user *from, size_t n) return res; } -#ifdef PIPE_PARANOIA -static bool sanity(const struct iov_iter *i) -{ - struct pipe_inode_info *pipe = i->pipe; - unsigned int p_head = pipe->head; - unsigned int p_tail = pipe->tail; - unsigned int p_occupancy = pipe_occupancy(p_head, p_tail); - unsigned int i_head = i->head; - unsigned int idx; - - if (i->last_offset) { - struct pipe_buffer *p; - if (unlikely(p_occupancy == 0)) - goto Bad; // pipe must be non-empty - if (unlikely(i_head != p_head - 1)) - goto Bad; // must be at the last buffer... - - p = pipe_buf(pipe, i_head); - if (unlikely(p->offset + p->len != abs(i->last_offset))) - goto Bad; // ... 
at the end of segment - } else { - if (i_head != p_head) - goto Bad; // must be right after the last buffer - } - return true; -Bad: - printk(KERN_ERR "idx = %d, offset = %d\n", i_head, i->last_offset); - printk(KERN_ERR "head = %d, tail = %d, buffers = %d\n", - p_head, p_tail, pipe->ring_size); - for (idx = 0; idx < pipe->ring_size; idx++) - printk(KERN_ERR "[%p %p %d %d]\n", - pipe->bufs[idx].ops, - pipe->bufs[idx].page, - pipe->bufs[idx].offset, - pipe->bufs[idx].len); - WARN_ON(1); - return false; -} -#else -#define sanity(i) true -#endif - -static struct page *push_anon(struct pipe_inode_info *pipe, unsigned size) -{ - struct page *page = alloc_page(GFP_USER); - if (page) { - struct pipe_buffer *buf = pipe_buf(pipe, pipe->head++); - *buf = (struct pipe_buffer) { - .ops = &default_pipe_buf_ops, - .page = page, - .offset = 0, - .len = size - }; - } - return page; -} - -static void push_page(struct pipe_inode_info *pipe, struct page *page, - unsigned int offset, unsigned int size) -{ - struct pipe_buffer *buf = pipe_buf(pipe, pipe->head++); - *buf = (struct pipe_buffer) { - .ops = &page_cache_pipe_buf_ops, - .page = page, - .offset = offset, - .len = size - }; - get_page(page); -} - -static inline int last_offset(const struct pipe_buffer *buf) -{ - if (buf->ops == &default_pipe_buf_ops) - return buf->len; // buf->offset is 0 for those - else - return -(buf->offset + buf->len); -} - -static struct page *append_pipe(struct iov_iter *i, size_t size, - unsigned int *off) -{ - struct pipe_inode_info *pipe = i->pipe; - int offset = i->last_offset; - struct pipe_buffer *buf; - struct page *page; - - if (offset > 0 && offset < PAGE_SIZE) { - // some space in the last buffer; add to it - buf = pipe_buf(pipe, pipe->head - 1); - size = min_t(size_t, size, PAGE_SIZE - offset); - buf->len += size; - i->last_offset += size; - i->count -= size; - *off = offset; - return buf->page; - } - // OK, we need a new buffer - *off = 0; - size = min_t(size_t, size, PAGE_SIZE); - if (pipe_full(pipe->head, pipe->tail, pipe->max_usage)) - return NULL; - page = push_anon(pipe, size); - if (!page) - return NULL; - i->head = pipe->head - 1; - i->last_offset = size; - i->count -= size; - return page; -} - -static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t bytes, - struct iov_iter *i) -{ - struct pipe_inode_info *pipe = i->pipe; - unsigned int head = pipe->head; - - if (unlikely(bytes > i->count)) - bytes = i->count; - - if (unlikely(!bytes)) - return 0; - - if (!sanity(i)) - return 0; - - if (offset && i->last_offset == -offset) { // could we merge it? 
- struct pipe_buffer *buf = pipe_buf(pipe, head - 1); - if (buf->page == page) { - buf->len += bytes; - i->last_offset -= bytes; - i->count -= bytes; - return bytes; - } - } - if (pipe_full(pipe->head, pipe->tail, pipe->max_usage)) - return 0; - - push_page(pipe, page, offset, bytes); - i->last_offset = -(offset + bytes); - i->head = head; - i->count -= bytes; - return bytes; -} - /* * fault_in_iov_iter_readable - fault in iov iterator for reading * @i: iterator @@ -446,46 +300,6 @@ void iov_iter_init(struct iov_iter *i, unsigned int direction, } EXPORT_SYMBOL(iov_iter_init); -// returns the offset in partial buffer (if any) -static inline unsigned int pipe_npages(const struct iov_iter *i, int *npages) -{ - struct pipe_inode_info *pipe = i->pipe; - int used = pipe->head - pipe->tail; - int off = i->last_offset; - - *npages = max((int)pipe->max_usage - used, 0); - - if (off > 0 && off < PAGE_SIZE) { // anon and not full - (*npages)++; - return off; - } - return 0; -} - -static size_t copy_pipe_to_iter(const void *addr, size_t bytes, - struct iov_iter *i) -{ - unsigned int off, chunk; - - if (unlikely(bytes > i->count)) - bytes = i->count; - if (unlikely(!bytes)) - return 0; - - if (!sanity(i)) - return 0; - - for (size_t n = bytes; n; n -= chunk) { - struct page *page = append_pipe(i, n, &off); - chunk = min_t(size_t, n, PAGE_SIZE - off); - if (!page) - return bytes - n; - memcpy_to_page(page, off, addr, chunk); - addr += chunk; - } - return bytes; -} - static __wsum csum_and_memcpy(void *to, const void *from, size_t len, __wsum sum, size_t off) { @@ -493,44 +307,10 @@ static __wsum csum_and_memcpy(void *to, const void *from, size_t len, return csum_block_add(sum, next, off); } -static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes, - struct iov_iter *i, __wsum *sump) -{ - __wsum sum = *sump; - size_t off = 0; - unsigned int chunk, r; - - if (unlikely(bytes > i->count)) - bytes = i->count; - if (unlikely(!bytes)) - return 0; - - if (!sanity(i)) - return 0; - - while (bytes) { - struct page *page = append_pipe(i, bytes, &r); - char *p; - - if (!page) - break; - chunk = min_t(size_t, bytes, PAGE_SIZE - r); - p = kmap_local_page(page); - sum = csum_and_memcpy(p + r, addr + off, chunk, sum, off); - kunmap_local(p); - off += chunk; - bytes -= chunk; - } - *sump = sum; - return off; -} - size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i) { if (WARN_ON_ONCE(i->data_source)) return 0; - if (unlikely(iov_iter_is_pipe(i))) - return copy_pipe_to_iter(addr, bytes, i); if (user_backed_iter(i)) might_fault(); iterate_and_advance(i, bytes, base, len, off, @@ -552,42 +332,6 @@ static int copyout_mc(void __user *to, const void *from, size_t n) return n; } -static size_t copy_mc_pipe_to_iter(const void *addr, size_t bytes, - struct iov_iter *i) -{ - size_t xfer = 0; - unsigned int off, chunk; - - if (unlikely(bytes > i->count)) - bytes = i->count; - if (unlikely(!bytes)) - return 0; - - if (!sanity(i)) - return 0; - - while (bytes) { - struct page *page = append_pipe(i, bytes, &off); - unsigned long rem; - char *p; - - if (!page) - break; - chunk = min_t(size_t, bytes, PAGE_SIZE - off); - p = kmap_local_page(page); - rem = copy_mc_to_kernel(p + off, addr + xfer, chunk); - chunk -= rem; - kunmap_local(p); - xfer += chunk; - bytes -= chunk; - if (rem) { - iov_iter_revert(i, rem); - break; - } - } - return xfer; -} - /** * _copy_mc_to_iter - copy to iter with source memory error exception handling * @addr: source kernel address @@ -607,9 +351,8 @@ static size_t 
copy_mc_pipe_to_iter(const void *addr, size_t bytes, * alignment and poison alignment assumptions to avoid re-triggering * hardware exceptions. * - * * ITER_KVEC, ITER_PIPE, and ITER_BVEC can return short copies. - * Compare to copy_to_iter() where only ITER_IOVEC attempts might return - * a short copy. + * * ITER_KVEC and ITER_BVEC can return short copies. Compare to + * copy_to_iter() where only ITER_IOVEC attempts might return a short copy. * * Return: number of bytes copied (may be %0) */ @@ -617,8 +360,6 @@ size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i) { if (WARN_ON_ONCE(i->data_source)) return 0; - if (unlikely(iov_iter_is_pipe(i))) - return copy_mc_pipe_to_iter(addr, bytes, i); if (user_backed_iter(i)) might_fault(); __iterate_and_advance(i, bytes, base, len, off, @@ -732,8 +473,6 @@ size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes, return 0; if (WARN_ON_ONCE(i->data_source)) return 0; - if (unlikely(iov_iter_is_pipe(i))) - return copy_page_to_iter_pipe(page, offset, bytes, i); page += offset / PAGE_SIZE; // first subpage offset %= PAGE_SIZE; while (1) { @@ -764,8 +503,6 @@ size_t copy_page_to_iter_nofault(struct page *page, unsigned offset, size_t byte return 0; if (WARN_ON_ONCE(i->data_source)) return 0; - if (unlikely(iov_iter_is_pipe(i))) - return copy_page_to_iter_pipe(page, offset, bytes, i); page += offset / PAGE_SIZE; // first subpage offset %= PAGE_SIZE; while (1) { @@ -818,36 +555,8 @@ size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes, } EXPORT_SYMBOL(copy_page_from_iter); -static size_t pipe_zero(size_t bytes, struct iov_iter *i) -{ - unsigned int chunk, off; - - if (unlikely(bytes > i->count)) - bytes = i->count; - if (unlikely(!bytes)) - return 0; - - if (!sanity(i)) - return 0; - - for (size_t n = bytes; n; n -= chunk) { - struct page *page = append_pipe(i, n, &off); - char *p; - - if (!page) - return bytes - n; - chunk = min_t(size_t, n, PAGE_SIZE - off); - p = kmap_local_page(page); - memset(p + off, 0, chunk); - kunmap_local(p); - } - return bytes; -} - size_t iov_iter_zero(size_t bytes, struct iov_iter *i) { - if (unlikely(iov_iter_is_pipe(i))) - return pipe_zero(bytes, i); iterate_and_advance(i, bytes, base, len, count, clear_user(base, len), memset(base, 0, len) @@ -878,32 +587,6 @@ size_t copy_page_from_iter_atomic(struct page *page, unsigned offset, size_t byt } EXPORT_SYMBOL(copy_page_from_iter_atomic); -static void pipe_advance(struct iov_iter *i, size_t size) -{ - struct pipe_inode_info *pipe = i->pipe; - int off = i->last_offset; - - if (!off && !size) { - pipe_discard_from(pipe, i->start_head); // discard everything - return; - } - i->count -= size; - while (1) { - struct pipe_buffer *buf = pipe_buf(pipe, i->head); - if (off) /* make it relative to the beginning of buffer */ - size += abs(off) - buf->offset; - if (size <= buf->len) { - buf->len = size; - i->last_offset = last_offset(buf); - break; - } - size -= buf->len; - i->head++; - off = 0; - } - pipe_discard_from(pipe, i->head + 1); // discard everything past this one -} - static void iov_iter_bvec_advance(struct iov_iter *i, size_t size) { const struct bio_vec *bvec, *end; @@ -955,8 +638,6 @@ void iov_iter_advance(struct iov_iter *i, size_t size) iov_iter_iovec_advance(i, size); } else if (iov_iter_is_bvec(i)) { iov_iter_bvec_advance(i, size); - } else if (iov_iter_is_pipe(i)) { - pipe_advance(i, size); } else if (iov_iter_is_discard(i)) { i->count -= size; } @@ -970,26 +651,6 @@ void iov_iter_revert(struct iov_iter *i, 
size_t unroll) if (WARN_ON(unroll > MAX_RW_COUNT)) return; i->count += unroll; - if (unlikely(iov_iter_is_pipe(i))) { - struct pipe_inode_info *pipe = i->pipe; - unsigned int head = pipe->head; - - while (head > i->start_head) { - struct pipe_buffer *b = pipe_buf(pipe, --head); - if (unroll < b->len) { - b->len -= unroll; - i->last_offset = last_offset(b); - i->head = head; - return; - } - unroll -= b->len; - pipe_buf_release(pipe, b); - pipe->head--; - } - i->last_offset = 0; - i->head = head; - return; - } if (unlikely(iov_iter_is_discard(i))) return; if (unroll <= i->iov_offset) { @@ -1079,24 +740,6 @@ void iov_iter_bvec(struct iov_iter *i, unsigned int direction, } EXPORT_SYMBOL(iov_iter_bvec); -void iov_iter_pipe(struct iov_iter *i, unsigned int direction, - struct pipe_inode_info *pipe, - size_t count) -{ - BUG_ON(direction != READ); - WARN_ON(pipe_full(pipe->head, pipe->tail, pipe->ring_size)); - *i = (struct iov_iter){ - .iter_type = ITER_PIPE, - .data_source = false, - .pipe = pipe, - .head = pipe->head, - .start_head = pipe->head, - .last_offset = 0, - .count = count - }; -} -EXPORT_SYMBOL(iov_iter_pipe); - /** * iov_iter_xarray - Initialise an I/O iterator to use the pages in an xarray * @i: The iterator to initialise. @@ -1224,19 +867,6 @@ bool iov_iter_is_aligned(const struct iov_iter *i, unsigned addr_mask, if (iov_iter_is_bvec(i)) return iov_iter_aligned_bvec(i, addr_mask, len_mask); - if (iov_iter_is_pipe(i)) { - size_t size = i->count; - - if (size & len_mask) - return false; - if (size && i->last_offset > 0) { - if (i->last_offset & addr_mask) - return false; - } - - return true; - } - if (iov_iter_is_xarray(i)) { if (i->count & len_mask) return false; @@ -1307,14 +937,6 @@ unsigned long iov_iter_alignment(const struct iov_iter *i) if (iov_iter_is_bvec(i)) return iov_iter_alignment_bvec(i); - if (iov_iter_is_pipe(i)) { - size_t size = i->count; - - if (size && i->last_offset > 0) - return size | i->last_offset; - return size; - } - if (iov_iter_is_xarray(i)) return (i->xarray_start + i->iov_offset) | i->count; @@ -1367,36 +989,6 @@ static int want_pages_array(struct page ***res, size_t size, return count; } -static ssize_t pipe_get_pages(struct iov_iter *i, - struct page ***pages, size_t maxsize, unsigned maxpages, - size_t *start) -{ - unsigned int npages, count, off, chunk; - struct page **p; - size_t left; - - if (!sanity(i)) - return -EFAULT; - - *start = off = pipe_npages(i, &npages); - if (!npages) - return -EFAULT; - count = want_pages_array(pages, maxsize, off, min(npages, maxpages)); - if (!count) - return -ENOMEM; - p = *pages; - for (npages = 0, left = maxsize ; npages < count; npages++, left -= chunk) { - struct page *page = append_pipe(i, left, &off); - if (!page) - break; - chunk = min_t(size_t, left, PAGE_SIZE - off); - get_page(*p++ = page); - } - if (!npages) - return -EFAULT; - return maxsize - left; -} - static ssize_t iter_xarray_populate_pages(struct page **pages, struct xarray *xa, pgoff_t index, unsigned int nr_pages) { @@ -1490,8 +1082,7 @@ static struct page *first_bvec_segment(const struct iov_iter *i, static ssize_t __iov_iter_get_pages_alloc(struct iov_iter *i, struct page ***pages, size_t maxsize, - unsigned int maxpages, size_t *start, - iov_iter_extraction_t extraction_flags) + unsigned int maxpages, size_t *start) { unsigned int n, gup_flags = 0; @@ -1501,8 +1092,6 @@ static ssize_t __iov_iter_get_pages_alloc(struct iov_iter *i, return 0; if (maxsize > MAX_RW_COUNT) maxsize = MAX_RW_COUNT; - if (extraction_flags & ITER_ALLOW_P2PDMA) - 
gup_flags |= FOLL_PCI_P2PDMA; if (likely(user_backed_iter(i))) { unsigned long addr; @@ -1547,56 +1136,36 @@ static ssize_t __iov_iter_get_pages_alloc(struct iov_iter *i, } return maxsize; } - if (iov_iter_is_pipe(i)) - return pipe_get_pages(i, pages, maxsize, maxpages, start); if (iov_iter_is_xarray(i)) return iter_xarray_get_pages(i, pages, maxsize, maxpages, start); return -EFAULT; } -ssize_t iov_iter_get_pages(struct iov_iter *i, - struct page **pages, size_t maxsize, unsigned maxpages, - size_t *start, iov_iter_extraction_t extraction_flags) +ssize_t iov_iter_get_pages2(struct iov_iter *i, struct page **pages, + size_t maxsize, unsigned maxpages, size_t *start) { if (!maxpages) return 0; BUG_ON(!pages); - return __iov_iter_get_pages_alloc(i, &pages, maxsize, maxpages, - start, extraction_flags); -} -EXPORT_SYMBOL_GPL(iov_iter_get_pages); - -ssize_t iov_iter_get_pages2(struct iov_iter *i, struct page **pages, - size_t maxsize, unsigned maxpages, size_t *start) -{ - return iov_iter_get_pages(i, pages, maxsize, maxpages, start, 0); + return __iov_iter_get_pages_alloc(i, &pages, maxsize, maxpages, start); } EXPORT_SYMBOL(iov_iter_get_pages2); -ssize_t iov_iter_get_pages_alloc(struct iov_iter *i, - struct page ***pages, size_t maxsize, - size_t *start, iov_iter_extraction_t extraction_flags) +ssize_t iov_iter_get_pages_alloc2(struct iov_iter *i, + struct page ***pages, size_t maxsize, size_t *start) { ssize_t len; *pages = NULL; - len = __iov_iter_get_pages_alloc(i, pages, maxsize, ~0U, start, - extraction_flags); + len = __iov_iter_get_pages_alloc(i, pages, maxsize, ~0U, start); if (len <= 0) { kvfree(*pages); *pages = NULL; } return len; } -EXPORT_SYMBOL_GPL(iov_iter_get_pages_alloc); - -ssize_t iov_iter_get_pages_alloc2(struct iov_iter *i, - struct page ***pages, size_t maxsize, size_t *start) -{ - return iov_iter_get_pages_alloc(i, pages, maxsize, start, 0); -} EXPORT_SYMBOL(iov_iter_get_pages_alloc2); size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, @@ -1638,9 +1207,7 @@ size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate, } sum = csum_shift(csstate->csum, csstate->off); - if (unlikely(iov_iter_is_pipe(i))) - bytes = csum_and_copy_to_pipe_iter(addr, bytes, i, &sum); - else iterate_and_advance(i, bytes, base, len, off, ({ + iterate_and_advance(i, bytes, base, len, off, ({ next = csum_and_copy_to_user(addr + off, base, len); sum = csum_block_add(sum, next, off); next ? 
0 : len; @@ -1725,15 +1292,6 @@ int iov_iter_npages(const struct iov_iter *i, int maxpages) return iov_npages(i, maxpages); if (iov_iter_is_bvec(i)) return bvec_npages(i, maxpages); - if (iov_iter_is_pipe(i)) { - int npages; - - if (!sanity(i)) - return 0; - - pipe_npages(i, &npages); - return min(npages, maxpages); - } if (iov_iter_is_xarray(i)) { unsigned offset = (i->xarray_start + i->iov_offset) % PAGE_SIZE; int npages = DIV_ROUND_UP(offset + i->count, PAGE_SIZE); @@ -1746,10 +1304,6 @@ EXPORT_SYMBOL(iov_iter_npages); const void *dup_iter(struct iov_iter *new, struct iov_iter *old, gfp_t flags) { *new = *old; - if (unlikely(iov_iter_is_pipe(new))) { - WARN_ON(1); - return NULL; - } if (iov_iter_is_bvec(new)) return new->bvec = kmemdup(new->bvec, new->nr_segs * sizeof(struct bio_vec), diff --git a/lib/kobject.c b/lib/kobject.c index f79a434e1231..16d530f9c174 100644 --- a/lib/kobject.c +++ b/lib/kobject.c @@ -281,8 +281,7 @@ int kobject_set_name_vargs(struct kobject *kobj, const char *fmt, kfree_const(s); if (!t) return -ENOMEM; - strreplace(t, '/', '!'); - s = t; + s = strreplace(t, '/', '!'); } kfree_const(kobj->name); kobj->name = s; diff --git a/lib/kunit/executor_test.c b/lib/kunit/executor_test.c index 0cea31c27b23..ce6749af374d 100644 --- a/lib/kunit/executor_test.c +++ b/lib/kunit/executor_test.c @@ -125,11 +125,6 @@ kunit_test_suites(&executor_test_suite); /* Test helpers */ -static void kfree_res_free(struct kunit_resource *res) -{ - kfree(res->data); -} - /* Use the resource API to register a call to kfree(to_free). * Since we never actually use the resource, it's safe to use on const data. */ @@ -138,8 +133,10 @@ static void kfree_at_end(struct kunit *test, const void *to_free) /* kfree() handles NULL already, but avoid allocating a no-op cleanup. */ if (IS_ERR_OR_NULL(to_free)) return; - kunit_alloc_resource(test, NULL, kfree_res_free, GFP_KERNEL, - (void *)to_free); + + kunit_add_action(test, + (kunit_action_t *)kfree, + (void *)to_free); } static struct kunit_suite *alloc_fake_suite(struct kunit *test, diff --git a/lib/kunit/kunit-example-test.c b/lib/kunit/kunit-example-test.c index cd8b7e51d02b..b69b689ea850 100644 --- a/lib/kunit/kunit-example-test.c +++ b/lib/kunit/kunit-example-test.c @@ -42,6 +42,16 @@ static int example_test_init(struct kunit *test) } /* + * This is run once after each test case, see the comment on + * example_test_suite for more information. + */ +static void example_test_exit(struct kunit *test) +{ + kunit_info(test, "cleaning up\n"); +} + + +/* * This is run once before all test cases in the suite. * See the comment on example_test_suite for more information. */ @@ -53,6 +63,16 @@ static int example_test_init_suite(struct kunit_suite *suite) } /* + * This is run once after all test cases in the suite. + * See the comment on example_test_suite for more information. + */ +static void example_test_exit_suite(struct kunit_suite *suite) +{ + kunit_info(suite, "exiting suite\n"); +} + + +/* * This test should always be skipped. 
*/ static void example_skip_test(struct kunit *test) @@ -167,6 +187,39 @@ static void example_static_stub_test(struct kunit *test) KUNIT_EXPECT_EQ(test, add_one(1), 2); } +static const struct example_param { + int value; +} example_params_array[] = { + { .value = 2, }, + { .value = 1, }, + { .value = 0, }, +}; + +static void example_param_get_desc(const struct example_param *p, char *desc) +{ + snprintf(desc, KUNIT_PARAM_DESC_SIZE, "example value %d", p->value); +} + +KUNIT_ARRAY_PARAM(example, example_params_array, example_param_get_desc); + +/* + * This test shows the use of params. + */ +static void example_params_test(struct kunit *test) +{ + const struct example_param *param = test->param_value; + + /* By design, param pointer will not be NULL */ + KUNIT_ASSERT_NOT_NULL(test, param); + + /* Test can be skipped on unsupported param values */ + if (!param->value) + kunit_skip(test, "unsupported param value"); + + /* You can use param values for parameterized testing */ + KUNIT_EXPECT_EQ(test, param->value % param->value, 0); +} + /* * Here we make a list of all the test cases we want to add to the test suite * below. @@ -183,6 +236,7 @@ static struct kunit_case example_test_cases[] = { KUNIT_CASE(example_mark_skipped_test), KUNIT_CASE(example_all_expect_macros_test), KUNIT_CASE(example_static_stub_test), + KUNIT_CASE_PARAM(example_params_test, example_gen_params), {} }; @@ -211,7 +265,9 @@ static struct kunit_case example_test_cases[] = { static struct kunit_suite example_test_suite = { .name = "example", .init = example_test_init, + .exit = example_test_exit, .suite_init = example_test_init_suite, + .suite_exit = example_test_exit_suite, .test_cases = example_test_cases, }; diff --git a/lib/kunit/kunit-test.c b/lib/kunit/kunit-test.c index 42e44caa1bdd..83d8e90ca7a2 100644 --- a/lib/kunit/kunit-test.c +++ b/lib/kunit/kunit-test.c @@ -112,7 +112,7 @@ struct kunit_test_resource_context { struct kunit test; bool is_resource_initialized; int allocate_order[2]; - int free_order[2]; + int free_order[4]; }; static int fake_resource_init(struct kunit_resource *res, void *context) @@ -403,6 +403,88 @@ static void kunit_resource_test_named(struct kunit *test) KUNIT_EXPECT_TRUE(test, list_empty(&test->resources)); } +static void increment_int(void *ctx) +{ + int *i = (int *)ctx; + (*i)++; +} + +static void kunit_resource_test_action(struct kunit *test) +{ + int num_actions = 0; + + kunit_add_action(test, increment_int, &num_actions); + KUNIT_EXPECT_EQ(test, num_actions, 0); + kunit_cleanup(test); + KUNIT_EXPECT_EQ(test, num_actions, 1); + + /* Once we've cleaned up, the action queue is empty. */ + kunit_cleanup(test); + KUNIT_EXPECT_EQ(test, num_actions, 1); + + /* Check the same function can be deferred multiple times. */ + kunit_add_action(test, increment_int, &num_actions); + kunit_add_action(test, increment_int, &num_actions); + kunit_cleanup(test); + KUNIT_EXPECT_EQ(test, num_actions, 3); +} +static void kunit_resource_test_remove_action(struct kunit *test) +{ + int num_actions = 0; + + kunit_add_action(test, increment_int, &num_actions); + KUNIT_EXPECT_EQ(test, num_actions, 0); + + kunit_remove_action(test, increment_int, &num_actions); + kunit_cleanup(test); + KUNIT_EXPECT_EQ(test, num_actions, 0); +} +static void kunit_resource_test_release_action(struct kunit *test) +{ + int num_actions = 0; + + kunit_add_action(test, increment_int, &num_actions); + KUNIT_EXPECT_EQ(test, num_actions, 0); + /* Runs immediately on trigger. 
*/ + kunit_release_action(test, increment_int, &num_actions); + KUNIT_EXPECT_EQ(test, num_actions, 1); + + /* Doesn't run again on test exit. */ + kunit_cleanup(test); + KUNIT_EXPECT_EQ(test, num_actions, 1); +} +static void action_order_1(void *ctx) +{ + struct kunit_test_resource_context *res_ctx = (struct kunit_test_resource_context *)ctx; + + KUNIT_RESOURCE_TEST_MARK_ORDER(res_ctx, free_order, 1); + kunit_log(KERN_INFO, current->kunit_test, "action_order_1"); +} +static void action_order_2(void *ctx) +{ + struct kunit_test_resource_context *res_ctx = (struct kunit_test_resource_context *)ctx; + + KUNIT_RESOURCE_TEST_MARK_ORDER(res_ctx, free_order, 2); + kunit_log(KERN_INFO, current->kunit_test, "action_order_2"); +} +static void kunit_resource_test_action_ordering(struct kunit *test) +{ + struct kunit_test_resource_context *ctx = test->priv; + + kunit_add_action(test, action_order_1, ctx); + kunit_add_action(test, action_order_2, ctx); + kunit_add_action(test, action_order_1, ctx); + kunit_add_action(test, action_order_2, ctx); + kunit_remove_action(test, action_order_1, ctx); + kunit_release_action(test, action_order_2, ctx); + kunit_cleanup(test); + + /* [2 is triggered] [2], [(1 is cancelled)] [1] */ + KUNIT_EXPECT_EQ(test, ctx->free_order[0], 2); + KUNIT_EXPECT_EQ(test, ctx->free_order[1], 2); + KUNIT_EXPECT_EQ(test, ctx->free_order[2], 1); +} + static int kunit_resource_test_init(struct kunit *test) { struct kunit_test_resource_context *ctx = @@ -434,6 +516,10 @@ static struct kunit_case kunit_resource_test_cases[] = { KUNIT_CASE(kunit_resource_test_proper_free_ordering), KUNIT_CASE(kunit_resource_test_static), KUNIT_CASE(kunit_resource_test_named), + KUNIT_CASE(kunit_resource_test_action), + KUNIT_CASE(kunit_resource_test_remove_action), + KUNIT_CASE(kunit_resource_test_release_action), + KUNIT_CASE(kunit_resource_test_action_ordering), {} }; diff --git a/lib/kunit/resource.c b/lib/kunit/resource.c index c414df922f34..f0209252b179 100644 --- a/lib/kunit/resource.c +++ b/lib/kunit/resource.c @@ -77,3 +77,102 @@ int kunit_destroy_resource(struct kunit *test, kunit_resource_match_t match, return 0; } EXPORT_SYMBOL_GPL(kunit_destroy_resource); + +struct kunit_action_ctx { + struct kunit_resource res; + kunit_action_t *func; + void *ctx; +}; + +static void __kunit_action_free(struct kunit_resource *res) +{ + struct kunit_action_ctx *action_ctx = container_of(res, struct kunit_action_ctx, res); + + action_ctx->func(action_ctx->ctx); +} + + +int kunit_add_action(struct kunit *test, void (*action)(void *), void *ctx) +{ + struct kunit_action_ctx *action_ctx; + + KUNIT_ASSERT_NOT_NULL_MSG(test, action, "Tried to action a NULL function!"); + + action_ctx = kzalloc(sizeof(*action_ctx), GFP_KERNEL); + if (!action_ctx) + return -ENOMEM; + + action_ctx->func = action; + action_ctx->ctx = ctx; + + action_ctx->res.should_kfree = true; + /* As init is NULL, this cannot fail. 
*/ + __kunit_add_resource(test, NULL, __kunit_action_free, &action_ctx->res, action_ctx); + + return 0; +} +EXPORT_SYMBOL_GPL(kunit_add_action); + +int kunit_add_action_or_reset(struct kunit *test, void (*action)(void *), + void *ctx) +{ + int res = kunit_add_action(test, action, ctx); + + if (res) + action(ctx); + return res; +} +EXPORT_SYMBOL_GPL(kunit_add_action_or_reset); + +static bool __kunit_action_match(struct kunit *test, + struct kunit_resource *res, void *match_data) +{ + struct kunit_action_ctx *match_ctx = (struct kunit_action_ctx *)match_data; + struct kunit_action_ctx *res_ctx = container_of(res, struct kunit_action_ctx, res); + + /* Make sure this is a free function. */ + if (res->free != __kunit_action_free) + return false; + + /* Both the function and context data should match. */ + return (match_ctx->func == res_ctx->func) && (match_ctx->ctx == res_ctx->ctx); +} + +void kunit_remove_action(struct kunit *test, + kunit_action_t *action, + void *ctx) +{ + struct kunit_action_ctx match_ctx; + struct kunit_resource *res; + + match_ctx.func = action; + match_ctx.ctx = ctx; + + res = kunit_find_resource(test, __kunit_action_match, &match_ctx); + if (res) { + /* Remove the free function so we don't run the action. */ + res->free = NULL; + kunit_remove_resource(test, res); + kunit_put_resource(res); + } +} +EXPORT_SYMBOL_GPL(kunit_remove_action); + +void kunit_release_action(struct kunit *test, + kunit_action_t *action, + void *ctx) +{ + struct kunit_action_ctx match_ctx; + struct kunit_resource *res; + + match_ctx.func = action; + match_ctx.ctx = ctx; + + res = kunit_find_resource(test, __kunit_action_match, &match_ctx); + if (res) { + kunit_remove_resource(test, res); + /* We have to put() this here, else free won't be called. */ + kunit_put_resource(res); + } +} +EXPORT_SYMBOL_GPL(kunit_release_action); diff --git a/lib/kunit/test.c b/lib/kunit/test.c index e2910b261112..84e4666555c9 100644 --- a/lib/kunit/test.c +++ b/lib/kunit/test.c @@ -185,16 +185,28 @@ static void kunit_print_suite_start(struct kunit_suite *suite) kunit_suite_num_test_cases(suite)); } -static void kunit_print_ok_not_ok(void *test_or_suite, - bool is_test, +/* Currently supported test levels */ +enum { + KUNIT_LEVEL_SUITE = 0, + KUNIT_LEVEL_CASE, + KUNIT_LEVEL_CASE_PARAM, +}; + +static void kunit_print_ok_not_ok(struct kunit *test, + unsigned int test_level, enum kunit_status status, size_t test_number, const char *description, const char *directive) { - struct kunit_suite *suite = is_test ? NULL : test_or_suite; - struct kunit *test = is_test ? test_or_suite : NULL; const char *directive_header = (status == KUNIT_SKIPPED) ? " # SKIP " : ""; + const char *directive_body = (status == KUNIT_SKIPPED) ? directive : ""; + + /* + * When test is NULL assume that results are from the suite + * and today suite results are expected at level 0 only. + */ + WARN(!test && test_level, "suite test level can't be %u!\n", test_level); /* * We do not log the test suite results as doing so would @@ -203,17 +215,18 @@ static void kunit_print_ok_not_ok(void *test_or_suite, * separately seq_printf() the suite results for the debugfs * representation. */ - if (suite) + if (!test) pr_info("%s %zd %s%s%s\n", kunit_status_to_ok_not_ok(status), test_number, description, directive_header, - (status == KUNIT_SKIPPED) ? 
directive : ""); + directive_body); else kunit_log(KERN_INFO, test, - KUNIT_SUBTEST_INDENT "%s %zd %s%s%s", + "%*s%s %zd %s%s%s", + KUNIT_INDENT_LEN * test_level, "", kunit_status_to_ok_not_ok(status), test_number, description, directive_header, - (status == KUNIT_SKIPPED) ? directive : ""); + directive_body); } enum kunit_status kunit_suite_has_succeeded(struct kunit_suite *suite) @@ -239,7 +252,7 @@ static size_t kunit_suite_counter = 1; static void kunit_print_suite_end(struct kunit_suite *suite) { - kunit_print_ok_not_ok((void *)suite, false, + kunit_print_ok_not_ok(NULL, KUNIT_LEVEL_SUITE, kunit_suite_has_succeeded(suite), kunit_suite_counter++, suite->name, @@ -310,7 +323,7 @@ static void kunit_fail(struct kunit *test, const struct kunit_loc *loc, string_stream_destroy(stream); } -static void __noreturn kunit_abort(struct kunit *test) +void __noreturn __kunit_abort(struct kunit *test) { kunit_try_catch_throw(&test->try_catch); /* Does not return. */ @@ -322,8 +335,9 @@ static void __noreturn kunit_abort(struct kunit *test) */ WARN_ONCE(true, "Throw could not abort from test!\n"); } +EXPORT_SYMBOL_GPL(__kunit_abort); -void kunit_do_failed_assertion(struct kunit *test, +void __kunit_do_failed_assertion(struct kunit *test, const struct kunit_loc *loc, enum kunit_assert_type type, const struct kunit_assert *assert, @@ -340,11 +354,8 @@ void kunit_do_failed_assertion(struct kunit *test, kunit_fail(test, loc, type, assert, assert_format, &message); va_end(args); - - if (type == KUNIT_ASSERTION) - kunit_abort(test); } -EXPORT_SYMBOL_GPL(kunit_do_failed_assertion); +EXPORT_SYMBOL_GPL(__kunit_do_failed_assertion); void kunit_init_test(struct kunit *test, const char *name, char *log) { @@ -419,15 +430,54 @@ static void kunit_try_run_case(void *data) * thread will resume control and handle any necessary clean up. */ kunit_run_case_internal(test, suite, test_case); - /* This line may never be reached. */ +} + +static void kunit_try_run_case_cleanup(void *data) +{ + struct kunit_try_catch_context *ctx = data; + struct kunit *test = ctx->test; + struct kunit_suite *suite = ctx->suite; + + current->kunit_test = test; + kunit_run_case_cleanup(test, suite); } +static void kunit_catch_run_case_cleanup(void *data) +{ + struct kunit_try_catch_context *ctx = data; + struct kunit *test = ctx->test; + int try_exit_code = kunit_try_catch_get_result(&test->try_catch); + + /* It is always a failure if cleanup aborts. */ + kunit_set_failure(test); + + if (try_exit_code) { + /* + * Test case could not finish, we have no idea what state it is + * in, so don't do clean up. + */ + if (try_exit_code == -ETIMEDOUT) { + kunit_err(test, "test case cleanup timed out\n"); + /* + * Unknown internal error occurred preventing test case from + * running, so there is nothing to clean up. + */ + } else { + kunit_err(test, "internal error occurred during test case cleanup: %d\n", + try_exit_code); + } + return; + } + + kunit_err(test, "test aborted during cleanup. continuing without cleaning up\n"); +} + + static void kunit_catch_run_case(void *data) { struct kunit_try_catch_context *ctx = data; struct kunit *test = ctx->test; - struct kunit_suite *suite = ctx->suite; int try_exit_code = kunit_try_catch_get_result(&test->try_catch); if (try_exit_code) { @@ -448,12 +498,6 @@ static void kunit_catch_run_case(void *data) } return; } - - /* - * Test case was run, but aborted. It is the test case's business as to - * whether it failed or not, we just need to clean up. 
- */ - kunit_run_case_cleanup(test, suite); } /* @@ -478,6 +522,13 @@ static void kunit_run_case_catch_errors(struct kunit_suite *suite, context.test_case = test_case; kunit_try_catch_run(try_catch, &context); + /* Now run the cleanup */ + kunit_try_catch_init(try_catch, + test, + kunit_try_run_case_cleanup, + kunit_catch_run_case_cleanup); + kunit_try_catch_run(try_catch, &context); + /* Propagate the parameter result to the test case. */ if (test->status == KUNIT_FAILURE) test_case->status = KUNIT_FAILURE; @@ -585,11 +636,11 @@ int kunit_run_tests(struct kunit_suite *suite) "param-%d", test.param_index); } - kunit_log(KERN_INFO, &test, - KUNIT_SUBTEST_INDENT KUNIT_SUBTEST_INDENT - "%s %d %s", - kunit_status_to_ok_not_ok(test.status), - test.param_index + 1, param_desc); + kunit_print_ok_not_ok(&test, KUNIT_LEVEL_CASE_PARAM, + test.status, + test.param_index + 1, + param_desc, + test.status_comment); /* Get next param. */ param_desc[0] = '\0'; @@ -603,7 +654,7 @@ int kunit_run_tests(struct kunit_suite *suite) kunit_print_test_stats(&test, param_stats); - kunit_print_ok_not_ok(&test, true, test_case->status, + kunit_print_ok_not_ok(&test, KUNIT_LEVEL_CASE, test_case->status, kunit_test_case_num(suite, test_case), test_case->name, test.status_comment); @@ -712,58 +763,28 @@ static struct notifier_block kunit_mod_nb = { }; #endif -struct kunit_kmalloc_array_params { - size_t n; - size_t size; - gfp_t gfp; -}; - -static int kunit_kmalloc_array_init(struct kunit_resource *res, void *context) +void *kunit_kmalloc_array(struct kunit *test, size_t n, size_t size, gfp_t gfp) { - struct kunit_kmalloc_array_params *params = context; + void *data; - res->data = kmalloc_array(params->n, params->size, params->gfp); - if (!res->data) - return -ENOMEM; + data = kmalloc_array(n, size, gfp); - return 0; -} + if (!data) + return NULL; -static void kunit_kmalloc_array_free(struct kunit_resource *res) -{ - kfree(res->data); -} - -void *kunit_kmalloc_array(struct kunit *test, size_t n, size_t size, gfp_t gfp) -{ - struct kunit_kmalloc_array_params params = { - .size = size, - .n = n, - .gfp = gfp - }; + if (kunit_add_action_or_reset(test, (kunit_action_t *)kfree, data) != 0) + return NULL; - return kunit_alloc_resource(test, - kunit_kmalloc_array_init, - kunit_kmalloc_array_free, - gfp, - ¶ms); + return data; } EXPORT_SYMBOL_GPL(kunit_kmalloc_array); -static inline bool kunit_kfree_match(struct kunit *test, - struct kunit_resource *res, void *match_data) -{ - /* Only match resources allocated with kunit_kmalloc() and friends. 
*/ - return res->free == kunit_kmalloc_array_free && res->data == match_data; -} - void kunit_kfree(struct kunit *test, const void *ptr) { if (!ptr) return; - if (kunit_destroy_resource(test, kunit_kfree_match, (void *)ptr)) - KUNIT_FAIL(test, "kunit_kfree: %px already freed or not allocated by kunit", ptr); + kunit_release_action(test, (kunit_action_t *)kfree, (void *)ptr); } EXPORT_SYMBOL_GPL(kunit_kfree); diff --git a/lib/maple_tree.c b/lib/maple_tree.c index 8ebc43d4cc8c..bfffbb7cab26 100644 --- a/lib/maple_tree.c +++ b/lib/maple_tree.c @@ -194,7 +194,7 @@ static void mas_set_height(struct ma_state *mas) unsigned int new_flags = mas->tree->ma_flags; new_flags &= ~MT_FLAGS_HEIGHT_MASK; - BUG_ON(mas->depth > MAPLE_HEIGHT_MAX); + MAS_BUG_ON(mas, mas->depth > MAPLE_HEIGHT_MAX); new_flags |= mas->depth << MT_FLAGS_HEIGHT_OFFSET; mas->tree->ma_flags = new_flags; } @@ -240,12 +240,12 @@ static inline void mas_set_err(struct ma_state *mas, long err) mas->node = MA_ERROR(err); } -static inline bool mas_is_ptr(struct ma_state *mas) +static inline bool mas_is_ptr(const struct ma_state *mas) { return mas->node == MAS_ROOT; } -static inline bool mas_is_start(struct ma_state *mas) +static inline bool mas_is_start(const struct ma_state *mas) { return mas->node == MAS_START; } @@ -425,28 +425,26 @@ static inline unsigned long mte_parent_slot_mask(unsigned long parent) } /* - * mas_parent_enum() - Return the maple_type of the parent from the stored + * mas_parent_type() - Return the maple_type of the parent from the stored * parent type. * @mas: The maple state - * @node: The maple_enode to extract the parent's enum + * @enode: The maple_enode to extract the parent's enum * Return: The node->parent maple_type */ static inline -enum maple_type mte_parent_enum(struct maple_enode *p_enode, - struct maple_tree *mt) +enum maple_type mas_parent_type(struct ma_state *mas, struct maple_enode *enode) { unsigned long p_type; - p_type = (unsigned long)p_enode; - if (p_type & MAPLE_PARENT_ROOT) - return 0; /* Validated in the caller. */ + p_type = (unsigned long)mte_to_node(enode)->parent; + if (WARN_ON(p_type & MAPLE_PARENT_ROOT)) + return 0; p_type &= MAPLE_NODE_MASK; - p_type = p_type & ~(MAPLE_PARENT_ROOT | mte_parent_slot_mask(p_type)); - + p_type &= ~mte_parent_slot_mask(p_type); switch (p_type) { case MAPLE_PARENT_RANGE64: /* or MAPLE_PARENT_ARANGE64 */ - if (mt_is_alloc(mt)) + if (mt_is_alloc(mas->tree)) return maple_arange_64; return maple_range_64; } @@ -454,14 +452,8 @@ enum maple_type mte_parent_enum(struct maple_enode *p_enode, return 0; } -static inline -enum maple_type mas_parent_enum(struct ma_state *mas, struct maple_enode *enode) -{ - return mte_parent_enum(ma_enode_ptr(mte_to_node(enode)->parent), mas->tree); -} - /* - * mte_set_parent() - Set the parent node and encode the slot + * mas_set_parent() - Set the parent node and encode the slot * @enode: The encoded maple node. * @parent: The encoded maple node that is the parent of @enode. * @slot: The slot that @enode resides in @parent. @@ -470,16 +462,16 @@ enum maple_type mas_parent_enum(struct ma_state *mas, struct maple_enode *enode) * parent type. 
*/ static inline -void mte_set_parent(struct maple_enode *enode, const struct maple_enode *parent, - unsigned char slot) +void mas_set_parent(struct ma_state *mas, struct maple_enode *enode, + const struct maple_enode *parent, unsigned char slot) { unsigned long val = (unsigned long)parent; unsigned long shift; unsigned long type; enum maple_type p_type = mte_node_type(parent); - BUG_ON(p_type == maple_dense); - BUG_ON(p_type == maple_leaf_64); + MAS_BUG_ON(mas, p_type == maple_dense); + MAS_BUG_ON(mas, p_type == maple_leaf_64); switch (p_type) { case maple_range_64: @@ -671,22 +663,22 @@ static inline unsigned long *ma_gaps(struct maple_node *node, } /* - * mte_pivot() - Get the pivot at @piv of the maple encoded node. - * @mn: The maple encoded node. + * mas_pivot() - Get the pivot at @piv of the maple encoded node. + * @mas: The maple state. * @piv: The pivot. * * Return: the pivot at @piv of @mn. */ -static inline unsigned long mte_pivot(const struct maple_enode *mn, - unsigned char piv) +static inline unsigned long mas_pivot(struct ma_state *mas, unsigned char piv) { - struct maple_node *node = mte_to_node(mn); - enum maple_type type = mte_node_type(mn); + struct maple_node *node = mas_mn(mas); + enum maple_type type = mte_node_type(mas->node); - if (piv >= mt_pivots[type]) { - WARN_ON(1); + if (MAS_WARN_ON(mas, piv >= mt_pivots[type])) { + mas_set_err(mas, -EIO); return 0; } + switch (type) { case maple_arange_64: return node->ma64.pivot[piv]; @@ -971,8 +963,6 @@ static inline unsigned char ma_meta_end(struct maple_node *mn, static inline unsigned char ma_meta_gap(struct maple_node *mn, enum maple_type mt) { - BUG_ON(mt != maple_arange_64); - return mn->ma64.meta.gap; } @@ -1111,7 +1101,6 @@ static int mas_ascend(struct ma_state *mas) enum maple_type a_type; unsigned long min, max; unsigned long *pivots; - unsigned char offset; bool set_max = false, set_min = false; a_node = mas_mn(mas); @@ -1123,8 +1112,9 @@ static int mas_ascend(struct ma_state *mas) p_node = mte_parent(mas->node); if (unlikely(a_node == p_node)) return 1; - a_type = mas_parent_enum(mas, mas->node); - offset = mte_parent_slot(mas->node); + + a_type = mas_parent_type(mas, mas->node); + mas->offset = mte_parent_slot(mas->node); a_enode = mt_mk_node(p_node, a_type); /* Check to make sure all parent information is still accurate */ @@ -1132,7 +1122,6 @@ static int mas_ascend(struct ma_state *mas) return 1; mas->node = a_enode; - mas->offset = offset; if (mte_is_root(a_enode)) { mas->max = ULONG_MAX; @@ -1140,11 +1129,17 @@ static int mas_ascend(struct ma_state *mas) return 0; } + if (!mas->min) + set_min = true; + + if (mas->max == ULONG_MAX) + set_max = true; + min = 0; max = ULONG_MAX; do { p_enode = a_enode; - a_type = mas_parent_enum(mas, p_enode); + a_type = mas_parent_type(mas, p_enode); a_node = mte_parent(p_enode); a_slot = mte_parent_slot(p_enode); a_enode = mt_mk_node(a_node, a_type); @@ -1401,9 +1396,9 @@ static inline struct maple_enode *mas_start(struct ma_state *mas) mas->min = 0; mas->max = ULONG_MAX; - mas->depth = 0; retry: + mas->depth = 0; root = mas_root(mas); /* Tree with nodes */ if (likely(xa_is_node(root))) { @@ -1631,6 +1626,7 @@ static inline unsigned long mas_max_gap(struct ma_state *mas) return mas_leaf_max_gap(mas); node = mas_mn(mas); + MAS_BUG_ON(mas, mt != maple_arange_64); offset = ma_meta_gap(node, mt); if (offset == MAPLE_ARANGE64_META_MAX) return 0; @@ -1659,11 +1655,12 @@ static inline void mas_parent_gap(struct ma_state *mas, unsigned char offset, enum maple_type pmt; pnode = 
mte_parent(mas->node); - pmt = mas_parent_enum(mas, mas->node); + pmt = mas_parent_type(mas, mas->node); penode = mt_mk_node(pnode, pmt); pgaps = ma_gaps(pnode, pmt); ascend: + MAS_BUG_ON(mas, pmt != maple_arange_64); meta_offset = ma_meta_gap(pnode, pmt); if (meta_offset == MAPLE_ARANGE64_META_MAX) meta_gap = 0; @@ -1691,7 +1688,7 @@ ascend: /* Go to the parent node. */ pnode = mte_parent(penode); - pmt = mas_parent_enum(mas, penode); + pmt = mas_parent_type(mas, penode); pgaps = ma_gaps(pnode, pmt); offset = mte_parent_slot(penode); penode = mt_mk_node(pnode, pmt); @@ -1718,7 +1715,7 @@ static inline void mas_update_gap(struct ma_state *mas) pslot = mte_parent_slot(mas->node); p_gap = ma_gaps(mte_parent(mas->node), - mas_parent_enum(mas, mas->node))[pslot]; + mas_parent_type(mas, mas->node))[pslot]; if (p_gap != max_gap) mas_parent_gap(mas, pslot, max_gap); @@ -1743,7 +1740,7 @@ static inline void mas_adopt_children(struct ma_state *mas, offset = ma_data_end(node, type, pivots, mas->max); do { child = mas_slot_locked(mas, slots, offset); - mte_set_parent(child, parent, offset); + mas_set_parent(mas, child, parent, offset); } while (offset--); } @@ -1755,7 +1752,7 @@ static inline void mas_adopt_children(struct ma_state *mas, * leave the node (true) and handle the adoption and free elsewhere. */ static inline void mas_replace(struct ma_state *mas, bool advanced) - __must_hold(mas->tree->lock) + __must_hold(mas->tree->ma_lock) { struct maple_node *mn = mas_mn(mas); struct maple_enode *old_enode; @@ -1767,7 +1764,7 @@ static inline void mas_replace(struct ma_state *mas, bool advanced) } else { offset = mte_parent_slot(mas->node); slots = ma_slots(mte_parent(mas->node), - mas_parent_enum(mas, mas->node)); + mas_parent_type(mas, mas->node)); old_enode = mas_slot_locked(mas, slots, offset); } @@ -1795,7 +1792,7 @@ static inline void mas_replace(struct ma_state *mas, bool advanced) * @child: the maple state to store the child. */ static inline bool mas_new_child(struct ma_state *mas, struct ma_state *child) - __must_hold(mas->tree->lock) + __must_hold(mas->tree->ma_lock) { enum maple_type mt; unsigned char offset; @@ -1943,8 +1940,9 @@ static inline int mab_calc_split(struct ma_state *mas, * causes one node to be deficient. * NOTE: mt_min_slots is 1 based, b_end and split are zero. 
*/ - while (((bn->pivot[split] - min) < slot_count - 1) && - (split < slot_count - 1) && (b_end - split > slot_min)) + while ((split < slot_count - 1) && + ((bn->pivot[split] - min) < slot_count - 1) && + (b_end - split > slot_min)) split++; } @@ -2347,7 +2345,8 @@ static inline void mas_topiary_range(struct ma_state *mas, void __rcu **slots; unsigned char offset; - MT_BUG_ON(mas->tree, mte_is_leaf(mas->node)); + MAS_BUG_ON(mas, mte_is_leaf(mas->node)); + slots = ma_slots(mas_mn(mas), mte_node_type(mas->node)); for (offset = start; offset <= end; offset++) { struct maple_enode *enode = mas_slot_locked(mas, slots, offset); @@ -2707,9 +2706,9 @@ static inline void mas_set_split_parent(struct ma_state *mas, return; if ((*slot) <= split) - mte_set_parent(mas->node, left, *slot); + mas_set_parent(mas, mas->node, left, *slot); else if (right) - mte_set_parent(mas->node, right, (*slot) - split - 1); + mas_set_parent(mas, mas->node, right, (*slot) - split - 1); (*slot)++; } @@ -3106,12 +3105,12 @@ static int mas_spanning_rebalance(struct ma_state *mas, mte_node_type(mast->orig_l->node)); mast->orig_l->depth++; mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, &l_mas, true); - mte_set_parent(left, l_mas.node, slot); + mas_set_parent(mas, left, l_mas.node, slot); if (middle) - mte_set_parent(middle, l_mas.node, ++slot); + mas_set_parent(mas, middle, l_mas.node, ++slot); if (right) - mte_set_parent(right, l_mas.node, ++slot); + mas_set_parent(mas, right, l_mas.node, ++slot); if (mas_is_root_limits(mast->l)) { new_root: @@ -3250,7 +3249,7 @@ static inline void mas_destroy_rebalance(struct ma_state *mas, unsigned char end l_mas.max = l_pivs[split]; mas->min = l_mas.max + 1; eparent = mt_mk_node(mte_parent(l_mas.node), - mas_parent_enum(&l_mas, l_mas.node)); + mas_parent_type(&l_mas, l_mas.node)); tmp += end; if (!in_rcu) { unsigned char max_p = mt_pivots[mt]; @@ -3293,7 +3292,7 @@ static inline void mas_destroy_rebalance(struct ma_state *mas, unsigned char end /* replace parent. */ offset = mte_parent_slot(mas->node); - mt = mas_parent_enum(&l_mas, l_mas.node); + mt = mas_parent_type(&l_mas, l_mas.node); parent = mas_pop_node(mas); slots = ma_slots(parent, mt); pivs = ma_pivots(parent, mt); @@ -3338,8 +3337,8 @@ static inline bool mas_split_final_node(struct maple_subtree_state *mast, * The Big_node data should just fit in a single node. */ ancestor = mas_new_ma_node(mas, mast->bn); - mte_set_parent(mast->l->node, ancestor, mast->l->offset); - mte_set_parent(mast->r->node, ancestor, mast->r->offset); + mas_set_parent(mas, mast->l->node, ancestor, mast->l->offset); + mas_set_parent(mas, mast->r->node, ancestor, mast->r->offset); mte_to_node(ancestor)->parent = mas_mn(mas)->parent; mast->l->node = ancestor; @@ -3729,43 +3728,31 @@ static inline void mas_store_root(struct ma_state *mas, void *entry) */ static bool mas_is_span_wr(struct ma_wr_state *wr_mas) { - unsigned long max; + unsigned long max = wr_mas->r_max; unsigned long last = wr_mas->mas->last; - unsigned long piv = wr_mas->r_max; enum maple_type type = wr_mas->type; void *entry = wr_mas->entry; - /* Contained in this pivot */ - if (piv > last) + /* Contained in this pivot, fast path */ + if (last < max) return false; - max = wr_mas->mas->max; - if (unlikely(ma_is_leaf(type))) { - /* Fits in the node, but may span slots. */ + if (ma_is_leaf(type)) { + max = wr_mas->mas->max; if (last < max) return false; + } - /* Writes to the end of the node but not null. 
*/ - if ((last == max) && entry) - return false; - + if (last == max) { /* - * Writing ULONG_MAX is not a spanning write regardless of the - * value being written as long as the range fits in the node. + * The last entry of leaf node cannot be NULL unless it is the + * rightmost node (writing ULONG_MAX), otherwise it spans slots. */ - if ((last == ULONG_MAX) && (last == max)) - return false; - } else if (piv == last) { - if (entry) - return false; - - /* Detect spanning store wr walk */ - if (last == ULONG_MAX) + if (entry || last == ULONG_MAX) return false; } - trace_ma_write(__func__, wr_mas->mas, piv, entry); - + trace_ma_write(__func__, wr_mas->mas, wr_mas->r_max, entry); return true; } @@ -4087,52 +4074,27 @@ static inline int mas_wr_spanning_store(struct ma_wr_state *wr_mas) * * Return: True if stored, false otherwise */ -static inline bool mas_wr_node_store(struct ma_wr_state *wr_mas) +static inline bool mas_wr_node_store(struct ma_wr_state *wr_mas, + unsigned char new_end) { struct ma_state *mas = wr_mas->mas; void __rcu **dst_slots; unsigned long *dst_pivots; - unsigned char dst_offset; - unsigned char new_end = wr_mas->node_end; - unsigned char offset; - unsigned char node_slots = mt_slots[wr_mas->type]; + unsigned char dst_offset, offset_end = wr_mas->offset_end; struct maple_node reuse, *newnode; - unsigned char copy_size, max_piv = mt_pivots[wr_mas->type]; + unsigned char copy_size, node_pivots = mt_pivots[wr_mas->type]; bool in_rcu = mt_in_rcu(mas->tree); - offset = mas->offset; - if (mas->last == wr_mas->r_max) { - /* runs right to the end of the node */ - if (mas->last == mas->max) - new_end = offset; - /* don't copy this offset */ - wr_mas->offset_end++; - } else if (mas->last < wr_mas->r_max) { - /* new range ends in this range */ - if (unlikely(wr_mas->r_max == ULONG_MAX)) - mas_bulk_rebalance(mas, wr_mas->node_end, wr_mas->type); - - new_end++; - } else { - if (wr_mas->end_piv == mas->last) - wr_mas->offset_end++; - - new_end -= wr_mas->offset_end - offset - 1; - } - - /* new range starts within a range */ - if (wr_mas->r_min < mas->index) - new_end++; - - /* Not enough room */ - if (new_end >= node_slots) - return false; - - /* Not enough data. */ + /* Check if there is enough data. The room is enough. */ if (!mte_is_root(mas->node) && (new_end <= mt_min_slots[wr_mas->type]) && !(mas->mas_flags & MA_STATE_BULK)) return false; + if (mas->last == wr_mas->end_piv) + offset_end++; /* don't copy this offset */ + else if (unlikely(wr_mas->r_max == ULONG_MAX)) + mas_bulk_rebalance(mas, wr_mas->node_end, wr_mas->type); + /* set up node. */ if (in_rcu) { mas_node_count(mas, 1); @@ -4149,47 +4111,36 @@ static inline bool mas_wr_node_store(struct ma_wr_state *wr_mas) dst_pivots = ma_pivots(newnode, wr_mas->type); dst_slots = ma_slots(newnode, wr_mas->type); /* Copy from start to insert point */ - memcpy(dst_pivots, wr_mas->pivots, sizeof(unsigned long) * (offset + 1)); - memcpy(dst_slots, wr_mas->slots, sizeof(void *) * (offset + 1)); - dst_offset = offset; + memcpy(dst_pivots, wr_mas->pivots, sizeof(unsigned long) * mas->offset); + memcpy(dst_slots, wr_mas->slots, sizeof(void *) * mas->offset); /* Handle insert of new range starting after old range */ if (wr_mas->r_min < mas->index) { - mas->offset++; - rcu_assign_pointer(dst_slots[dst_offset], wr_mas->content); - dst_pivots[dst_offset++] = mas->index - 1; + rcu_assign_pointer(dst_slots[mas->offset], wr_mas->content); + dst_pivots[mas->offset++] = mas->index - 1; } /* Store the new entry and range end. 
*/ - if (dst_offset < max_piv) - dst_pivots[dst_offset] = mas->last; - mas->offset = dst_offset; - rcu_assign_pointer(dst_slots[dst_offset], wr_mas->entry); + if (mas->offset < node_pivots) + dst_pivots[mas->offset] = mas->last; + rcu_assign_pointer(dst_slots[mas->offset], wr_mas->entry); /* * this range wrote to the end of the node or it overwrote the rest of * the data */ - if (wr_mas->offset_end > wr_mas->node_end || mas->last >= mas->max) { - new_end = dst_offset; + if (offset_end > wr_mas->node_end) goto done; - } - dst_offset++; + dst_offset = mas->offset + 1; /* Copy to the end of node if necessary. */ - copy_size = wr_mas->node_end - wr_mas->offset_end + 1; - memcpy(dst_slots + dst_offset, wr_mas->slots + wr_mas->offset_end, + copy_size = wr_mas->node_end - offset_end + 1; + memcpy(dst_slots + dst_offset, wr_mas->slots + offset_end, sizeof(void *) * copy_size); - if (dst_offset < max_piv) { - if (copy_size > max_piv - dst_offset) - copy_size = max_piv - dst_offset; - - memcpy(dst_pivots + dst_offset, - wr_mas->pivots + wr_mas->offset_end, - sizeof(unsigned long) * copy_size); - } + memcpy(dst_pivots + dst_offset, wr_mas->pivots + offset_end, + sizeof(unsigned long) * (copy_size - 1)); - if ((wr_mas->node_end == node_slots - 1) && (new_end < node_slots - 1)) + if (new_end < node_pivots) dst_pivots[new_end] = mas->max; done: @@ -4215,59 +4166,46 @@ done: static inline bool mas_wr_slot_store(struct ma_wr_state *wr_mas) { struct ma_state *mas = wr_mas->mas; - unsigned long lmax; /* Logical max. */ unsigned char offset = mas->offset; + bool gap = false; - if ((wr_mas->r_max > mas->last) && ((wr_mas->r_min != mas->index) || - (offset != wr_mas->node_end))) - return false; - - if (offset == wr_mas->node_end - 1) - lmax = mas->max; - else - lmax = wr_mas->pivots[offset + 1]; - - /* going to overwrite too many slots. */ - if (lmax < mas->last) + if (wr_mas->offset_end - offset != 1) return false; - if (wr_mas->r_min == mas->index) { - /* overwriting two or more ranges with one. */ - if (lmax == mas->last) - return false; + gap |= !mt_slot_locked(mas->tree, wr_mas->slots, offset); + gap |= !mt_slot_locked(mas->tree, wr_mas->slots, offset + 1); - /* Overwriting all of offset and a portion of offset + 1. */ + if (mas->index == wr_mas->r_min) { + /* Overwriting the range and over a part of the next range. */ rcu_assign_pointer(wr_mas->slots[offset], wr_mas->entry); wr_mas->pivots[offset] = mas->last; - goto done; + } else { + /* Overwriting a part of the range and over the next range */ + rcu_assign_pointer(wr_mas->slots[offset + 1], wr_mas->entry); + wr_mas->pivots[offset] = mas->index - 1; + mas->offset++; /* Keep mas accurate. */ } - /* Doesn't end on the next range end. */ - if (lmax != mas->last) - return false; - - /* Overwriting a portion of offset and all of offset + 1 */ - if ((offset + 1 < mt_pivots[wr_mas->type]) && - (wr_mas->entry || wr_mas->pivots[offset + 1])) - wr_mas->pivots[offset + 1] = mas->last; - - rcu_assign_pointer(wr_mas->slots[offset + 1], wr_mas->entry); - wr_mas->pivots[offset] = mas->index - 1; - mas->offset++; /* Keep mas accurate. */ - -done: trace_ma_write(__func__, mas, 0, wr_mas->entry); - mas_update_gap(mas); + /* + * Only update gap when the new entry is empty or there is an empty + * entry in the original two ranges. 
+ */ + if (!wr_mas->entry || gap) + mas_update_gap(mas); + return true; } static inline void mas_wr_end_piv(struct ma_wr_state *wr_mas) { - while ((wr_mas->mas->last > wr_mas->end_piv) && - (wr_mas->offset_end < wr_mas->node_end)) - wr_mas->end_piv = wr_mas->pivots[++wr_mas->offset_end]; + while ((wr_mas->offset_end < wr_mas->node_end) && + (wr_mas->mas->last > wr_mas->pivots[wr_mas->offset_end])) + wr_mas->offset_end++; - if (wr_mas->mas->last > wr_mas->end_piv) + if (wr_mas->offset_end < wr_mas->node_end) + wr_mas->end_piv = wr_mas->pivots[wr_mas->offset_end]; + else wr_mas->end_piv = wr_mas->mas->max; } @@ -4275,19 +4213,21 @@ static inline void mas_wr_extend_null(struct ma_wr_state *wr_mas) { struct ma_state *mas = wr_mas->mas; - if (mas->last < wr_mas->end_piv && !wr_mas->slots[wr_mas->offset_end]) + if (!wr_mas->slots[wr_mas->offset_end]) { + /* If this one is null, the next and prev are not */ mas->last = wr_mas->end_piv; - - /* Check next slot(s) if we are overwriting the end */ - if ((mas->last == wr_mas->end_piv) && - (wr_mas->node_end != wr_mas->offset_end) && - !wr_mas->slots[wr_mas->offset_end + 1]) { - wr_mas->offset_end++; - if (wr_mas->offset_end == wr_mas->node_end) - mas->last = mas->max; - else - mas->last = wr_mas->pivots[wr_mas->offset_end]; - wr_mas->end_piv = mas->last; + } else { + /* Check next slot(s) if we are overwriting the end */ + if ((mas->last == wr_mas->end_piv) && + (wr_mas->node_end != wr_mas->offset_end) && + !wr_mas->slots[wr_mas->offset_end + 1]) { + wr_mas->offset_end++; + if (wr_mas->offset_end == wr_mas->node_end) + mas->last = mas->max; + else + mas->last = wr_mas->pivots[wr_mas->offset_end]; + wr_mas->end_piv = mas->last; + } } if (!wr_mas->content) { @@ -4305,6 +4245,27 @@ static inline void mas_wr_extend_null(struct ma_wr_state *wr_mas) } } +static inline unsigned char mas_wr_new_end(struct ma_wr_state *wr_mas) +{ + struct ma_state *mas = wr_mas->mas; + unsigned char new_end = wr_mas->node_end + 2; + + new_end -= wr_mas->offset_end - mas->offset; + if (wr_mas->r_min == mas->index) + new_end--; + + if (wr_mas->end_piv == mas->last) + new_end--; + + return new_end; +} + +/* + * mas_wr_append: Attempt to append + * @wr_mas: the maple write state + * + * Return: True if appended, false otherwise + */ static inline bool mas_wr_append(struct ma_wr_state *wr_mas) { unsigned char end = wr_mas->node_end; @@ -4312,34 +4273,30 @@ static inline bool mas_wr_append(struct ma_wr_state *wr_mas) struct ma_state *mas = wr_mas->mas; unsigned char node_pivots = mt_pivots[wr_mas->type]; - if ((mas->index != wr_mas->r_min) && (mas->last == wr_mas->r_max)) { - if (new_end < node_pivots) - wr_mas->pivots[new_end] = wr_mas->pivots[end]; + if (mas->offset != wr_mas->node_end) + return false; - if (new_end < node_pivots) - ma_set_meta(wr_mas->node, maple_leaf_64, 0, new_end); + if (new_end < node_pivots) { + wr_mas->pivots[new_end] = wr_mas->pivots[end]; + ma_set_meta(wr_mas->node, maple_leaf_64, 0, new_end); + } + if (mas->last == wr_mas->r_max) { + /* Append to end of range */ rcu_assign_pointer(wr_mas->slots[new_end], wr_mas->entry); - mas->offset = new_end; wr_mas->pivots[end] = mas->index - 1; - - return true; - } - - if ((mas->index == wr_mas->r_min) && (mas->last < wr_mas->r_max)) { - if (new_end < node_pivots) - wr_mas->pivots[new_end] = wr_mas->pivots[end]; - + mas->offset = new_end; + } else { + /* Append to start of range */ rcu_assign_pointer(wr_mas->slots[new_end], wr_mas->content); - if (new_end < node_pivots) - ma_set_meta(wr_mas->node, maple_leaf_64, 0, 
new_end); - wr_mas->pivots[end] = mas->last; rcu_assign_pointer(wr_mas->slots[end], wr_mas->entry); - return true; } - return false; + if (!wr_mas->content || !wr_mas->entry) + mas_update_gap(mas); + + return true; } /* @@ -4360,9 +4317,8 @@ static void mas_wr_bnode(struct ma_wr_state *wr_mas) static inline void mas_wr_modify(struct ma_wr_state *wr_mas) { - unsigned char node_slots; - unsigned char node_size; struct ma_state *mas = wr_mas->mas; + unsigned char new_end; /* Direct replacement */ if (wr_mas->r_min == mas->index && wr_mas->r_max == mas->last) { @@ -4372,26 +4328,22 @@ static inline void mas_wr_modify(struct ma_wr_state *wr_mas) return; } - /* Attempt to append */ - node_slots = mt_slots[wr_mas->type]; - node_size = wr_mas->node_end - wr_mas->offset_end + mas->offset + 2; - if (mas->max == ULONG_MAX) - node_size++; - - /* slot and node store will not fit, go to the slow path */ - if (unlikely(node_size >= node_slots)) + /* + * new_end exceeds the size of the maple node and cannot enter the fast + * path. + */ + new_end = mas_wr_new_end(wr_mas); + if (new_end >= mt_slots[wr_mas->type]) goto slow_path; - if (wr_mas->entry && (wr_mas->node_end < node_slots - 1) && - (mas->offset == wr_mas->node_end) && mas_wr_append(wr_mas)) { - if (!wr_mas->content || !wr_mas->entry) - mas_update_gap(mas); + /* Attempt to append */ + if (new_end == wr_mas->node_end + 1 && mas_wr_append(wr_mas)) return; - } - if ((wr_mas->offset_end - mas->offset <= 1) && mas_wr_slot_store(wr_mas)) + if (new_end == wr_mas->node_end && mas_wr_slot_store(wr_mas)) return; - else if (mas_wr_node_store(wr_mas)) + + if (mas_wr_node_store(wr_mas, new_end)) return; if (mas_is_err(mas)) @@ -4424,7 +4376,6 @@ static inline void *mas_wr_store_entry(struct ma_wr_state *wr_mas) } /* At this point, we are at the leaf node that needs to be altered. */ - wr_mas->end_piv = wr_mas->r_max; mas_wr_end_piv(wr_mas); if (!wr_mas->entry) @@ -4498,6 +4449,25 @@ exists: } +static inline void mas_rewalk(struct ma_state *mas, unsigned long index) +{ +retry: + mas_set(mas, index); + mas_state_walk(mas); + if (mas_is_start(mas)) + goto retry; +} + +static inline bool mas_rewalk_if_dead(struct ma_state *mas, + struct maple_node *node, const unsigned long index) +{ + if (unlikely(ma_dead_node(node))) { + mas_rewalk(mas, index); + return true; + } + return false; +} + /* * mas_prev_node() - Find the prev non-null entry at the same level in the * tree. The prev value will be mas->node[mas->offset] or MAS_NONE. 
@@ -4513,15 +4483,19 @@ static inline int mas_prev_node(struct ma_state *mas, unsigned long min) int offset, level; void __rcu **slots; struct maple_node *node; - struct maple_enode *enode; unsigned long *pivots; + unsigned long max; - if (mas_is_none(mas)) - return 0; + node = mas_mn(mas); + if (!mas->min) + goto no_entry; + + max = mas->min - 1; + if (max < min) + goto no_entry; level = 0; do { - node = mas_mn(mas); if (ma_is_root(node)) goto no_entry; @@ -4530,64 +4504,41 @@ static inline int mas_prev_node(struct ma_state *mas, unsigned long min) return 1; offset = mas->offset; level++; + node = mas_mn(mas); } while (!offset); offset--; mt = mte_node_type(mas->node); - node = mas_mn(mas); - slots = ma_slots(node, mt); - pivots = ma_pivots(node, mt); - if (unlikely(ma_dead_node(node))) - return 1; - - mas->max = pivots[offset]; - if (offset) - mas->min = pivots[offset - 1] + 1; - if (unlikely(ma_dead_node(node))) - return 1; - - if (mas->max < min) - goto no_entry_min; - while (level > 1) { level--; - enode = mas_slot(mas, slots, offset); + slots = ma_slots(node, mt); + mas->node = mas_slot(mas, slots, offset); if (unlikely(ma_dead_node(node))) return 1; - mas->node = enode; mt = mte_node_type(mas->node); node = mas_mn(mas); - slots = ma_slots(node, mt); pivots = ma_pivots(node, mt); - offset = ma_data_end(node, mt, pivots, mas->max); + offset = ma_data_end(node, mt, pivots, max); if (unlikely(ma_dead_node(node))) return 1; - - if (offset) - mas->min = pivots[offset - 1] + 1; - - if (offset < mt_pivots[mt]) - mas->max = pivots[offset]; - - if (mas->max < min) - goto no_entry; } + slots = ma_slots(node, mt); mas->node = mas_slot(mas, slots, offset); + pivots = ma_pivots(node, mt); if (unlikely(ma_dead_node(node))) return 1; + if (likely(offset)) + mas->min = pivots[offset - 1] + 1; + mas->max = max; mas->offset = mas_data_end(mas); if (unlikely(mte_dead_node(mas->node))) return 1; return 0; -no_entry_min: - mas->offset = offset; - if (offset) - mas->min = pivots[offset - 1] + 1; no_entry: if (unlikely(ma_dead_node(node))) return 1; @@ -4597,6 +4548,76 @@ no_entry: } /* + * mas_prev_slot() - Get the entry in the previous slot + * + * @mas: The maple state + * @max: The minimum starting range + * + * Return: The entry in the previous slot which is possibly NULL + */ +static void *mas_prev_slot(struct ma_state *mas, unsigned long min, bool empty) +{ + void *entry; + void __rcu **slots; + unsigned long pivot; + enum maple_type type; + unsigned long *pivots; + struct maple_node *node; + unsigned long save_point = mas->index; + +retry: + node = mas_mn(mas); + type = mte_node_type(mas->node); + pivots = ma_pivots(node, type); + if (unlikely(mas_rewalk_if_dead(mas, node, save_point))) + goto retry; + +again: + if (mas->min <= min) { + pivot = mas_safe_min(mas, pivots, mas->offset); + + if (unlikely(mas_rewalk_if_dead(mas, node, save_point))) + goto retry; + + if (pivot <= min) + return NULL; + } + + if (likely(mas->offset)) { + mas->offset--; + mas->last = mas->index - 1; + mas->index = mas_safe_min(mas, pivots, mas->offset); + } else { + if (mas_prev_node(mas, min)) { + mas_rewalk(mas, save_point); + goto retry; + } + + if (mas_is_none(mas)) + return NULL; + + mas->last = mas->max; + node = mas_mn(mas); + type = mte_node_type(mas->node); + pivots = ma_pivots(node, type); + mas->index = pivots[mas->offset - 1] + 1; + } + + slots = ma_slots(node, type); + entry = mas_slot(mas, slots, mas->offset); + if (unlikely(mas_rewalk_if_dead(mas, node, save_point))) + goto retry; + + if (likely(entry)) + 
return entry; + + if (!empty) + goto again; + + return entry; +} + +/* * mas_next_node() - Get the next node at the same level in the tree. * @mas: The maple state * @max: The maximum pivot value to check. @@ -4607,11 +4628,10 @@ no_entry: static inline int mas_next_node(struct ma_state *mas, struct maple_node *node, unsigned long max) { - unsigned long min, pivot; + unsigned long min; unsigned long *pivots; struct maple_enode *enode; int level = 0; - unsigned char offset; unsigned char node_end; enum maple_type mt; void __rcu **slots; @@ -4619,19 +4639,16 @@ static inline int mas_next_node(struct ma_state *mas, struct maple_node *node, if (mas->max >= max) goto no_entry; + min = mas->max + 1; level = 0; do { if (ma_is_root(node)) goto no_entry; - min = mas->max + 1; - if (min > max) - goto no_entry; - + /* Walk up. */ if (unlikely(mas_ascend(mas))) return 1; - offset = mas->offset; level++; node = mas_mn(mas); mt = mte_node_type(mas->node); @@ -4640,36 +4657,37 @@ static inline int mas_next_node(struct ma_state *mas, struct maple_node *node, if (unlikely(ma_dead_node(node))) return 1; - } while (unlikely(offset == node_end)); + } while (unlikely(mas->offset == node_end)); slots = ma_slots(node, mt); - pivot = mas_safe_pivot(mas, pivots, ++offset, mt); - while (unlikely(level > 1)) { - /* Descend, if necessary */ - enode = mas_slot(mas, slots, offset); - if (unlikely(ma_dead_node(node))) - return 1; + mas->offset++; + enode = mas_slot(mas, slots, mas->offset); + if (unlikely(ma_dead_node(node))) + return 1; - mas->node = enode; + if (level > 1) + mas->offset = 0; + + while (unlikely(level > 1)) { level--; + mas->node = enode; node = mas_mn(mas); mt = mte_node_type(mas->node); slots = ma_slots(node, mt); - pivots = ma_pivots(node, mt); + enode = mas_slot(mas, slots, 0); if (unlikely(ma_dead_node(node))) return 1; - - offset = 0; - pivot = pivots[0]; } - enode = mas_slot(mas, slots, offset); + if (!mas->offset) + pivots = ma_pivots(node, mt); + + mas->max = mas_safe_pivot(mas, pivots, mas->offset, mt); if (unlikely(ma_dead_node(node))) return 1; mas->node = enode; mas->min = min; - mas->max = pivot; return 0; no_entry: @@ -4681,92 +4699,88 @@ no_entry: } /* - * mas_next_nentry() - Get the next node entry - * @mas: The maple state - * @max: The maximum value to check - * @*range_start: Pointer to store the start of the range. + * mas_next_slot() - Get the entry in the next slot * - * Sets @mas->offset to the offset of the next node entry, @mas->last to the - * pivot of the entry. 
+ * @mas: The maple state + * @max: The maximum starting range + * @empty: Can be empty * - * Return: The next entry, %NULL otherwise + * Return: The entry in the next slot which is possibly NULL */ -static inline void *mas_next_nentry(struct ma_state *mas, - struct maple_node *node, unsigned long max, enum maple_type type) +static void *mas_next_slot(struct ma_state *mas, unsigned long max, bool empty) { - unsigned char count; - unsigned long pivot; - unsigned long *pivots; void __rcu **slots; + unsigned long *pivots; + unsigned long pivot; + enum maple_type type; + struct maple_node *node; + unsigned char data_end; + unsigned long save_point = mas->last; void *entry; - if (mas->last == mas->max) { - mas->index = mas->max; - return NULL; - } - - slots = ma_slots(node, type); +retry: + node = mas_mn(mas); + type = mte_node_type(mas->node); pivots = ma_pivots(node, type); - count = ma_data_end(node, type, pivots, mas->max); - if (unlikely(ma_dead_node(node))) - return NULL; - - mas->index = mas_safe_min(mas, pivots, mas->offset); - if (unlikely(ma_dead_node(node))) - return NULL; - - if (mas->index > max) - return NULL; - - if (mas->offset > count) - return NULL; + data_end = ma_data_end(node, type, pivots, mas->max); + if (unlikely(mas_rewalk_if_dead(mas, node, save_point))) + goto retry; - while (mas->offset < count) { - pivot = pivots[mas->offset]; - entry = mas_slot(mas, slots, mas->offset); - if (ma_dead_node(node)) - return NULL; +again: + if (mas->max >= max) { + if (likely(mas->offset < data_end)) + pivot = pivots[mas->offset]; + else + return NULL; /* must be mas->max */ - if (entry) - goto found; + if (unlikely(mas_rewalk_if_dead(mas, node, save_point))) + goto retry; if (pivot >= max) return NULL; + } - mas->index = pivot + 1; + if (likely(mas->offset < data_end)) { + mas->index = pivots[mas->offset] + 1; mas->offset++; - } + if (likely(mas->offset < data_end)) + mas->last = pivots[mas->offset]; + else + mas->last = mas->max; + } else { + if (mas_next_node(mas, node, max)) { + mas_rewalk(mas, save_point); + goto retry; + } - if (mas->index > mas->max) { - mas->index = mas->last; - return NULL; + if (mas_is_none(mas)) + return NULL; + + mas->offset = 0; + mas->index = mas->min; + node = mas_mn(mas); + type = mte_node_type(mas->node); + pivots = ma_pivots(node, type); + mas->last = pivots[0]; } - pivot = mas_safe_pivot(mas, pivots, mas->offset, type); - entry = mas_slot(mas, slots, mas->offset); - if (ma_dead_node(node)) - return NULL; + slots = ma_slots(node, type); + entry = mt_slot(mas->tree, slots, mas->offset); + if (unlikely(mas_rewalk_if_dead(mas, node, save_point))) + goto retry; - if (!pivot) - return NULL; + if (entry) + return entry; - if (!entry) - return NULL; + if (!empty) { + if (!mas->offset) + data_end = 2; + goto again; + } -found: - mas->last = pivot; return entry; } -static inline void mas_rewalk(struct ma_state *mas, unsigned long index) -{ -retry: - mas_set(mas, index); - mas_state_walk(mas); - if (mas_is_start(mas)) - goto retry; -} - /* * mas_next_entry() - Internal function to get the next entry. 
* @mas: The maple state @@ -4781,155 +4795,10 @@ retry: */ static inline void *mas_next_entry(struct ma_state *mas, unsigned long limit) { - void *entry = NULL; - struct maple_enode *prev_node; - struct maple_node *node; - unsigned char offset; - unsigned long last; - enum maple_type mt; - - if (mas->index > limit) { - mas->index = mas->last = limit; - mas_pause(mas); + if (mas->last >= limit) return NULL; - } - last = mas->last; -retry: - offset = mas->offset; - prev_node = mas->node; - node = mas_mn(mas); - mt = mte_node_type(mas->node); - mas->offset++; - if (unlikely(mas->offset >= mt_slots[mt])) { - mas->offset = mt_slots[mt] - 1; - goto next_node; - } - - while (!mas_is_none(mas)) { - entry = mas_next_nentry(mas, node, limit, mt); - if (unlikely(ma_dead_node(node))) { - mas_rewalk(mas, last); - goto retry; - } - if (likely(entry)) - return entry; - - if (unlikely((mas->index > limit))) - break; - -next_node: - prev_node = mas->node; - offset = mas->offset; - if (unlikely(mas_next_node(mas, node, limit))) { - mas_rewalk(mas, last); - goto retry; - } - mas->offset = 0; - node = mas_mn(mas); - mt = mte_node_type(mas->node); - } - - mas->index = mas->last = limit; - mas->offset = offset; - mas->node = prev_node; - return NULL; -} - -/* - * mas_prev_nentry() - Get the previous node entry. - * @mas: The maple state. - * @limit: The lower limit to check for a value. - * - * Return: the entry, %NULL otherwise. - */ -static inline void *mas_prev_nentry(struct ma_state *mas, unsigned long limit, - unsigned long index) -{ - unsigned long pivot, min; - unsigned char offset; - struct maple_node *mn; - enum maple_type mt; - unsigned long *pivots; - void __rcu **slots; - void *entry; - -retry: - if (!mas->offset) - return NULL; - - mn = mas_mn(mas); - mt = mte_node_type(mas->node); - offset = mas->offset - 1; - if (offset >= mt_slots[mt]) - offset = mt_slots[mt] - 1; - - slots = ma_slots(mn, mt); - pivots = ma_pivots(mn, mt); - if (unlikely(ma_dead_node(mn))) { - mas_rewalk(mas, index); - goto retry; - } - - if (offset == mt_pivots[mt]) - pivot = mas->max; - else - pivot = pivots[offset]; - - if (unlikely(ma_dead_node(mn))) { - mas_rewalk(mas, index); - goto retry; - } - - while (offset && ((!mas_slot(mas, slots, offset) && pivot >= limit) || - !pivot)) - pivot = pivots[--offset]; - - min = mas_safe_min(mas, pivots, offset); - entry = mas_slot(mas, slots, offset); - if (unlikely(ma_dead_node(mn))) { - mas_rewalk(mas, index); - goto retry; - } - - if (likely(entry)) { - mas->offset = offset; - mas->last = pivot; - mas->index = min; - } - return entry; -} - -static inline void *mas_prev_entry(struct ma_state *mas, unsigned long min) -{ - void *entry; - - if (mas->index < min) { - mas->index = mas->last = min; - mas->node = MAS_NONE; - return NULL; - } -retry: - while (likely(!mas_is_none(mas))) { - entry = mas_prev_nentry(mas, min, mas->index); - if (unlikely(mas->last < min)) - goto not_found; - - if (likely(entry)) - return entry; - - if (unlikely(mas_prev_node(mas, min))) { - mas_rewalk(mas, mas->index); - goto retry; - } - - mas->offset++; - } - - mas->offset--; -not_found: - mas->index = mas->last = min; - return NULL; + return mas_next_slot(mas, limit, false); } /* @@ -5105,24 +4974,25 @@ void *mas_walk(struct ma_state *mas) { void *entry; + if (mas_is_none(mas) || mas_is_paused(mas) || mas_is_ptr(mas)) + mas->node = MAS_START; retry: entry = mas_state_walk(mas); - if (mas_is_start(mas)) + if (mas_is_start(mas)) { goto retry; - - if (mas_is_ptr(mas)) { + } else if (mas_is_none(mas)) { + 
mas->index = 0; + mas->last = ULONG_MAX; + } else if (mas_is_ptr(mas)) { if (!mas->index) { mas->last = 0; - } else { - mas->index = 1; - mas->last = ULONG_MAX; + return entry; } - return entry; - } - if (mas_is_none(mas)) { - mas->index = 0; + mas->index = 1; mas->last = ULONG_MAX; + mas->node = MAS_NONE; + return NULL; } return entry; @@ -5202,46 +5072,6 @@ static inline void mas_awalk(struct ma_state *mas, unsigned long size) } /* - * mas_fill_gap() - Fill a located gap with @entry. - * @mas: The maple state - * @entry: The value to store - * @slot: The offset into the node to store the @entry - * @size: The size of the entry - * @index: The start location - */ -static inline void mas_fill_gap(struct ma_state *mas, void *entry, - unsigned char slot, unsigned long size, unsigned long *index) -{ - MA_WR_STATE(wr_mas, mas, entry); - unsigned char pslot = mte_parent_slot(mas->node); - struct maple_enode *mn = mas->node; - unsigned long *pivots; - enum maple_type ptype; - /* - * mas->index is the start address for the search - * which may no longer be needed. - * mas->last is the end address for the search - */ - - *index = mas->index; - mas->last = mas->index + size - 1; - - /* - * It is possible that using mas->max and mas->min to correctly - * calculate the index and last will cause an issue in the gap - * calculation, so fix the ma_state here - */ - mas_ascend(mas); - ptype = mte_node_type(mas->node); - pivots = ma_pivots(mas_mn(mas), ptype); - mas->max = mas_safe_pivot(mas, pivots, pslot, ptype); - mas->min = mas_safe_min(mas, pivots, pslot); - mas->node = mn; - mas->offset = slot; - mas_wr_store_entry(&wr_mas); -} - -/* * mas_sparse_area() - Internal function. Return upper or lower limit when * searching for a gap in an empty tree. * @mas: The maple state @@ -5289,7 +5119,10 @@ int mas_empty_area(struct ma_state *mas, unsigned long min, unsigned long *pivots; enum maple_type mt; - if (min >= max) + if (min > max) + return -EINVAL; + + if (size == 0 || max - min < size - 1) return -EINVAL; if (mas_is_start(mas)) @@ -5338,7 +5171,10 @@ int mas_empty_area_rev(struct ma_state *mas, unsigned long min, { struct maple_enode *last = mas->node; - if (min >= max) + if (min > max) + return -EINVAL; + + if (size == 0 || max - min < size - 1) return -EINVAL; if (mas_is_start(mas)) { @@ -5374,7 +5210,7 @@ int mas_empty_area_rev(struct ma_state *mas, unsigned long min, return -EBUSY; /* Trim the upper limit to the max. */ - if (max <= mas->last) + if (max < mas->last) mas->last = max; mas->index = mas->last - size + 1; @@ -5382,71 +5218,6 @@ int mas_empty_area_rev(struct ma_state *mas, unsigned long min, } EXPORT_SYMBOL_GPL(mas_empty_area_rev); -static inline int mas_alloc(struct ma_state *mas, void *entry, - unsigned long size, unsigned long *index) -{ - unsigned long min; - - mas_start(mas); - if (mas_is_none(mas) || mas_is_ptr(mas)) { - mas_root_expand(mas, entry); - if (mas_is_err(mas)) - return xa_err(mas->node); - - if (!mas->index) - return mte_pivot(mas->node, 0); - return mte_pivot(mas->node, 1); - } - - /* Must be walking a tree. */ - mas_awalk(mas, size); - if (mas_is_err(mas)) - return xa_err(mas->node); - - if (mas->offset == MAPLE_NODE_SLOTS) - goto no_gap; - - /* - * At this point, mas->node points to the right node and we have an - * offset that has a sufficient gap. 
- */ - min = mas->min; - if (mas->offset) - min = mte_pivot(mas->node, mas->offset - 1) + 1; - - if (mas->index < min) - mas->index = min; - - mas_fill_gap(mas, entry, mas->offset, size, index); - return 0; - -no_gap: - return -EBUSY; -} - -static inline int mas_rev_alloc(struct ma_state *mas, unsigned long min, - unsigned long max, void *entry, - unsigned long size, unsigned long *index) -{ - int ret = 0; - - ret = mas_empty_area_rev(mas, min, max, size); - if (ret) - return ret; - - if (mas_is_err(mas)) - return xa_err(mas->node); - - if (mas->offset == MAPLE_NODE_SLOTS) - goto no_gap; - - mas_fill_gap(mas, entry, mas->offset, size, index); - return 0; - -no_gap: - return -EBUSY; -} - /* * mte_dead_leaves() - Mark all leaves of a node as dead. * @mas: The maple state @@ -5694,9 +5465,9 @@ void *mas_store(struct ma_state *mas, void *entry) trace_ma_write(__func__, mas, 0, entry); #ifdef CONFIG_DEBUG_MAPLE_TREE - if (mas->index > mas->last) - pr_err("Error %lu > %lu %p\n", mas->index, mas->last, entry); - MT_BUG_ON(mas->tree, mas->index > mas->last); + if (MAS_WARN_ON(mas, mas->index > mas->last)) + pr_err("Error %lX > %lX %p\n", mas->index, mas->last, entry); + if (mas->index > mas->last) { mas_set_err(mas, -EINVAL); return NULL; @@ -5756,7 +5527,7 @@ void mas_store_prealloc(struct ma_state *mas, void *entry) mas_wr_store_setup(&wr_mas); trace_ma_write(__func__, mas, 0, entry); mas_wr_store_entry(&wr_mas); - BUG_ON(mas_is_err(mas)); + MAS_WR_BUG_ON(&wr_mas, mas_is_err(mas)); mas_destroy(mas); } EXPORT_SYMBOL_GPL(mas_store_prealloc); @@ -5808,9 +5579,7 @@ void mas_destroy(struct ma_state *mas) if (mas->mas_flags & MA_STATE_REBALANCE) { unsigned char end; - if (mas_is_start(mas)) - mas_start(mas); - + mas_start(mas); mtree_range_walk(mas); end = mas_data_end(mas) + 1; if (end < mt_min_slot_count(mas->node) - 1) @@ -5900,6 +5669,34 @@ int mas_expected_entries(struct ma_state *mas, unsigned long nr_entries) } EXPORT_SYMBOL_GPL(mas_expected_entries); +static inline bool mas_next_setup(struct ma_state *mas, unsigned long max, + void **entry) +{ + bool was_none = mas_is_none(mas); + + if (mas_is_none(mas) || mas_is_paused(mas)) + mas->node = MAS_START; + + if (mas_is_start(mas)) + *entry = mas_walk(mas); /* Retries on dead nodes handled by mas_walk */ + + if (mas_is_ptr(mas)) { + *entry = NULL; + if (was_none && mas->index == 0) { + mas->index = mas->last = 0; + return true; + } + mas->index = 1; + mas->last = ULONG_MAX; + mas->node = MAS_NONE; + return true; + } + + if (mas_is_none(mas)) + return true; + return false; +} + /** * mas_next() - Get the next entry. * @mas: The maple state @@ -5913,27 +5710,38 @@ EXPORT_SYMBOL_GPL(mas_expected_entries); */ void *mas_next(struct ma_state *mas, unsigned long max) { - if (mas_is_none(mas) || mas_is_paused(mas)) - mas->node = MAS_START; + void *entry = NULL; - if (mas_is_start(mas)) - mas_walk(mas); /* Retries on dead nodes handled by mas_walk */ + if (mas_next_setup(mas, max, &entry)) + return entry; - if (mas_is_ptr(mas)) { - if (!mas->index) { - mas->index = 1; - mas->last = ULONG_MAX; - } - return NULL; - } + /* Retries on dead nodes handled by mas_next_slot */ + return mas_next_slot(mas, max, false); +} +EXPORT_SYMBOL_GPL(mas_next); - if (mas->last == ULONG_MAX) - return NULL; +/** + * mas_next_range() - Advance the maple state to the next range + * @mas: The maple state + * @max: The maximum index to check. + * + * Sets @mas->index and @mas->last to the range. + * Must hold rcu_read_lock or the write lock. + * Can return the zero entry. 
+ * + * Return: The next entry or %NULL + */ +void *mas_next_range(struct ma_state *mas, unsigned long max) +{ + void *entry = NULL; + + if (mas_next_setup(mas, max, &entry)) + return entry; - /* Retries on dead nodes handled by mas_next_entry */ - return mas_next_entry(mas, max); + /* Retries on dead nodes handled by mas_next_slot */ + return mas_next_slot(mas, max, true); } -EXPORT_SYMBOL_GPL(mas_next); +EXPORT_SYMBOL_GPL(mas_next_range); /** * mt_next() - get the next value in the maple tree @@ -5955,6 +5763,47 @@ void *mt_next(struct maple_tree *mt, unsigned long index, unsigned long max) } EXPORT_SYMBOL_GPL(mt_next); +static inline bool mas_prev_setup(struct ma_state *mas, unsigned long min, + void **entry) +{ + if (mas->index <= min) + goto none; + + if (mas_is_none(mas) || mas_is_paused(mas)) + mas->node = MAS_START; + + if (mas_is_start(mas)) { + mas_walk(mas); + if (!mas->index) + goto none; + } + + if (unlikely(mas_is_ptr(mas))) { + if (!mas->index) + goto none; + mas->index = mas->last = 0; + *entry = mas_root(mas); + return true; + } + + if (mas_is_none(mas)) { + if (mas->index) { + /* Walked to out-of-range pointer? */ + mas->index = mas->last = 0; + mas->node = MAS_ROOT; + *entry = mas_root(mas); + return true; + } + return true; + } + + return false; + +none: + mas->node = MAS_NONE; + return true; +} + /** * mas_prev() - Get the previous entry * @mas: The maple state @@ -5968,37 +5817,37 @@ EXPORT_SYMBOL_GPL(mt_next); */ void *mas_prev(struct ma_state *mas, unsigned long min) { - if (!mas->index) { - /* Nothing comes before 0 */ - mas->last = 0; - mas->node = MAS_NONE; - return NULL; - } + void *entry = NULL; - if (unlikely(mas_is_ptr(mas))) - return NULL; + if (mas_prev_setup(mas, min, &entry)) + return entry; - if (mas_is_none(mas) || mas_is_paused(mas)) - mas->node = MAS_START; + return mas_prev_slot(mas, min, false); +} +EXPORT_SYMBOL_GPL(mas_prev); - if (mas_is_start(mas)) { - mas_walk(mas); - if (!mas->index) - return NULL; - } +/** + * mas_prev_range() - Advance to the previous range + * @mas: The maple state + * @min: The minimum value to check. + * + * Sets @mas->index and @mas->last to the range. + * Must hold rcu_read_lock or the write lock. + * Will reset mas to MAS_START if the node is MAS_NONE. Will stop on not + * searchable nodes. + * + * Return: the previous value or %NULL. + */ +void *mas_prev_range(struct ma_state *mas, unsigned long min) +{ + void *entry = NULL; - if (mas_is_ptr(mas)) { - if (!mas->index) { - mas->last = 0; - return NULL; - } + if (mas_prev_setup(mas, min, &entry)) + return entry; - mas->index = mas->last = 0; - return mas_root_locked(mas); - } - return mas_prev_entry(mas, min); + return mas_prev_slot(mas, min, true); } -EXPORT_SYMBOL_GPL(mas_prev); +EXPORT_SYMBOL_GPL(mas_prev_range); /** * mt_prev() - get the previous value in the maple tree @@ -6040,6 +5889,64 @@ void mas_pause(struct ma_state *mas) EXPORT_SYMBOL_GPL(mas_pause); /** + * mas_find_setup() - Internal function to set up mas_find*(). + * @mas: The maple state + * @max: The maximum index + * @entry: Pointer to the entry + * + * Returns: True if entry is the answer, false otherwise. 
+ */ +static inline bool mas_find_setup(struct ma_state *mas, unsigned long max, + void **entry) +{ + *entry = NULL; + + if (unlikely(mas_is_none(mas))) { + if (unlikely(mas->last >= max)) + return true; + + mas->index = mas->last; + mas->node = MAS_START; + } else if (unlikely(mas_is_paused(mas))) { + if (unlikely(mas->last >= max)) + return true; + + mas->node = MAS_START; + mas->index = ++mas->last; + } else if (unlikely(mas_is_ptr(mas))) + goto ptr_out_of_range; + + if (unlikely(mas_is_start(mas))) { + /* First run or continue */ + if (mas->index > max) + return true; + + *entry = mas_walk(mas); + if (*entry) + return true; + + } + + if (unlikely(!mas_searchable(mas))) { + if (unlikely(mas_is_ptr(mas))) + goto ptr_out_of_range; + + return true; + } + + if (mas->index == max) + return true; + + return false; + +ptr_out_of_range: + mas->node = MAS_NONE; + mas->index = 1; + mas->last = ULONG_MAX; + return true; +} + +/** * mas_find() - On the first call, find the entry at or after mas->index up to * %max. Otherwise, find the entry after mas->index. * @mas: The maple state @@ -6053,37 +5960,105 @@ EXPORT_SYMBOL_GPL(mas_pause); */ void *mas_find(struct ma_state *mas, unsigned long max) { + void *entry = NULL; + + if (mas_find_setup(mas, max, &entry)) + return entry; + + /* Retries on dead nodes handled by mas_next_slot */ + return mas_next_slot(mas, max, false); +} +EXPORT_SYMBOL_GPL(mas_find); + +/** + * mas_find_range() - On the first call, find the entry at or after + * mas->index up to %max. Otherwise, advance to the next slot mas->index. + * @mas: The maple state + * @max: The maximum value to check. + * + * Must hold rcu_read_lock or the write lock. + * If an entry exists, last and index are updated accordingly. + * May set @mas->node to MAS_NONE. + * + * Return: The entry or %NULL. + */ +void *mas_find_range(struct ma_state *mas, unsigned long max) +{ + void *entry; + + if (mas_find_setup(mas, max, &entry)) + return entry; + + /* Retries on dead nodes handled by mas_next_slot */ + return mas_next_slot(mas, max, true); +} +EXPORT_SYMBOL_GPL(mas_find_range); + +/** + * mas_find_rev_setup() - Internal function to set up mas_find_*_rev() + * @mas: The maple state + * @min: The minimum index + * @entry: Pointer to the entry + * + * Returns: True if entry is the answer, false otherwise. + */ +static inline bool mas_find_rev_setup(struct ma_state *mas, unsigned long min, + void **entry) +{ + *entry = NULL; + + if (unlikely(mas_is_none(mas))) { + if (mas->index <= min) + goto none; + + mas->last = mas->index; + mas->node = MAS_START; + } + if (unlikely(mas_is_paused(mas))) { - if (unlikely(mas->last == ULONG_MAX)) { + if (unlikely(mas->index <= min)) { mas->node = MAS_NONE; - return NULL; + return true; } mas->node = MAS_START; - mas->index = ++mas->last; + mas->last = --mas->index; } - if (unlikely(mas_is_none(mas))) - mas->node = MAS_START; - if (unlikely(mas_is_start(mas))) { /* First run or continue */ - void *entry; + if (mas->index < min) + return true; - if (mas->index > max) - return NULL; + *entry = mas_walk(mas); + if (*entry) + return true; + } - entry = mas_walk(mas); - if (entry) - return entry; + if (unlikely(!mas_searchable(mas))) { + if (mas_is_ptr(mas)) + goto none; + + if (mas_is_none(mas)) { + /* + * Walked to the location, and there was nothing so the + * previous location is 0. 
+ */ + mas->last = mas->index = 0; + mas->node = MAS_ROOT; + *entry = mas_root(mas); + return true; + } } - if (unlikely(!mas_searchable(mas))) - return NULL; + if (mas->index < min) + return true; + + return false; - /* Retries on dead nodes handled by mas_next_entry */ - return mas_next_entry(mas, max); +none: + mas->node = MAS_NONE; + return true; } -EXPORT_SYMBOL_GPL(mas_find); /** * mas_find_rev: On the first call, find the first non-null entry at or below @@ -6100,37 +6075,41 @@ EXPORT_SYMBOL_GPL(mas_find); */ void *mas_find_rev(struct ma_state *mas, unsigned long min) { - if (unlikely(mas_is_paused(mas))) { - if (unlikely(mas->last == ULONG_MAX)) { - mas->node = MAS_NONE; - return NULL; - } - mas->node = MAS_START; - mas->last = --mas->index; - } + void *entry; - if (unlikely(mas_is_start(mas))) { - /* First run or continue */ - void *entry; + if (mas_find_rev_setup(mas, min, &entry)) + return entry; - if (mas->index < min) - return NULL; + /* Retries on dead nodes handled by mas_prev_slot */ + return mas_prev_slot(mas, min, false); - entry = mas_walk(mas); - if (entry) - return entry; - } +} +EXPORT_SYMBOL_GPL(mas_find_rev); - if (unlikely(!mas_searchable(mas))) - return NULL; +/** + * mas_find_range_rev: On the first call, find the first non-null entry at or + * below mas->index down to %min. Otherwise advance to the previous slot after + * mas->index down to %min. + * @mas: The maple state + * @min: The minimum value to check. + * + * Must hold rcu_read_lock or the write lock. + * If an entry exists, last and index are updated accordingly. + * May set @mas->node to MAS_NONE. + * + * Return: The entry or %NULL. + */ +void *mas_find_range_rev(struct ma_state *mas, unsigned long min) +{ + void *entry; - if (mas->index < min) - return NULL; + if (mas_find_rev_setup(mas, min, &entry)) + return entry; - /* Retries on dead nodes handled by mas_prev_entry */ - return mas_prev_entry(mas, min); + /* Retries on dead nodes handled by mas_prev_slot */ + return mas_prev_slot(mas, min, true); } -EXPORT_SYMBOL_GPL(mas_find_rev); +EXPORT_SYMBOL_GPL(mas_find_range_rev); /** * mas_erase() - Find the range in which index resides and erase the entire @@ -6176,7 +6155,7 @@ EXPORT_SYMBOL_GPL(mas_erase); * Return: true on allocation, false otherwise. */ bool mas_nomem(struct ma_state *mas, gfp_t gfp) - __must_hold(mas->tree->lock) + __must_hold(mas->tree->ma_lock) { if (likely(mas->node != MA_ERROR(-ENOMEM))) { mas_destroy(mas); @@ -6357,31 +6336,33 @@ int mtree_alloc_range(struct maple_tree *mt, unsigned long *startp, { int ret = 0; - MA_STATE(mas, mt, min, max - size); + MA_STATE(mas, mt, 0, 0); if (!mt_is_alloc(mt)) return -EINVAL; if (WARN_ON_ONCE(mt_is_reserved(entry))) return -EINVAL; - if (min > max) - return -EINVAL; - - if (max < size) - return -EINVAL; - - if (!size) - return -EINVAL; - mtree_lock(mt); retry: - mas.offset = 0; - mas.index = min; - mas.last = max - size; - ret = mas_alloc(&mas, entry, size, startp); + ret = mas_empty_area(&mas, min, max, size); + if (ret) + goto unlock; + + mas_insert(&mas, entry); + /* + * mas_nomem() may release the lock, causing the allocated area + * to be unavailable, so try to allocate a free area again. 
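The mtree_alloc_range()/mtree_alloc_rrange() rework being done here keeps the external contract; a caller-side sketch under that assumption (the helper name and the concrete min/max/size values are made up):

#include <linux/maple_tree.h>
#include <linux/printk.h>

/* Hypothetical caller: reserve a 0x1000-wide span in an allocation tree. */
static int example_alloc_span(struct maple_tree *mt, void *entry)
{
	unsigned long start;
	int ret;

	/* Tree must have been initialised with MT_FLAGS_ALLOC_RANGE. */
	ret = mtree_alloc_range(mt, &start, entry, 0x1000, 0x1000, 0x10000,
				GFP_KERNEL);
	if (ret)
		return ret;	/* e.g. -EBUSY when no free gap fits */

	pr_info("allocated [%lx, %lx]\n", start, start + 0x1000 - 1);
	return 0;
}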
+ */ if (mas_nomem(&mas, gfp)) goto retry; + if (mas_is_err(&mas)) + ret = xa_err(mas.node); + else + *startp = mas.index; + +unlock: mtree_unlock(mt); return ret; } @@ -6393,28 +6374,33 @@ int mtree_alloc_rrange(struct maple_tree *mt, unsigned long *startp, { int ret = 0; - MA_STATE(mas, mt, min, max - size); + MA_STATE(mas, mt, 0, 0); if (!mt_is_alloc(mt)) return -EINVAL; if (WARN_ON_ONCE(mt_is_reserved(entry))) return -EINVAL; - if (min >= max) - return -EINVAL; - - if (max < size - 1) - return -EINVAL; - - if (!size) - return -EINVAL; - mtree_lock(mt); retry: - ret = mas_rev_alloc(&mas, min, max, entry, size, startp); + ret = mas_empty_area_rev(&mas, min, max, size); + if (ret) + goto unlock; + + mas_insert(&mas, entry); + /* + * mas_nomem() may release the lock, causing the allocated area + * to be unavailable, so try to allocate a free area again. + */ if (mas_nomem(&mas, gfp)) goto retry; + if (mas_is_err(&mas)) + ret = xa_err(mas.node); + else + *startp = mas.index; + +unlock: mtree_unlock(mt); return ret; } @@ -6512,7 +6498,7 @@ retry: if (entry) goto unlock; - while (mas_searchable(&mas) && (mas.index < max)) { + while (mas_searchable(&mas) && (mas.last < max)) { entry = mas_next_entry(&mas, max); if (likely(entry && !xa_is_zero(entry))) break; @@ -6525,10 +6511,9 @@ unlock: if (likely(entry)) { *index = mas.last + 1; #ifdef CONFIG_DEBUG_MAPLE_TREE - if ((*index) && (*index) <= copy) + if (MT_WARN_ON(mt, (*index) && ((*index) <= copy))) pr_err("index not increased! %lx <= %lx\n", *index, copy); - MT_BUG_ON(mt, (*index) && ((*index) <= copy)); #endif } @@ -6674,7 +6659,7 @@ static inline void *mas_first_entry(struct ma_state *mas, struct maple_node *mn, max = mas->max; mas->offset = 0; while (likely(!ma_is_leaf(mt))) { - MT_BUG_ON(mas->tree, mte_dead_node(mas->node)); + MAS_WARN_ON(mas, mte_dead_node(mas->node)); slots = ma_slots(mn, mt); entry = mas_slot(mas, slots, 0); pivots = ma_pivots(mn, mt); @@ -6685,7 +6670,7 @@ static inline void *mas_first_entry(struct ma_state *mas, struct maple_node *mn, mn = mas_mn(mas); mt = mte_node_type(mas->node); } - MT_BUG_ON(mas->tree, mte_dead_node(mas->node)); + MAS_WARN_ON(mas, mte_dead_node(mas->node)); mas->max = max; slots = ma_slots(mn, mt); @@ -6735,15 +6720,12 @@ static void mas_dfs_postorder(struct ma_state *mas, unsigned long max) mas->node = mn; mas_ascend(mas); - while (mas->node != MAS_NONE) { + do { p = mas->node; p_min = mas->min; p_max = mas->max; mas_prev_node(mas, 0); - } - - if (p == MAS_NONE) - return; + } while (!mas_is_none(mas)); mas->node = p; mas->max = p_max; @@ -6752,22 +6734,33 @@ static void mas_dfs_postorder(struct ma_state *mas, unsigned long max) /* Tree validations */ static void mt_dump_node(const struct maple_tree *mt, void *entry, - unsigned long min, unsigned long max, unsigned int depth); + unsigned long min, unsigned long max, unsigned int depth, + enum mt_dump_format format); static void mt_dump_range(unsigned long min, unsigned long max, - unsigned int depth) + unsigned int depth, enum mt_dump_format format) { static const char spaces[] = " "; - if (min == max) - pr_info("%.*s%lu: ", depth * 2, spaces, min); - else - pr_info("%.*s%lu-%lu: ", depth * 2, spaces, min, max); + switch(format) { + case mt_dump_hex: + if (min == max) + pr_info("%.*s%lx: ", depth * 2, spaces, min); + else + pr_info("%.*s%lx-%lx: ", depth * 2, spaces, min, max); + break; + default: + case mt_dump_dec: + if (min == max) + pr_info("%.*s%lu: ", depth * 2, spaces, min); + else + pr_info("%.*s%lu-%lu: ", depth * 2, spaces, min, max); 
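A small sketch of the dump interface introduced here, assuming CONFIG_DEBUG_MAPLE_TREE is set; for callers the only change is the extra enum mt_dump_format argument.

#include <linux/maple_tree.h>

#ifdef CONFIG_DEBUG_MAPLE_TREE
/* Hypothetical debug helper: dump the same tree in both formats. */
static void example_dump(struct maple_tree *mt)
{
	mt_dump(mt, mt_dump_hex);	/* ranges and pivots printed in hex */
	mt_dump(mt, mt_dump_dec);	/* the old decimal layout */
}
#endif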
+ } } static void mt_dump_entry(void *entry, unsigned long min, unsigned long max, - unsigned int depth) + unsigned int depth, enum mt_dump_format format) { - mt_dump_range(min, max, depth); + mt_dump_range(min, max, depth, format); if (xa_is_value(entry)) pr_cont("value %ld (0x%lx) [%p]\n", xa_to_value(entry), @@ -6781,7 +6774,8 @@ static void mt_dump_entry(void *entry, unsigned long min, unsigned long max, } static void mt_dump_range64(const struct maple_tree *mt, void *entry, - unsigned long min, unsigned long max, unsigned int depth) + unsigned long min, unsigned long max, unsigned int depth, + enum mt_dump_format format) { struct maple_range_64 *node = &mte_to_node(entry)->mr64; bool leaf = mte_is_leaf(entry); @@ -6789,8 +6783,16 @@ static void mt_dump_range64(const struct maple_tree *mt, void *entry, int i; pr_cont(" contents: "); - for (i = 0; i < MAPLE_RANGE64_SLOTS - 1; i++) - pr_cont("%p %lu ", node->slot[i], node->pivot[i]); + for (i = 0; i < MAPLE_RANGE64_SLOTS - 1; i++) { + switch(format) { + case mt_dump_hex: + pr_cont("%p %lX ", node->slot[i], node->pivot[i]); + break; + default: + case mt_dump_dec: + pr_cont("%p %lu ", node->slot[i], node->pivot[i]); + } + } pr_cont("%p\n", node->slot[i]); for (i = 0; i < MAPLE_RANGE64_SLOTS; i++) { unsigned long last = max; @@ -6803,24 +6805,32 @@ static void mt_dump_range64(const struct maple_tree *mt, void *entry, break; if (leaf) mt_dump_entry(mt_slot(mt, node->slot, i), - first, last, depth + 1); + first, last, depth + 1, format); else if (node->slot[i]) mt_dump_node(mt, mt_slot(mt, node->slot, i), - first, last, depth + 1); + first, last, depth + 1, format); if (last == max) break; if (last > max) { - pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n", + switch(format) { + case mt_dump_hex: + pr_err("node %p last (%lx) > max (%lx) at pivot %d!\n", node, last, max, i); - break; + break; + default: + case mt_dump_dec: + pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n", + node, last, max, i); + } } first = last + 1; } } static void mt_dump_arange64(const struct maple_tree *mt, void *entry, - unsigned long min, unsigned long max, unsigned int depth) + unsigned long min, unsigned long max, unsigned int depth, + enum mt_dump_format format) { struct maple_arange_64 *node = &mte_to_node(entry)->ma64; bool leaf = mte_is_leaf(entry); @@ -6845,10 +6855,10 @@ static void mt_dump_arange64(const struct maple_tree *mt, void *entry, break; if (leaf) mt_dump_entry(mt_slot(mt, node->slot, i), - first, last, depth + 1); + first, last, depth + 1, format); else if (node->slot[i]) mt_dump_node(mt, mt_slot(mt, node->slot, i), - first, last, depth + 1); + first, last, depth + 1, format); if (last == max) break; @@ -6862,13 +6872,14 @@ static void mt_dump_arange64(const struct maple_tree *mt, void *entry, } static void mt_dump_node(const struct maple_tree *mt, void *entry, - unsigned long min, unsigned long max, unsigned int depth) + unsigned long min, unsigned long max, unsigned int depth, + enum mt_dump_format format) { struct maple_node *node = mte_to_node(entry); unsigned int type = mte_node_type(entry); unsigned int i; - mt_dump_range(min, max, depth); + mt_dump_range(min, max, depth, format); pr_cont("node %p depth %d type %d parent %p", node, depth, type, node ? 
node->parent : NULL); @@ -6879,15 +6890,15 @@ static void mt_dump_node(const struct maple_tree *mt, void *entry, if (min + i > max) pr_cont("OUT OF RANGE: "); mt_dump_entry(mt_slot(mt, node->slot, i), - min + i, min + i, depth); + min + i, min + i, depth, format); } break; case maple_leaf_64: case maple_range_64: - mt_dump_range64(mt, entry, min, max, depth); + mt_dump_range64(mt, entry, min, max, depth, format); break; case maple_arange_64: - mt_dump_arange64(mt, entry, min, max, depth); + mt_dump_arange64(mt, entry, min, max, depth, format); break; default: @@ -6895,16 +6906,16 @@ static void mt_dump_node(const struct maple_tree *mt, void *entry, } } -void mt_dump(const struct maple_tree *mt) +void mt_dump(const struct maple_tree *mt, enum mt_dump_format format) { void *entry = rcu_dereference_check(mt->ma_root, mt_locked(mt)); pr_info("maple_tree(%p) flags %X, height %u root %p\n", mt, mt->ma_flags, mt_height(mt), entry); if (!xa_is_node(entry)) - mt_dump_entry(entry, 0, 0, 0); + mt_dump_entry(entry, 0, 0, 0, format); else if (entry) - mt_dump_node(mt, entry, 0, mt_node_max(entry), 0); + mt_dump_node(mt, entry, 0, mt_node_max(entry), 0, format); } EXPORT_SYMBOL_GPL(mt_dump); @@ -6957,7 +6968,7 @@ static void mas_validate_gaps(struct ma_state *mas) mas_mn(mas), i, mas_get_slot(mas, i), gap, p_end, p_start); - mt_dump(mas->tree); + mt_dump(mas->tree, mt_dump_hex); MT_BUG_ON(mas->tree, gap != p_end - p_start + 1); @@ -6988,27 +6999,29 @@ counted: p_slot = mte_parent_slot(mas->node); p_mn = mte_parent(mte); MT_BUG_ON(mas->tree, max_gap > mas->max); - if (ma_gaps(p_mn, mas_parent_enum(mas, mte))[p_slot] != max_gap) { + if (ma_gaps(p_mn, mas_parent_type(mas, mte))[p_slot] != max_gap) { pr_err("gap %p[%u] != %lu\n", p_mn, p_slot, max_gap); - mt_dump(mas->tree); + mt_dump(mas->tree, mt_dump_hex); } MT_BUG_ON(mas->tree, - ma_gaps(p_mn, mas_parent_enum(mas, mte))[p_slot] != max_gap); + ma_gaps(p_mn, mas_parent_type(mas, mte))[p_slot] != max_gap); } static void mas_validate_parent_slot(struct ma_state *mas) { struct maple_node *parent; struct maple_enode *node; - enum maple_type p_type = mas_parent_enum(mas, mas->node); - unsigned char p_slot = mte_parent_slot(mas->node); + enum maple_type p_type; + unsigned char p_slot; void __rcu **slots; int i; if (mte_is_root(mas->node)) return; + p_slot = mte_parent_slot(mas->node); + p_type = mas_parent_type(mas, mas->node); parent = mte_parent(mas->node); slots = ma_slots(parent, p_type); MT_BUG_ON(mas->tree, mas_mn(mas) == parent); @@ -7101,18 +7114,18 @@ static void mas_validate_limits(struct ma_state *mas) if (prev_piv > piv) { pr_err("%p[%u] piv %lu < prev_piv %lu\n", mas_mn(mas), i, piv, prev_piv); - MT_BUG_ON(mas->tree, piv < prev_piv); + MAS_WARN_ON(mas, piv < prev_piv); } if (piv < mas->min) { pr_err("%p[%u] %lu < %lu\n", mas_mn(mas), i, piv, mas->min); - MT_BUG_ON(mas->tree, piv < mas->min); + MAS_WARN_ON(mas, piv < mas->min); } if (piv > mas->max) { pr_err("%p[%u] %lu > %lu\n", mas_mn(mas), i, piv, mas->max); - MT_BUG_ON(mas->tree, piv > mas->max); + MAS_WARN_ON(mas, piv > mas->max); } prev_piv = piv; if (piv == mas->max) @@ -7135,7 +7148,7 @@ static void mas_validate_limits(struct ma_state *mas) pr_err("%p[%u] should not have piv %lu\n", mas_mn(mas), i, piv); - MT_BUG_ON(mas->tree, i < mt_pivots[type] - 1); + MAS_WARN_ON(mas, i < mt_pivots[type] - 1); } } } @@ -7194,16 +7207,15 @@ void mt_validate(struct maple_tree *mt) mas_first_entry(&mas, mas_mn(&mas), ULONG_MAX, mte_node_type(mas.node)); while (!mas_is_none(&mas)) { - MT_BUG_ON(mas.tree, 
mte_dead_node(mas.node)); + MAS_WARN_ON(&mas, mte_dead_node(mas.node)); if (!mte_is_root(mas.node)) { end = mas_data_end(&mas); - if ((end < mt_min_slot_count(mas.node)) && - (mas.max != ULONG_MAX)) { + if (MAS_WARN_ON(&mas, + (end < mt_min_slot_count(mas.node)) && + (mas.max != ULONG_MAX))) { pr_err("Invalid size %u of %p\n", end, - mas_mn(&mas)); - MT_BUG_ON(mas.tree, 1); + mas_mn(&mas)); } - } mas_validate_parent_slot(&mas); mas_validate_child_slot(&mas); @@ -7219,4 +7231,34 @@ done: } EXPORT_SYMBOL_GPL(mt_validate); +void mas_dump(const struct ma_state *mas) +{ + pr_err("MAS: tree=%p enode=%p ", mas->tree, mas->node); + if (mas_is_none(mas)) + pr_err("(MAS_NONE) "); + else if (mas_is_ptr(mas)) + pr_err("(MAS_ROOT) "); + else if (mas_is_start(mas)) + pr_err("(MAS_START) "); + else if (mas_is_paused(mas)) + pr_err("(MAS_PAUSED) "); + + pr_err("[%u] index=%lx last=%lx\n", mas->offset, mas->index, mas->last); + pr_err(" min=%lx max=%lx alloc=%p, depth=%u, flags=%x\n", + mas->min, mas->max, mas->alloc, mas->depth, mas->mas_flags); + if (mas->index > mas->last) + pr_err("Check index & last\n"); +} +EXPORT_SYMBOL_GPL(mas_dump); + +void mas_wr_dump(const struct ma_wr_state *wr_mas) +{ + pr_err("WR_MAS: node=%p r_min=%lx r_max=%lx\n", + wr_mas->node, wr_mas->r_min, wr_mas->r_max); + pr_err(" type=%u off_end=%u, node_end=%u, end_piv=%lx\n", + wr_mas->type, wr_mas->offset_end, wr_mas->node_end, + wr_mas->end_piv); +} +EXPORT_SYMBOL_GPL(mas_wr_dump); + #endif /* CONFIG_DEBUG_MAPLE_TREE */ diff --git a/lib/overflow_kunit.c b/lib/overflow_kunit.c index dcd3ba102db6..34db0b3aa502 100644 --- a/lib/overflow_kunit.c +++ b/lib/overflow_kunit.c @@ -649,7 +649,7 @@ struct __test_flex_array { static void overflow_size_helpers_test(struct kunit *test) { /* Make sure struct_size() can be used in a constant expression. */ - u8 ce_array[struct_size((struct __test_flex_array *)0, data, 55)]; + u8 ce_array[struct_size_t(struct __test_flex_array, data, 55)]; struct __test_flex_array *obj; int count = 0; int var; diff --git a/lib/radix-tree.c b/lib/radix-tree.c index 049ba132f7ef..1a31065b2036 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c @@ -27,6 +27,8 @@ #include <linux/string.h> #include <linux/xarray.h> +#include "radix-tree.h" + /* * Radix tree node cache. 
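The overflow_kunit hunk above switches to struct_size_t(), which takes the struct type rather than a pointer, so the flexible-array size can be computed in a true constant expression; a sketch with a hypothetical struct, assuming the macro is provided by <linux/overflow.h> as used above:

#include <linux/overflow.h>

/* Hypothetical flexible-array struct, not from the patch. */
struct example_flex {
	unsigned int count;
	unsigned int data[];
};

/* Usable as an array bound because no pointer cast is involved. */
static unsigned char example_storage[struct_size_t(struct example_flex, data, 16)];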
*/ diff --git a/lib/radix-tree.h b/lib/radix-tree.h new file mode 100644 index 000000000000..40d5c03e2b09 --- /dev/null +++ b/lib/radix-tree.h @@ -0,0 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* radix-tree helpers that are only shared with xarray */ + +struct kmem_cache; +struct rcu_head; + +extern struct kmem_cache *radix_tree_node_cachep; +extern void radix_tree_node_rcu_free(struct rcu_head *head); diff --git a/lib/raid6/neon.h b/lib/raid6/neon.h new file mode 100644 index 000000000000..2ca41ee9b499 --- /dev/null +++ b/lib/raid6/neon.h @@ -0,0 +1,22 @@ +// SPDX-License-Identifier: GPL-2.0-only + +void raid6_neon1_gen_syndrome_real(int disks, unsigned long bytes, void **ptrs); +void raid6_neon1_xor_syndrome_real(int disks, int start, int stop, + unsigned long bytes, void **ptrs); +void raid6_neon2_gen_syndrome_real(int disks, unsigned long bytes, void **ptrs); +void raid6_neon2_xor_syndrome_real(int disks, int start, int stop, + unsigned long bytes, void **ptrs); +void raid6_neon4_gen_syndrome_real(int disks, unsigned long bytes, void **ptrs); +void raid6_neon4_xor_syndrome_real(int disks, int start, int stop, + unsigned long bytes, void **ptrs); +void raid6_neon8_gen_syndrome_real(int disks, unsigned long bytes, void **ptrs); +void raid6_neon8_xor_syndrome_real(int disks, int start, int stop, + unsigned long bytes, void **ptrs); +void __raid6_2data_recov_neon(int bytes, uint8_t *p, uint8_t *q, uint8_t *dp, + uint8_t *dq, const uint8_t *pbmul, + const uint8_t *qmul); + +void __raid6_datap_recov_neon(int bytes, uint8_t *p, uint8_t *q, uint8_t *dq, + const uint8_t *qmul); + + diff --git a/lib/raid6/neon.uc b/lib/raid6/neon.uc index b7c68030da4f..355270af0cd6 100644 --- a/lib/raid6/neon.uc +++ b/lib/raid6/neon.uc @@ -25,6 +25,7 @@ */ #include <arm_neon.h> +#include "neon.h" typedef uint8x16_t unative_t; diff --git a/lib/raid6/recov_neon.c b/lib/raid6/recov_neon.c index d6fba8bf8c0a..1bfc14174d4d 100644 --- a/lib/raid6/recov_neon.c +++ b/lib/raid6/recov_neon.c @@ -8,6 +8,7 @@ #ifdef __KERNEL__ #include <asm/neon.h> +#include "neon.h" #else #define kernel_neon_begin() #define kernel_neon_end() @@ -19,13 +20,6 @@ static int raid6_has_neon(void) return cpu_has_neon(); } -void __raid6_2data_recov_neon(int bytes, uint8_t *p, uint8_t *q, uint8_t *dp, - uint8_t *dq, const uint8_t *pbmul, - const uint8_t *qmul); - -void __raid6_datap_recov_neon(int bytes, uint8_t *p, uint8_t *q, uint8_t *dq, - const uint8_t *qmul); - static void raid6_2data_recov_neon(int disks, size_t bytes, int faila, int failb, void **ptrs) { diff --git a/lib/raid6/recov_neon_inner.c b/lib/raid6/recov_neon_inner.c index 90eb80d43790..f9e7e8f5a151 100644 --- a/lib/raid6/recov_neon_inner.c +++ b/lib/raid6/recov_neon_inner.c @@ -5,6 +5,7 @@ */ #include <arm_neon.h> +#include "neon.h" #ifdef CONFIG_ARM /* diff --git a/lib/show_mem.c b/lib/show_mem.c deleted file mode 100644 index 1485c87be935..000000000000 --- a/lib/show_mem.c +++ /dev/null @@ -1,37 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Generic show_mem() implementation - * - * Copyright (C) 2008 Johannes Weiner <hannes@saeurebad.de> - */ - -#include <linux/mm.h> -#include <linux/cma.h> - -void __show_mem(unsigned int filter, nodemask_t *nodemask, int max_zone_idx) -{ - unsigned long total = 0, reserved = 0, highmem = 0; - struct zone *zone; - - printk("Mem-Info:\n"); - __show_free_areas(filter, nodemask, max_zone_idx); - - for_each_populated_zone(zone) { - - total += zone->present_pages; - reserved += zone->present_pages - zone_managed_pages(zone); - - if 
(is_highmem(zone)) - highmem += zone->present_pages; - } - - printk("%lu pages RAM\n", total); - printk("%lu pages HighMem/MovableOnly\n", highmem); - printk("%lu pages reserved\n", reserved); -#ifdef CONFIG_CMA - printk("%lu pages cma reserved\n", totalcma_pages); -#endif -#ifdef CONFIG_MEMORY_FAILURE - printk("%lu pages hwpoisoned\n", atomic_long_read(&num_poisoned_pages)); -#endif -} diff --git a/lib/strcat_kunit.c b/lib/strcat_kunit.c new file mode 100644 index 000000000000..e21be95514af --- /dev/null +++ b/lib/strcat_kunit.c @@ -0,0 +1,104 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Kernel module for testing 'strcat' family of functions. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <kunit/test.h> +#include <linux/string.h> + +static volatile int unconst; + +static void strcat_test(struct kunit *test) +{ + char dest[8]; + + /* Destination is terminated. */ + memset(dest, 0, sizeof(dest)); + KUNIT_EXPECT_EQ(test, strlen(dest), 0); + /* Empty copy does nothing. */ + KUNIT_EXPECT_TRUE(test, strcat(dest, "") == dest); + KUNIT_EXPECT_STREQ(test, dest, ""); + /* 4 characters copied in, stops at %NUL. */ + KUNIT_EXPECT_TRUE(test, strcat(dest, "four\000123") == dest); + KUNIT_EXPECT_STREQ(test, dest, "four"); + KUNIT_EXPECT_EQ(test, dest[5], '\0'); + /* 2 more characters copied in okay. */ + KUNIT_EXPECT_TRUE(test, strcat(dest, "AB") == dest); + KUNIT_EXPECT_STREQ(test, dest, "fourAB"); +} + +static void strncat_test(struct kunit *test) +{ + char dest[8]; + + /* Destination is terminated. */ + memset(dest, 0, sizeof(dest)); + KUNIT_EXPECT_EQ(test, strlen(dest), 0); + /* Empty copy of size 0 does nothing. */ + KUNIT_EXPECT_TRUE(test, strncat(dest, "", 0 + unconst) == dest); + KUNIT_EXPECT_STREQ(test, dest, ""); + /* Empty copy of size 1 does nothing too. */ + KUNIT_EXPECT_TRUE(test, strncat(dest, "", 1 + unconst) == dest); + KUNIT_EXPECT_STREQ(test, dest, ""); + /* Copy of max 0 characters should do nothing. */ + KUNIT_EXPECT_TRUE(test, strncat(dest, "asdf", 0 + unconst) == dest); + KUNIT_EXPECT_STREQ(test, dest, ""); + + /* 4 characters copied in, even if max is 8. */ + KUNIT_EXPECT_TRUE(test, strncat(dest, "four\000123", 8 + unconst) == dest); + KUNIT_EXPECT_STREQ(test, dest, "four"); + KUNIT_EXPECT_EQ(test, dest[5], '\0'); + KUNIT_EXPECT_EQ(test, dest[6], '\0'); + /* 2 characters copied in okay, 2 ignored. */ + KUNIT_EXPECT_TRUE(test, strncat(dest, "ABCD", 2 + unconst) == dest); + KUNIT_EXPECT_STREQ(test, dest, "fourAB"); +} + +static void strlcat_test(struct kunit *test) +{ + char dest[8] = ""; + int len = sizeof(dest) + unconst; + + /* Destination is terminated. */ + KUNIT_EXPECT_EQ(test, strlen(dest), 0); + /* Empty copy is size 0. */ + KUNIT_EXPECT_EQ(test, strlcat(dest, "", len), 0); + KUNIT_EXPECT_STREQ(test, dest, ""); + /* Size 1 should keep buffer terminated, report size of source only. */ + KUNIT_EXPECT_EQ(test, strlcat(dest, "four", 1 + unconst), 4); + KUNIT_EXPECT_STREQ(test, dest, ""); + + /* 4 characters copied in. */ + KUNIT_EXPECT_EQ(test, strlcat(dest, "four", len), 4); + KUNIT_EXPECT_STREQ(test, dest, "four"); + /* 2 characters copied in okay, gets to 6 total. */ + KUNIT_EXPECT_EQ(test, strlcat(dest, "AB", len), 6); + KUNIT_EXPECT_STREQ(test, dest, "fourAB"); + /* 2 characters ignored if max size (7) reached. */ + KUNIT_EXPECT_EQ(test, strlcat(dest, "CD", 7 + unconst), 8); + KUNIT_EXPECT_STREQ(test, dest, "fourAB"); + /* 1 of 2 characters skipped, now at true max size. 
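A brief aside on the strlcat() expectations in this test (the wrapper below is a hypothetical illustration, not part of the patch): the return value is the length the full concatenation would have needed, so comparing it against the buffer size is the usual truncation check.

#include <linux/string.h>
#include <linux/types.h>

/* Hypothetical wrapper: true if src did not fully fit into dst. */
static bool example_cat_truncated(char *dst, const char *src, size_t dst_size)
{
	return strlcat(dst, src, dst_size) >= dst_size;
}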
*/ + KUNIT_EXPECT_EQ(test, strlcat(dest, "EFG", len), 9); + KUNIT_EXPECT_STREQ(test, dest, "fourABE"); + /* Everything else ignored, now at full size. */ + KUNIT_EXPECT_EQ(test, strlcat(dest, "1234", len), 11); + KUNIT_EXPECT_STREQ(test, dest, "fourABE"); +} + +static struct kunit_case strcat_test_cases[] = { + KUNIT_CASE(strcat_test), + KUNIT_CASE(strncat_test), + KUNIT_CASE(strlcat_test), + {} +}; + +static struct kunit_suite strcat_test_suite = { + .name = "strcat", + .test_cases = strcat_test_cases, +}; + +kunit_test_suite(strcat_test_suite); + +MODULE_LICENSE("GPL"); diff --git a/lib/string.c b/lib/string.c index 3d55ef890106..be26623953d2 100644 --- a/lib/string.c +++ b/lib/string.c @@ -110,7 +110,7 @@ size_t strlcpy(char *dest, const char *src, size_t size) if (size) { size_t len = (ret >= size) ? size - 1 : ret; - memcpy(dest, src, len); + __builtin_memcpy(dest, src, len); dest[len] = '\0'; } return ret; @@ -260,7 +260,7 @@ size_t strlcat(char *dest, const char *src, size_t count) count -= dsize; if (len >= count) len = count-1; - memcpy(dest, src, len); + __builtin_memcpy(dest, src, len); dest[len] = 0; return res; } diff --git a/lib/string_helpers.c b/lib/string_helpers.c index 230020a2e076..d3b1dd718daf 100644 --- a/lib/string_helpers.c +++ b/lib/string_helpers.c @@ -979,18 +979,22 @@ EXPORT_SYMBOL(__sysfs_match_string); /** * strreplace - Replace all occurrences of character in string. - * @s: The string to operate on. + * @str: The string to operate on. * @old: The character being replaced. * @new: The character @old is replaced with. * - * Returns pointer to the nul byte at the end of @s. + * Replaces the each @old character with a @new one in the given string @str. + * + * Return: pointer to the string @str itself. */ -char *strreplace(char *s, char old, char new) +char *strreplace(char *str, char old, char new) { + char *s = str; + for (; *s; ++s) if (*s == old) *s = new; - return s; + return str; } EXPORT_SYMBOL(strreplace); diff --git a/lib/test_firmware.c b/lib/test_firmware.c index 05ed84c2fc4c..1d7d480b8eeb 100644 --- a/lib/test_firmware.c +++ b/lib/test_firmware.c @@ -45,6 +45,7 @@ struct test_batched_req { bool sent; const struct firmware *fw; const char *name; + const char *fw_buf; struct completion completion; struct task_struct *task; struct device *dev; @@ -175,8 +176,14 @@ static void __test_release_all_firmware(void) for (i = 0; i < test_fw_config->num_requests; i++) { req = &test_fw_config->reqs[i]; - if (req->fw) + if (req->fw) { + if (req->fw_buf) { + kfree_const(req->fw_buf); + req->fw_buf = NULL; + } release_firmware(req->fw); + req->fw = NULL; + } } vfree(test_fw_config->reqs); @@ -353,16 +360,26 @@ static ssize_t config_test_show_str(char *dst, return len; } -static int test_dev_config_update_bool(const char *buf, size_t size, +static inline int __test_dev_config_update_bool(const char *buf, size_t size, bool *cfg) { int ret; - mutex_lock(&test_fw_mutex); if (kstrtobool(buf, cfg) < 0) ret = -EINVAL; else ret = size; + + return ret; +} + +static int test_dev_config_update_bool(const char *buf, size_t size, + bool *cfg) +{ + int ret; + + mutex_lock(&test_fw_mutex); + ret = __test_dev_config_update_bool(buf, size, cfg); mutex_unlock(&test_fw_mutex); return ret; @@ -373,7 +390,8 @@ static ssize_t test_dev_config_show_bool(char *buf, bool val) return snprintf(buf, PAGE_SIZE, "%d\n", val); } -static int test_dev_config_update_size_t(const char *buf, +static int __test_dev_config_update_size_t( + const char *buf, size_t size, size_t *cfg) { @@ -384,9 +402,7 @@ 
static int test_dev_config_update_size_t(const char *buf, if (ret) return ret; - mutex_lock(&test_fw_mutex); *(size_t *)cfg = new; - mutex_unlock(&test_fw_mutex); /* Always return full write size even if we didn't consume all */ return size; @@ -402,7 +418,7 @@ static ssize_t test_dev_config_show_int(char *buf, int val) return snprintf(buf, PAGE_SIZE, "%d\n", val); } -static int test_dev_config_update_u8(const char *buf, size_t size, u8 *cfg) +static int __test_dev_config_update_u8(const char *buf, size_t size, u8 *cfg) { u8 val; int ret; @@ -411,14 +427,23 @@ static int test_dev_config_update_u8(const char *buf, size_t size, u8 *cfg) if (ret) return ret; - mutex_lock(&test_fw_mutex); *(u8 *)cfg = val; - mutex_unlock(&test_fw_mutex); /* Always return full write size even if we didn't consume all */ return size; } +static int test_dev_config_update_u8(const char *buf, size_t size, u8 *cfg) +{ + int ret; + + mutex_lock(&test_fw_mutex); + ret = __test_dev_config_update_u8(buf, size, cfg); + mutex_unlock(&test_fw_mutex); + + return ret; +} + static ssize_t test_dev_config_show_u8(char *buf, u8 val) { return snprintf(buf, PAGE_SIZE, "%u\n", val); @@ -471,10 +496,10 @@ static ssize_t config_num_requests_store(struct device *dev, mutex_unlock(&test_fw_mutex); goto out; } - mutex_unlock(&test_fw_mutex); - rc = test_dev_config_update_u8(buf, count, - &test_fw_config->num_requests); + rc = __test_dev_config_update_u8(buf, count, + &test_fw_config->num_requests); + mutex_unlock(&test_fw_mutex); out: return rc; @@ -518,10 +543,10 @@ static ssize_t config_buf_size_store(struct device *dev, mutex_unlock(&test_fw_mutex); goto out; } - mutex_unlock(&test_fw_mutex); - rc = test_dev_config_update_size_t(buf, count, - &test_fw_config->buf_size); + rc = __test_dev_config_update_size_t(buf, count, + &test_fw_config->buf_size); + mutex_unlock(&test_fw_mutex); out: return rc; @@ -548,10 +573,10 @@ static ssize_t config_file_offset_store(struct device *dev, mutex_unlock(&test_fw_mutex); goto out; } - mutex_unlock(&test_fw_mutex); - rc = test_dev_config_update_size_t(buf, count, - &test_fw_config->file_offset); + rc = __test_dev_config_update_size_t(buf, count, + &test_fw_config->file_offset); + mutex_unlock(&test_fw_mutex); out: return rc; @@ -652,6 +677,8 @@ static ssize_t trigger_request_store(struct device *dev, mutex_lock(&test_fw_mutex); release_firmware(test_firmware); + if (test_fw_config->reqs) + __test_release_all_firmware(); test_firmware = NULL; rc = request_firmware(&test_firmware, name, dev); if (rc) { @@ -752,6 +779,8 @@ static ssize_t trigger_async_request_store(struct device *dev, mutex_lock(&test_fw_mutex); release_firmware(test_firmware); test_firmware = NULL; + if (test_fw_config->reqs) + __test_release_all_firmware(); rc = request_firmware_nowait(THIS_MODULE, 1, name, dev, GFP_KERNEL, NULL, trigger_async_request_cb); if (rc) { @@ -794,6 +823,8 @@ static ssize_t trigger_custom_fallback_store(struct device *dev, mutex_lock(&test_fw_mutex); release_firmware(test_firmware); + if (test_fw_config->reqs) + __test_release_all_firmware(); test_firmware = NULL; rc = request_firmware_nowait(THIS_MODULE, FW_ACTION_NOUEVENT, name, dev, GFP_KERNEL, NULL, @@ -856,6 +887,8 @@ static int test_fw_run_batch_request(void *data) test_fw_config->buf_size); if (!req->fw) kfree(test_buf); + else + req->fw_buf = test_buf; } else { req->rc = test_fw_config->req_firmware(&req->fw, req->name, @@ -895,6 +928,11 @@ static ssize_t trigger_batched_requests_store(struct device *dev, mutex_lock(&test_fw_mutex); + if 
(test_fw_config->reqs) { + rc = -EBUSY; + goto out_bail; + } + test_fw_config->reqs = vzalloc(array3_size(sizeof(struct test_batched_req), test_fw_config->num_requests, 2)); @@ -911,6 +949,7 @@ static ssize_t trigger_batched_requests_store(struct device *dev, req->fw = NULL; req->idx = i; req->name = test_fw_config->name; + req->fw_buf = NULL; req->dev = dev; init_completion(&req->completion); req->task = kthread_run(test_fw_run_batch_request, req, @@ -993,6 +1032,11 @@ ssize_t trigger_batched_requests_async_store(struct device *dev, mutex_lock(&test_fw_mutex); + if (test_fw_config->reqs) { + rc = -EBUSY; + goto out_bail; + } + test_fw_config->reqs = vzalloc(array3_size(sizeof(struct test_batched_req), test_fw_config->num_requests, 2)); @@ -1010,6 +1054,7 @@ ssize_t trigger_batched_requests_async_store(struct device *dev, for (i = 0; i < test_fw_config->num_requests; i++) { req = &test_fw_config->reqs[i]; req->name = test_fw_config->name; + req->fw_buf = NULL; req->fw = NULL; req->idx = i; init_completion(&req->completion); diff --git a/lib/test_maple_tree.c b/lib/test_maple_tree.c index f1db333270e9..9939be34e516 100644 --- a/lib/test_maple_tree.c +++ b/lib/test_maple_tree.c @@ -11,12 +11,33 @@ #include <linux/module.h> #define MTREE_ALLOC_MAX 0x2000000000000Ul -#ifndef CONFIG_DEBUG_MAPLE_TREE -#define CONFIG_DEBUG_MAPLE_TREE -#endif #define CONFIG_MAPLE_SEARCH #define MAPLE_32BIT (MAPLE_NODE_SLOTS > 31) +#ifndef CONFIG_DEBUG_MAPLE_TREE +#define mt_dump(mt, fmt) do {} while (0) +#define mt_validate(mt) do {} while (0) +#define mt_cache_shrink() do {} while (0) +#define mas_dump(mas) do {} while (0) +#define mas_wr_dump(mas) do {} while (0) +atomic_t maple_tree_tests_run; +atomic_t maple_tree_tests_passed; +#undef MT_BUG_ON + +#define MT_BUG_ON(__tree, __x) do { \ + atomic_inc(&maple_tree_tests_run); \ + if (__x) { \ + pr_info("BUG at %s:%d (%u)\n", \ + __func__, __LINE__, __x); \ + pr_info("Pass: %u Run:%u\n", \ + atomic_read(&maple_tree_tests_passed), \ + atomic_read(&maple_tree_tests_run)); \ + } else { \ + atomic_inc(&maple_tree_tests_passed); \ + } \ +} while (0) +#endif + /* #define BENCH_SLOT_STORE */ /* #define BENCH_NODE_STORE */ /* #define BENCH_AWALK */ @@ -30,54 +51,54 @@ #else #define cond_resched() do {} while (0) #endif -static -int mtree_insert_index(struct maple_tree *mt, unsigned long index, gfp_t gfp) +static int __init mtree_insert_index(struct maple_tree *mt, + unsigned long index, gfp_t gfp) { return mtree_insert(mt, index, xa_mk_value(index & LONG_MAX), gfp); } -static void mtree_erase_index(struct maple_tree *mt, unsigned long index) +static void __init mtree_erase_index(struct maple_tree *mt, unsigned long index) { MT_BUG_ON(mt, mtree_erase(mt, index) != xa_mk_value(index & LONG_MAX)); MT_BUG_ON(mt, mtree_load(mt, index) != NULL); } -static int mtree_test_insert(struct maple_tree *mt, unsigned long index, +static int __init mtree_test_insert(struct maple_tree *mt, unsigned long index, void *ptr) { return mtree_insert(mt, index, ptr, GFP_KERNEL); } -static int mtree_test_store_range(struct maple_tree *mt, unsigned long start, - unsigned long end, void *ptr) +static int __init mtree_test_store_range(struct maple_tree *mt, + unsigned long start, unsigned long end, void *ptr) { return mtree_store_range(mt, start, end, ptr, GFP_KERNEL); } -static int mtree_test_store(struct maple_tree *mt, unsigned long start, +static int __init mtree_test_store(struct maple_tree *mt, unsigned long start, void *ptr) { return mtree_test_store_range(mt, start, start, ptr); } -static int 
mtree_test_insert_range(struct maple_tree *mt, unsigned long start, - unsigned long end, void *ptr) +static int __init mtree_test_insert_range(struct maple_tree *mt, + unsigned long start, unsigned long end, void *ptr) { return mtree_insert_range(mt, start, end, ptr, GFP_KERNEL); } -static void *mtree_test_load(struct maple_tree *mt, unsigned long index) +static void __init *mtree_test_load(struct maple_tree *mt, unsigned long index) { return mtree_load(mt, index); } -static void *mtree_test_erase(struct maple_tree *mt, unsigned long index) +static void __init *mtree_test_erase(struct maple_tree *mt, unsigned long index) { return mtree_erase(mt, index); } #if defined(CONFIG_64BIT) -static noinline void check_mtree_alloc_range(struct maple_tree *mt, +static noinline void __init check_mtree_alloc_range(struct maple_tree *mt, unsigned long start, unsigned long end, unsigned long size, unsigned long expected, int eret, void *ptr) { @@ -94,7 +115,7 @@ static noinline void check_mtree_alloc_range(struct maple_tree *mt, MT_BUG_ON(mt, result != expected); } -static noinline void check_mtree_alloc_rrange(struct maple_tree *mt, +static noinline void __init check_mtree_alloc_rrange(struct maple_tree *mt, unsigned long start, unsigned long end, unsigned long size, unsigned long expected, int eret, void *ptr) { @@ -102,7 +123,7 @@ static noinline void check_mtree_alloc_rrange(struct maple_tree *mt, unsigned long result = expected + 1; int ret; - ret = mtree_alloc_rrange(mt, &result, ptr, size, start, end - 1, + ret = mtree_alloc_rrange(mt, &result, ptr, size, start, end, GFP_KERNEL); MT_BUG_ON(mt, ret != eret); if (ret) @@ -112,8 +133,8 @@ static noinline void check_mtree_alloc_rrange(struct maple_tree *mt, } #endif -static noinline void check_load(struct maple_tree *mt, unsigned long index, - void *ptr) +static noinline void __init check_load(struct maple_tree *mt, + unsigned long index, void *ptr) { void *ret = mtree_test_load(mt, index); @@ -122,7 +143,7 @@ static noinline void check_load(struct maple_tree *mt, unsigned long index, MT_BUG_ON(mt, ret != ptr); } -static noinline void check_store_range(struct maple_tree *mt, +static noinline void __init check_store_range(struct maple_tree *mt, unsigned long start, unsigned long end, void *ptr, int expected) { int ret = -EINVAL; @@ -138,7 +159,7 @@ static noinline void check_store_range(struct maple_tree *mt, check_load(mt, i, ptr); } -static noinline void check_insert_range(struct maple_tree *mt, +static noinline void __init check_insert_range(struct maple_tree *mt, unsigned long start, unsigned long end, void *ptr, int expected) { int ret = -EINVAL; @@ -154,8 +175,8 @@ static noinline void check_insert_range(struct maple_tree *mt, check_load(mt, i, ptr); } -static noinline void check_insert(struct maple_tree *mt, unsigned long index, - void *ptr) +static noinline void __init check_insert(struct maple_tree *mt, + unsigned long index, void *ptr) { int ret = -EINVAL; @@ -163,7 +184,7 @@ static noinline void check_insert(struct maple_tree *mt, unsigned long index, MT_BUG_ON(mt, ret != 0); } -static noinline void check_dup_insert(struct maple_tree *mt, +static noinline void __init check_dup_insert(struct maple_tree *mt, unsigned long index, void *ptr) { int ret = -EINVAL; @@ -173,13 +194,13 @@ static noinline void check_dup_insert(struct maple_tree *mt, } -static noinline -void check_index_load(struct maple_tree *mt, unsigned long index) +static noinline void __init check_index_load(struct maple_tree *mt, + unsigned long index) { return check_load(mt, 
index, xa_mk_value(index & LONG_MAX)); } -static inline int not_empty(struct maple_node *node) +static inline __init int not_empty(struct maple_node *node) { int i; @@ -194,8 +215,8 @@ static inline int not_empty(struct maple_node *node) } -static noinline void check_rev_seq(struct maple_tree *mt, unsigned long max, - bool verbose) +static noinline void __init check_rev_seq(struct maple_tree *mt, + unsigned long max, bool verbose) { unsigned long i = max, j; @@ -219,7 +240,7 @@ static noinline void check_rev_seq(struct maple_tree *mt, unsigned long max, #ifndef __KERNEL__ if (verbose) { rcu_barrier(); - mt_dump(mt); + mt_dump(mt, mt_dump_dec); pr_info(" %s test of 0-%lu %luK in %d active (%d total)\n", __func__, max, mt_get_alloc_size()/1024, mt_nr_allocated(), mt_nr_tallocated()); @@ -227,7 +248,7 @@ static noinline void check_rev_seq(struct maple_tree *mt, unsigned long max, #endif } -static noinline void check_seq(struct maple_tree *mt, unsigned long max, +static noinline void __init check_seq(struct maple_tree *mt, unsigned long max, bool verbose) { unsigned long i, j; @@ -248,7 +269,7 @@ static noinline void check_seq(struct maple_tree *mt, unsigned long max, #ifndef __KERNEL__ if (verbose) { rcu_barrier(); - mt_dump(mt); + mt_dump(mt, mt_dump_dec); pr_info(" seq test of 0-%lu %luK in %d active (%d total)\n", max, mt_get_alloc_size()/1024, mt_nr_allocated(), mt_nr_tallocated()); @@ -256,7 +277,7 @@ static noinline void check_seq(struct maple_tree *mt, unsigned long max, #endif } -static noinline void check_lb_not_empty(struct maple_tree *mt) +static noinline void __init check_lb_not_empty(struct maple_tree *mt) { unsigned long i, j; unsigned long huge = 4000UL * 1000 * 1000; @@ -275,13 +296,13 @@ static noinline void check_lb_not_empty(struct maple_tree *mt) mtree_destroy(mt); } -static noinline void check_lower_bound_split(struct maple_tree *mt) +static noinline void __init check_lower_bound_split(struct maple_tree *mt) { MT_BUG_ON(mt, !mtree_empty(mt)); check_lb_not_empty(mt); } -static noinline void check_upper_bound_split(struct maple_tree *mt) +static noinline void __init check_upper_bound_split(struct maple_tree *mt) { unsigned long i, j; unsigned long huge; @@ -306,7 +327,7 @@ static noinline void check_upper_bound_split(struct maple_tree *mt) mtree_destroy(mt); } -static noinline void check_mid_split(struct maple_tree *mt) +static noinline void __init check_mid_split(struct maple_tree *mt) { unsigned long huge = 8000UL * 1000 * 1000; @@ -315,7 +336,7 @@ static noinline void check_mid_split(struct maple_tree *mt) check_lb_not_empty(mt); } -static noinline void check_rev_find(struct maple_tree *mt) +static noinline void __init check_rev_find(struct maple_tree *mt) { int i, nr_entries = 200; void *val; @@ -354,7 +375,7 @@ static noinline void check_rev_find(struct maple_tree *mt) rcu_read_unlock(); } -static noinline void check_find(struct maple_tree *mt) +static noinline void __init check_find(struct maple_tree *mt) { unsigned long val = 0; unsigned long count; @@ -571,7 +592,7 @@ static noinline void check_find(struct maple_tree *mt) mtree_destroy(mt); } -static noinline void check_find_2(struct maple_tree *mt) +static noinline void __init check_find_2(struct maple_tree *mt) { unsigned long i, j; void *entry; @@ -616,7 +637,7 @@ static noinline void check_find_2(struct maple_tree *mt) #if defined(CONFIG_64BIT) -static noinline void check_alloc_rev_range(struct maple_tree *mt) +static noinline void __init check_alloc_rev_range(struct maple_tree *mt) { /* * Generated by: @@ 
-624,7 +645,7 @@ static noinline void check_alloc_rev_range(struct maple_tree *mt) * awk -F "-" '{printf "0x%s, 0x%s, ", $1, $2}' */ - unsigned long range[] = { + static const unsigned long range[] = { /* Inclusive , Exclusive. */ 0x565234af2000, 0x565234af4000, 0x565234af4000, 0x565234af9000, @@ -652,7 +673,7 @@ static noinline void check_alloc_rev_range(struct maple_tree *mt) 0x7fff58791000, 0x7fff58793000, }; - unsigned long holes[] = { + static const unsigned long holes[] = { /* * Note: start of hole is INCLUSIVE * end of hole is EXCLUSIVE @@ -672,7 +693,7 @@ static noinline void check_alloc_rev_range(struct maple_tree *mt) * 4. number that should be returned. * 5. return value */ - unsigned long req_range[] = { + static const unsigned long req_range[] = { 0x565234af9000, /* Min */ 0x7fff58791000, /* Max */ 0x1000, /* Size */ @@ -680,7 +701,7 @@ static noinline void check_alloc_rev_range(struct maple_tree *mt) 0, /* Return value success. */ 0x0, /* Min */ - 0x565234AF1 << 12, /* Max */ + 0x565234AF0 << 12, /* Max */ 0x3000, /* Size */ 0x565234AEE << 12, /* max - 3. */ 0, /* Return value success. */ @@ -692,14 +713,14 @@ static noinline void check_alloc_rev_range(struct maple_tree *mt) 0, /* Return value success. */ 0x0, /* Min */ - 0x7F36D510A << 12, /* Max */ + 0x7F36D5109 << 12, /* Max */ 0x4000, /* Size */ 0x7F36D5106 << 12, /* First rev hole of size 0x4000 */ 0, /* Return value success. */ /* Ascend test. */ 0x0, - 34148798629 << 12, + 34148798628 << 12, 19 << 12, 34148797418 << 12, 0x0, @@ -711,6 +732,12 @@ static noinline void check_alloc_rev_range(struct maple_tree *mt) 0x0, -EBUSY, + /* Single space test. */ + 34148798725 << 12, + 34148798725 << 12, + 1 << 12, + 34148798725 << 12, + 0, }; int i, range_count = ARRAY_SIZE(range); @@ -759,9 +786,9 @@ static noinline void check_alloc_rev_range(struct maple_tree *mt) mas_unlock(&mas); for (i = 0; i < req_range_count; i += 5) { #if DEBUG_REV_RANGE - pr_debug("\tReverse request between %lu-%lu size %lu, should get %lu\n", - req_range[i] >> 12, - (req_range[i + 1] >> 12) - 1, + pr_debug("\tReverse request %d between %lu-%lu size %lu, should get %lu\n", + i, req_range[i] >> 12, + (req_range[i + 1] >> 12), req_range[i+2] >> 12, req_range[i+3] >> 12); #endif @@ -777,13 +804,14 @@ static noinline void check_alloc_rev_range(struct maple_tree *mt) mt_set_non_kernel(1); mtree_erase(mt, 34148798727); /* create a deleted range. */ + mtree_erase(mt, 34148798725); check_mtree_alloc_rrange(mt, 0, 34359052173, 210253414, 34148798725, 0, mt); mtree_destroy(mt); } -static noinline void check_alloc_range(struct maple_tree *mt) +static noinline void __init check_alloc_range(struct maple_tree *mt) { /* * Generated by: @@ -791,7 +819,7 @@ static noinline void check_alloc_range(struct maple_tree *mt) * awk -F "-" '{printf "0x%s, 0x%s, ", $1, $2}' */ - unsigned long range[] = { + static const unsigned long range[] = { /* Inclusive , Exclusive. */ 0x565234af2000, 0x565234af4000, 0x565234af4000, 0x565234af9000, @@ -818,7 +846,7 @@ static noinline void check_alloc_range(struct maple_tree *mt) 0x7fff5878e000, 0x7fff58791000, 0x7fff58791000, 0x7fff58793000, }; - unsigned long holes[] = { + static const unsigned long holes[] = { /* Start of hole, end of hole, size of hole (+1) */ 0x565234afb000, 0x565234afc000, 0x1000, 0x565234afe000, 0x565235def000, 0x12F1000, @@ -833,7 +861,7 @@ static noinline void check_alloc_range(struct maple_tree *mt) * 4. number that should be returned. * 5. 
return value */ - unsigned long req_range[] = { + static const unsigned long req_range[] = { 0x565234af9000, /* Min */ 0x7fff58791000, /* Max */ 0x1000, /* Size */ @@ -880,6 +908,13 @@ static noinline void check_alloc_range(struct maple_tree *mt) 4503599618982063UL << 12, /* Size */ 34359052178 << 12, /* Expected location */ -EBUSY, /* Return failure. */ + + /* Test a single entry */ + 34148798648 << 12, /* Min */ + 34148798648 << 12, /* Max */ + 4096, /* Size of 1 */ + 34148798648 << 12, /* Location is the same as min/max */ + 0, /* Success */ }; int i, range_count = ARRAY_SIZE(range); int req_range_count = ARRAY_SIZE(req_range); @@ -893,7 +928,7 @@ static noinline void check_alloc_range(struct maple_tree *mt) #if DEBUG_ALLOC_RANGE pr_debug("\tInsert %lu-%lu\n", range[i] >> 12, (range[i + 1] >> 12) - 1); - mt_dump(mt); + mt_dump(mt, mt_dump_hex); #endif check_insert_range(mt, range[i] >> 12, (range[i + 1] >> 12) - 1, xa_mk_value(range[i] >> 12), 0); @@ -934,7 +969,7 @@ static noinline void check_alloc_range(struct maple_tree *mt) xa_mk_value(req_range[i] >> 12)); /* pointer */ mt_validate(mt); #if DEBUG_ALLOC_RANGE - mt_dump(mt); + mt_dump(mt, mt_dump_hex); #endif } @@ -942,10 +977,10 @@ static noinline void check_alloc_range(struct maple_tree *mt) } #endif -static noinline void check_ranges(struct maple_tree *mt) +static noinline void __init check_ranges(struct maple_tree *mt) { int i, val, val2; - unsigned long r[] = { + static const unsigned long r[] = { 10, 15, 20, 25, 17, 22, /* Overlaps previous range. */ @@ -1210,7 +1245,7 @@ static noinline void check_ranges(struct maple_tree *mt) MT_BUG_ON(mt, mt_height(mt) != 4); } -static noinline void check_next_entry(struct maple_tree *mt) +static noinline void __init check_next_entry(struct maple_tree *mt) { void *entry = NULL; unsigned long limit = 30, i = 0; @@ -1234,7 +1269,7 @@ static noinline void check_next_entry(struct maple_tree *mt) mtree_destroy(mt); } -static noinline void check_prev_entry(struct maple_tree *mt) +static noinline void __init check_prev_entry(struct maple_tree *mt) { unsigned long index = 16; void *value; @@ -1278,7 +1313,7 @@ static noinline void check_prev_entry(struct maple_tree *mt) mas_unlock(&mas); } -static noinline void check_root_expand(struct maple_tree *mt) +static noinline void __init check_root_expand(struct maple_tree *mt) { MA_STATE(mas, mt, 0, 0); void *ptr; @@ -1287,6 +1322,7 @@ static noinline void check_root_expand(struct maple_tree *mt) mas_lock(&mas); mas_set(&mas, 3); ptr = mas_walk(&mas); + MT_BUG_ON(mt, mas.index != 0); MT_BUG_ON(mt, ptr != NULL); MT_BUG_ON(mt, mas.index != 0); MT_BUG_ON(mt, mas.last != ULONG_MAX); @@ -1356,7 +1392,7 @@ static noinline void check_root_expand(struct maple_tree *mt) mas_store_gfp(&mas, ptr, GFP_KERNEL); ptr = mas_next(&mas, ULONG_MAX); MT_BUG_ON(mt, ptr != NULL); - MT_BUG_ON(mt, (mas.index != 1) && (mas.last != ULONG_MAX)); + MT_BUG_ON(mt, (mas.index != ULONG_MAX) && (mas.last != ULONG_MAX)); mas_set(&mas, 1); ptr = mas_prev(&mas, 0); @@ -1367,13 +1403,13 @@ static noinline void check_root_expand(struct maple_tree *mt) mas_unlock(&mas); } -static noinline void check_gap_combining(struct maple_tree *mt) +static noinline void __init check_gap_combining(struct maple_tree *mt) { struct maple_enode *mn1, *mn2; void *entry; unsigned long singletons = 100; - unsigned long *seq100; - unsigned long seq100_64[] = { + static const unsigned long *seq100; + static const unsigned long seq100_64[] = { /* 0-5 */ 74, 75, 76, 50, 100, 2, @@ -1387,7 +1423,7 @@ static noinline 
void check_gap_combining(struct maple_tree *mt) 76, 2, 79, 85, 4, }; - unsigned long seq100_32[] = { + static const unsigned long seq100_32[] = { /* 0-5 */ 61, 62, 63, 50, 100, 2, @@ -1401,11 +1437,11 @@ static noinline void check_gap_combining(struct maple_tree *mt) 76, 2, 79, 85, 4, }; - unsigned long seq2000[] = { + static const unsigned long seq2000[] = { 1152, 1151, 1100, 1200, 2, }; - unsigned long seq400[] = { + static const unsigned long seq400[] = { 286, 318, 256, 260, 266, 270, 275, 280, 290, 398, 286, 310, @@ -1564,7 +1600,7 @@ static noinline void check_gap_combining(struct maple_tree *mt) mt_set_non_kernel(0); mtree_destroy(mt); } -static noinline void check_node_overwrite(struct maple_tree *mt) +static noinline void __init check_node_overwrite(struct maple_tree *mt) { int i, max = 4000; @@ -1572,12 +1608,12 @@ static noinline void check_node_overwrite(struct maple_tree *mt) mtree_test_store_range(mt, i*100, i*100 + 50, xa_mk_value(i*100)); mtree_test_store_range(mt, 319951, 367950, NULL); - /*mt_dump(mt); */ + /*mt_dump(mt, mt_dump_dec); */ mt_validate(mt); } #if defined(BENCH_SLOT_STORE) -static noinline void bench_slot_store(struct maple_tree *mt) +static noinline void __init bench_slot_store(struct maple_tree *mt) { int i, brk = 105, max = 1040, brk_start = 100, count = 20000000; @@ -1593,7 +1629,7 @@ static noinline void bench_slot_store(struct maple_tree *mt) #endif #if defined(BENCH_NODE_STORE) -static noinline void bench_node_store(struct maple_tree *mt) +static noinline void __init bench_node_store(struct maple_tree *mt) { int i, overwrite = 76, max = 240, count = 20000000; @@ -1612,7 +1648,7 @@ static noinline void bench_node_store(struct maple_tree *mt) #endif #if defined(BENCH_AWALK) -static noinline void bench_awalk(struct maple_tree *mt) +static noinline void __init bench_awalk(struct maple_tree *mt) { int i, max = 2500, count = 50000000; MA_STATE(mas, mt, 1470, 1470); @@ -1629,7 +1665,7 @@ static noinline void bench_awalk(struct maple_tree *mt) } #endif #if defined(BENCH_WALK) -static noinline void bench_walk(struct maple_tree *mt) +static noinline void __init bench_walk(struct maple_tree *mt) { int i, max = 2500, count = 550000000; MA_STATE(mas, mt, 1470, 1470); @@ -1646,7 +1682,7 @@ static noinline void bench_walk(struct maple_tree *mt) #endif #if defined(BENCH_MT_FOR_EACH) -static noinline void bench_mt_for_each(struct maple_tree *mt) +static noinline void __init bench_mt_for_each(struct maple_tree *mt) { int i, count = 1000000; unsigned long max = 2500, index = 0; @@ -1670,7 +1706,7 @@ static noinline void bench_mt_for_each(struct maple_tree *mt) #endif /* check_forking - simulate the kernel forking sequence with the tree. 
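Much of the churn in this file is mechanical: boot-time test helpers gain __init and their lookup tables become static const, so the code can be discarded after initialisation and the tables live in rodata instead of on the stack. A sketch of the same pattern with made-up names and values:

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/maple_tree.h>
#include <linux/xarray.h>

/* Hypothetical seed data, kept out of the stack frame. */
static const unsigned long example_ranges[] = {
	/* first, last */
	0x1000, 0x1fff,
	0x3000, 0x3fff,
};

static int __init example_seed(struct maple_tree *mt)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(example_ranges); i += 2) {
		int ret = mtree_store_range(mt, example_ranges[i],
					    example_ranges[i + 1],
					    xa_mk_value(i), GFP_KERNEL);
		if (ret)
			return ret;
	}
	return 0;
}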
*/ -static noinline void check_forking(struct maple_tree *mt) +static noinline void __init check_forking(struct maple_tree *mt) { struct maple_tree newmt; @@ -1709,7 +1745,7 @@ static noinline void check_forking(struct maple_tree *mt) mtree_destroy(&newmt); } -static noinline void check_iteration(struct maple_tree *mt) +static noinline void __init check_iteration(struct maple_tree *mt) { int i, nr_entries = 125; void *val; @@ -1765,7 +1801,6 @@ static noinline void check_iteration(struct maple_tree *mt) mas.index = 760; mas.last = 765; mas_store(&mas, val); - mas_next(&mas, ULONG_MAX); } i++; } @@ -1777,7 +1812,7 @@ static noinline void check_iteration(struct maple_tree *mt) mt_set_non_kernel(0); } -static noinline void check_mas_store_gfp(struct maple_tree *mt) +static noinline void __init check_mas_store_gfp(struct maple_tree *mt) { struct maple_tree newmt; @@ -1810,7 +1845,7 @@ static noinline void check_mas_store_gfp(struct maple_tree *mt) } #if defined(BENCH_FORK) -static noinline void bench_forking(struct maple_tree *mt) +static noinline void __init bench_forking(struct maple_tree *mt) { struct maple_tree newmt; @@ -1852,15 +1887,17 @@ static noinline void bench_forking(struct maple_tree *mt) } #endif -static noinline void next_prev_test(struct maple_tree *mt) +static noinline void __init next_prev_test(struct maple_tree *mt) { int i, nr_entries; void *val; MA_STATE(mas, mt, 0, 0); struct maple_enode *mn; - unsigned long *level2; - unsigned long level2_64[] = {707, 1000, 710, 715, 720, 725}; - unsigned long level2_32[] = {1747, 2000, 1750, 1755, 1760, 1765}; + static const unsigned long *level2; + static const unsigned long level2_64[] = { 707, 1000, 710, 715, 720, + 725}; + static const unsigned long level2_32[] = { 1747, 2000, 1750, 1755, + 1760, 1765}; if (MAPLE_32BIT) { nr_entries = 500; @@ -1974,7 +2011,7 @@ static noinline void next_prev_test(struct maple_tree *mt) val = mas_next(&mas, ULONG_MAX); MT_BUG_ON(mt, val != NULL); - MT_BUG_ON(mt, mas.index != ULONG_MAX); + MT_BUG_ON(mt, mas.index != 0x7d6); MT_BUG_ON(mt, mas.last != ULONG_MAX); val = mas_prev(&mas, 0); @@ -1998,7 +2035,8 @@ static noinline void next_prev_test(struct maple_tree *mt) val = mas_prev(&mas, 0); MT_BUG_ON(mt, val != NULL); MT_BUG_ON(mt, mas.index != 0); - MT_BUG_ON(mt, mas.last != 0); + MT_BUG_ON(mt, mas.last != 5); + MT_BUG_ON(mt, mas.node != MAS_NONE); mas.index = 0; mas.last = 5; @@ -2010,7 +2048,7 @@ static noinline void next_prev_test(struct maple_tree *mt) val = mas_prev(&mas, 0); MT_BUG_ON(mt, val != NULL); MT_BUG_ON(mt, mas.index != 0); - MT_BUG_ON(mt, mas.last != 0); + MT_BUG_ON(mt, mas.last != 9); mas_unlock(&mas); mtree_destroy(mt); @@ -2028,7 +2066,7 @@ static noinline void next_prev_test(struct maple_tree *mt) /* Test spanning writes that require balancing right sibling or right cousin */ -static noinline void check_spanning_relatives(struct maple_tree *mt) +static noinline void __init check_spanning_relatives(struct maple_tree *mt) { unsigned long i, nr_entries = 1000; @@ -2041,7 +2079,7 @@ static noinline void check_spanning_relatives(struct maple_tree *mt) mtree_store_range(mt, 9365, 9955, NULL, GFP_KERNEL); } -static noinline void check_fuzzer(struct maple_tree *mt) +static noinline void __init check_fuzzer(struct maple_tree *mt) { /* * 1. Causes a spanning rebalance of a single root node. 
@@ -2438,7 +2476,7 @@ static noinline void check_fuzzer(struct maple_tree *mt) } /* duplicate the tree with a specific gap */ -static noinline void check_dup_gaps(struct maple_tree *mt, +static noinline void __init check_dup_gaps(struct maple_tree *mt, unsigned long nr_entries, bool zero_start, unsigned long gap) { @@ -2478,7 +2516,7 @@ static noinline void check_dup_gaps(struct maple_tree *mt, } /* Duplicate many sizes of trees. Mainly to test expected entry values */ -static noinline void check_dup(struct maple_tree *mt) +static noinline void __init check_dup(struct maple_tree *mt) { int i; int big_start = 100010; @@ -2566,7 +2604,7 @@ static noinline void check_dup(struct maple_tree *mt) } } -static noinline void check_bnode_min_spanning(struct maple_tree *mt) +static noinline void __init check_bnode_min_spanning(struct maple_tree *mt) { int i = 50; MA_STATE(mas, mt, 0, 0); @@ -2585,7 +2623,7 @@ static noinline void check_bnode_min_spanning(struct maple_tree *mt) mt_set_non_kernel(0); } -static noinline void check_empty_area_window(struct maple_tree *mt) +static noinline void __init check_empty_area_window(struct maple_tree *mt) { unsigned long i, nr_entries = 20; MA_STATE(mas, mt, 0, 0); @@ -2660,7 +2698,7 @@ static noinline void check_empty_area_window(struct maple_tree *mt) MT_BUG_ON(mt, mas_empty_area(&mas, 5, 100, 6) != -EBUSY); mas_reset(&mas); - MT_BUG_ON(mt, mas_empty_area(&mas, 0, 8, 10) != -EBUSY); + MT_BUG_ON(mt, mas_empty_area(&mas, 0, 8, 10) != -EINVAL); mas_reset(&mas); mas_empty_area(&mas, 100, 165, 3); @@ -2670,7 +2708,7 @@ static noinline void check_empty_area_window(struct maple_tree *mt) rcu_read_unlock(); } -static noinline void check_empty_area_fill(struct maple_tree *mt) +static noinline void __init check_empty_area_fill(struct maple_tree *mt) { const unsigned long max = 0x25D78000; unsigned long size; @@ -2713,12 +2751,635 @@ static noinline void check_empty_area_fill(struct maple_tree *mt) mt_set_non_kernel(0); } +/* + * Check MAS_START, MAS_PAUSE, active (implied), and MAS_NONE transitions. + * + * The table below shows the single entry tree (0-0 pointer) and normal tree + * with nodes. 
+ * + * Function ENTRY Start Result index & last + * ┬ ┬ ┬ ┬ ┬ + * │ │ │ │ └─ the final range + * │ │ │ └─ The node value after execution + * │ │ └─ The node value before execution + * │ └─ If the entry exists or does not exists (DNE) + * └─ The function name + * + * Function ENTRY Start Result index & last + * mas_next() + * - after last + * Single entry tree at 0-0 + * ------------------------ + * DNE MAS_START MAS_NONE 1 - oo + * DNE MAS_PAUSE MAS_NONE 1 - oo + * DNE MAS_ROOT MAS_NONE 1 - oo + * when index = 0 + * DNE MAS_NONE MAS_ROOT 0 + * when index > 0 + * DNE MAS_NONE MAS_NONE 1 - oo + * + * Normal tree + * ----------- + * exists MAS_START active range + * DNE MAS_START active set to last range + * exists MAS_PAUSE active range + * DNE MAS_PAUSE active set to last range + * exists MAS_NONE active range + * exists active active range + * DNE active active set to last range + * + * Function ENTRY Start Result index & last + * mas_prev() + * - before index + * Single entry tree at 0-0 + * ------------------------ + * if index > 0 + * exists MAS_START MAS_ROOT 0 + * exists MAS_PAUSE MAS_ROOT 0 + * exists MAS_NONE MAS_ROOT 0 + * + * if index == 0 + * DNE MAS_START MAS_NONE 0 + * DNE MAS_PAUSE MAS_NONE 0 + * DNE MAS_NONE MAS_NONE 0 + * DNE MAS_ROOT MAS_NONE 0 + * + * Normal tree + * ----------- + * exists MAS_START active range + * DNE MAS_START active set to min + * exists MAS_PAUSE active range + * DNE MAS_PAUSE active set to min + * exists MAS_NONE active range + * DNE MAS_NONE MAS_NONE set to min + * any MAS_ROOT MAS_NONE 0 + * exists active active range + * DNE active active last range + * + * Function ENTRY Start Result index & last + * mas_find() + * - at index or next + * Single entry tree at 0-0 + * ------------------------ + * if index > 0 + * DNE MAS_START MAS_NONE 0 + * DNE MAS_PAUSE MAS_NONE 0 + * DNE MAS_ROOT MAS_NONE 0 + * DNE MAS_NONE MAS_NONE 0 + * if index == 0 + * exists MAS_START MAS_ROOT 0 + * exists MAS_PAUSE MAS_ROOT 0 + * exists MAS_NONE MAS_ROOT 0 + * + * Normal tree + * ----------- + * exists MAS_START active range + * DNE MAS_START active set to max + * exists MAS_PAUSE active range + * DNE MAS_PAUSE active set to max + * exists MAS_NONE active range + * exists active active range + * DNE active active last range (max < last) + * + * Function ENTRY Start Result index & last + * mas_find_rev() + * - at index or before + * Single entry tree at 0-0 + * ------------------------ + * if index > 0 + * exists MAS_START MAS_ROOT 0 + * exists MAS_PAUSE MAS_ROOT 0 + * exists MAS_NONE MAS_ROOT 0 + * if index == 0 + * DNE MAS_START MAS_NONE 0 + * DNE MAS_PAUSE MAS_NONE 0 + * DNE MAS_NONE MAS_NONE 0 + * DNE MAS_ROOT MAS_NONE 0 + * + * Normal tree + * ----------- + * exists MAS_START active range + * DNE MAS_START active set to min + * exists MAS_PAUSE active range + * DNE MAS_PAUSE active set to min + * exists MAS_NONE active range + * exists active active range + * DNE active active last range (min > index) + * + * Function ENTRY Start Result index & last + * mas_walk() + * - Look up index + * Single entry tree at 0-0 + * ------------------------ + * if index > 0 + * DNE MAS_START MAS_ROOT 1 - oo + * DNE MAS_PAUSE MAS_ROOT 1 - oo + * DNE MAS_NONE MAS_ROOT 1 - oo + * DNE MAS_ROOT MAS_ROOT 1 - oo + * if index == 0 + * exists MAS_START MAS_ROOT 0 + * exists MAS_PAUSE MAS_ROOT 0 + * exists MAS_NONE MAS_ROOT 0 + * exists MAS_ROOT MAS_ROOT 0 + * + * Normal tree + * ----------- + * exists MAS_START active range + * DNE MAS_START active range of NULL + * exists MAS_PAUSE active 
range + * DNE MAS_PAUSE active range of NULL + * exists MAS_NONE active range + * DNE MAS_NONE active range of NULL + * exists active active range + * DNE active active range of NULL + */ + +#define mas_active(x) (((x).node != MAS_ROOT) && \ + ((x).node != MAS_START) && \ + ((x).node != MAS_PAUSE) && \ + ((x).node != MAS_NONE)) +static noinline void __init check_state_handling(struct maple_tree *mt) +{ + MA_STATE(mas, mt, 0, 0); + void *entry, *ptr = (void *) 0x1234500; + void *ptr2 = &ptr; + void *ptr3 = &ptr2; + + /* Check MAS_ROOT First */ + mtree_store_range(mt, 0, 0, ptr, GFP_KERNEL); + + mas_lock(&mas); + /* prev: Start -> none */ + entry = mas_prev(&mas, 0); + MT_BUG_ON(mt, entry != NULL); + MT_BUG_ON(mt, mas.node != MAS_NONE); + + /* prev: Start -> root */ + mas_set(&mas, 10); + entry = mas_prev(&mas, 0); + MT_BUG_ON(mt, entry != ptr); + MT_BUG_ON(mt, mas.index != 0); + MT_BUG_ON(mt, mas.last != 0); + MT_BUG_ON(mt, mas.node != MAS_ROOT); + + /* prev: pause -> root */ + mas_set(&mas, 10); + mas_pause(&mas); + entry = mas_prev(&mas, 0); + MT_BUG_ON(mt, entry != ptr); + MT_BUG_ON(mt, mas.index != 0); + MT_BUG_ON(mt, mas.last != 0); + MT_BUG_ON(mt, mas.node != MAS_ROOT); + + /* next: start -> none */ + mas_set(&mas, 0); + entry = mas_next(&mas, ULONG_MAX); + MT_BUG_ON(mt, mas.index != 1); + MT_BUG_ON(mt, mas.last != ULONG_MAX); + MT_BUG_ON(mt, entry != NULL); + MT_BUG_ON(mt, mas.node != MAS_NONE); + + /* next: start -> none */ + mas_set(&mas, 10); + entry = mas_next(&mas, ULONG_MAX); + MT_BUG_ON(mt, mas.index != 1); + MT_BUG_ON(mt, mas.last != ULONG_MAX); + MT_BUG_ON(mt, entry != NULL); + MT_BUG_ON(mt, mas.node != MAS_NONE); + + /* find: start -> root */ + mas_set(&mas, 0); + entry = mas_find(&mas, ULONG_MAX); + MT_BUG_ON(mt, entry != ptr); + MT_BUG_ON(mt, mas.index != 0); + MT_BUG_ON(mt, mas.last != 0); + MT_BUG_ON(mt, mas.node != MAS_ROOT); + + /* find: root -> none */ + entry = mas_find(&mas, ULONG_MAX); + MT_BUG_ON(mt, entry != NULL); + MT_BUG_ON(mt, mas.index != 1); + MT_BUG_ON(mt, mas.last != ULONG_MAX); + MT_BUG_ON(mt, mas.node != MAS_NONE); + + /* find: none -> none */ + entry = mas_find(&mas, ULONG_MAX); + MT_BUG_ON(mt, entry != NULL); + MT_BUG_ON(mt, mas.index != 1); + MT_BUG_ON(mt, mas.last != ULONG_MAX); + MT_BUG_ON(mt, mas.node != MAS_NONE); + + /* find: start -> none */ + mas_set(&mas, 10); + entry = mas_find(&mas, ULONG_MAX); + MT_BUG_ON(mt, entry != NULL); + MT_BUG_ON(mt, mas.index != 1); + MT_BUG_ON(mt, mas.last != ULONG_MAX); + MT_BUG_ON(mt, mas.node != MAS_NONE); + + /* find_rev: none -> root */ + entry = mas_find_rev(&mas, 0); + MT_BUG_ON(mt, entry != ptr); + MT_BUG_ON(mt, mas.index != 0); + MT_BUG_ON(mt, mas.last != 0); + MT_BUG_ON(mt, mas.node != MAS_ROOT); + + /* find_rev: start -> root */ + mas_set(&mas, 0); + entry = mas_find_rev(&mas, 0); + MT_BUG_ON(mt, entry != ptr); + MT_BUG_ON(mt, mas.index != 0); + MT_BUG_ON(mt, mas.last != 0); + MT_BUG_ON(mt, mas.node != MAS_ROOT); + + /* find_rev: root -> none */ + entry = mas_find_rev(&mas, 0); + MT_BUG_ON(mt, entry != NULL); + MT_BUG_ON(mt, mas.index != 0); + MT_BUG_ON(mt, mas.last != 0); + MT_BUG_ON(mt, mas.node != MAS_NONE); + + /* find_rev: none -> none */ + entry = mas_find_rev(&mas, 0); + MT_BUG_ON(mt, entry != NULL); + MT_BUG_ON(mt, mas.index != 0); + MT_BUG_ON(mt, mas.last != 0); + MT_BUG_ON(mt, mas.node != MAS_NONE); + + /* find_rev: start -> root */ + mas_set(&mas, 10); + entry = mas_find_rev(&mas, 0); + MT_BUG_ON(mt, entry != ptr); + MT_BUG_ON(mt, mas.index != 0); + MT_BUG_ON(mt, mas.last != 0); + 
MT_BUG_ON(mt, mas.node != MAS_ROOT); + + /* walk: start -> none */ + mas_set(&mas, 10); + entry = mas_walk(&mas); + MT_BUG_ON(mt, entry != NULL); + MT_BUG_ON(mt, mas.index != 1); + MT_BUG_ON(mt, mas.last != ULONG_MAX); + MT_BUG_ON(mt, mas.node != MAS_NONE); + + /* walk: pause -> none*/ + mas_set(&mas, 10); + mas_pause(&mas); + entry = mas_walk(&mas); + MT_BUG_ON(mt, entry != NULL); + MT_BUG_ON(mt, mas.index != 1); + MT_BUG_ON(mt, mas.last != ULONG_MAX); + MT_BUG_ON(mt, mas.node != MAS_NONE); + + /* walk: none -> none */ + mas.index = mas.last = 10; + entry = mas_walk(&mas); + MT_BUG_ON(mt, entry != NULL); + MT_BUG_ON(mt, mas.index != 1); + MT_BUG_ON(mt, mas.last != ULONG_MAX); + MT_BUG_ON(mt, mas.node != MAS_NONE); + + /* walk: none -> none */ + entry = mas_walk(&mas); + MT_BUG_ON(mt, entry != NULL); + MT_BUG_ON(mt, mas.index != 1); + MT_BUG_ON(mt, mas.last != ULONG_MAX); + MT_BUG_ON(mt, mas.node != MAS_NONE); + + /* walk: start -> root */ + mas_set(&mas, 0); + entry = mas_walk(&mas); + MT_BUG_ON(mt, entry != ptr); + MT_BUG_ON(mt, mas.index != 0); + MT_BUG_ON(mt, mas.last != 0); + MT_BUG_ON(mt, mas.node != MAS_ROOT); + + /* walk: pause -> root */ + mas_set(&mas, 0); + mas_pause(&mas); + entry = mas_walk(&mas); + MT_BUG_ON(mt, entry != ptr); + MT_BUG_ON(mt, mas.index != 0); + MT_BUG_ON(mt, mas.last != 0); + MT_BUG_ON(mt, mas.node != MAS_ROOT); + + /* walk: none -> root */ + mas.node = MAS_NONE; + entry = mas_walk(&mas); + MT_BUG_ON(mt, entry != ptr); + MT_BUG_ON(mt, mas.index != 0); + MT_BUG_ON(mt, mas.last != 0); + MT_BUG_ON(mt, mas.node != MAS_ROOT); + + /* walk: root -> root */ + entry = mas_walk(&mas); + MT_BUG_ON(mt, entry != ptr); + MT_BUG_ON(mt, mas.index != 0); + MT_BUG_ON(mt, mas.last != 0); + MT_BUG_ON(mt, mas.node != MAS_ROOT); + + /* walk: root -> none */ + mas_set(&mas, 10); + entry = mas_walk(&mas); + MT_BUG_ON(mt, entry != NULL); + MT_BUG_ON(mt, mas.index != 1); + MT_BUG_ON(mt, mas.last != ULONG_MAX); + MT_BUG_ON(mt, mas.node != MAS_NONE); + + /* walk: none -> root */ + mas.index = mas.last = 0; + entry = mas_walk(&mas); + MT_BUG_ON(mt, entry != ptr); + MT_BUG_ON(mt, mas.index != 0); + MT_BUG_ON(mt, mas.last != 0); + MT_BUG_ON(mt, mas.node != MAS_ROOT); + + mas_unlock(&mas); + + /* Check when there is an actual node */ + mtree_store_range(mt, 0, 0, NULL, GFP_KERNEL); + mtree_store_range(mt, 0x1000, 0x1500, ptr, GFP_KERNEL); + mtree_store_range(mt, 0x2000, 0x2500, ptr2, GFP_KERNEL); + mtree_store_range(mt, 0x3000, 0x3500, ptr3, GFP_KERNEL); + + mas_lock(&mas); + + /* next: start ->active */ + mas_set(&mas, 0); + entry = mas_next(&mas, ULONG_MAX); + MT_BUG_ON(mt, entry != ptr); + MT_BUG_ON(mt, mas.index != 0x1000); + MT_BUG_ON(mt, mas.last != 0x1500); + MT_BUG_ON(mt, !mas_active(mas)); + + /* next: pause ->active */ + mas_set(&mas, 0); + mas_pause(&mas); + entry = mas_next(&mas, ULONG_MAX); + MT_BUG_ON(mt, entry != ptr); + MT_BUG_ON(mt, mas.index != 0x1000); + MT_BUG_ON(mt, mas.last != 0x1500); + MT_BUG_ON(mt, !mas_active(mas)); + + /* next: none ->active */ + mas.index = mas.last = 0; + mas.offset = 0; + mas.node = MAS_NONE; + entry = mas_next(&mas, ULONG_MAX); + MT_BUG_ON(mt, entry != ptr); + MT_BUG_ON(mt, mas.index != 0x1000); + MT_BUG_ON(mt, mas.last != 0x1500); + MT_BUG_ON(mt, !mas_active(mas)); + + /* next:active ->active */ + entry = mas_next(&mas, ULONG_MAX); + MT_BUG_ON(mt, entry != ptr2); + MT_BUG_ON(mt, mas.index != 0x2000); + MT_BUG_ON(mt, mas.last != 0x2500); + MT_BUG_ON(mt, !mas_active(mas)); + + /* next:active -> active out of range*/ + entry = mas_next(&mas, 
0x2999); + MT_BUG_ON(mt, entry != NULL); + MT_BUG_ON(mt, mas.index != 0x2501); + MT_BUG_ON(mt, mas.last != 0x2fff); + MT_BUG_ON(mt, !mas_active(mas)); + + /* Continue after out of range*/ + entry = mas_next(&mas, ULONG_MAX); + MT_BUG_ON(mt, entry != ptr3); + MT_BUG_ON(mt, mas.index != 0x3000); + MT_BUG_ON(mt, mas.last != 0x3500); + MT_BUG_ON(mt, !mas_active(mas)); + + /* next:active -> active out of range*/ + entry = mas_next(&mas, ULONG_MAX); + MT_BUG_ON(mt, entry != NULL); + MT_BUG_ON(mt, mas.index != 0x3501); + MT_BUG_ON(mt, mas.last != ULONG_MAX); + MT_BUG_ON(mt, !mas_active(mas)); + + /* next: none -> active, skip value at location */ + mas_set(&mas, 0); + entry = mas_next(&mas, ULONG_MAX); + mas.node = MAS_NONE; + mas.offset = 0; + entry = mas_next(&mas, ULONG_MAX); + MT_BUG_ON(mt, entry != ptr2); + MT_BUG_ON(mt, mas.index != 0x2000); + MT_BUG_ON(mt, mas.last != 0x2500); + MT_BUG_ON(mt, !mas_active(mas)); + + /* prev:active ->active */ + entry = mas_prev(&mas, 0); + MT_BUG_ON(mt, entry != ptr); + MT_BUG_ON(mt, mas.index != 0x1000); + MT_BUG_ON(mt, mas.last != 0x1500); + MT_BUG_ON(mt, !mas_active(mas)); + + /* prev:active -> active out of range*/ + entry = mas_prev(&mas, 0); + MT_BUG_ON(mt, entry != NULL); + MT_BUG_ON(mt, mas.index != 0); + MT_BUG_ON(mt, mas.last != 0x0FFF); + MT_BUG_ON(mt, !mas_active(mas)); + + /* prev: pause ->active */ + mas_set(&mas, 0x3600); + entry = mas_prev(&mas, 0); + MT_BUG_ON(mt, entry != ptr3); + mas_pause(&mas); + entry = mas_prev(&mas, 0); + MT_BUG_ON(mt, entry != ptr2); + MT_BUG_ON(mt, mas.index != 0x2000); + MT_BUG_ON(mt, mas.last != 0x2500); + MT_BUG_ON(mt, !mas_active(mas)); + + /* prev:active -> active out of range*/ + entry = mas_prev(&mas, 0x1600); + MT_BUG_ON(mt, entry != NULL); + MT_BUG_ON(mt, mas.index != 0x1501); + MT_BUG_ON(mt, mas.last != 0x1FFF); + MT_BUG_ON(mt, !mas_active(mas)); + + /* prev: active ->active, continue*/ + entry = mas_prev(&mas, 0); + MT_BUG_ON(mt, entry != ptr); + MT_BUG_ON(mt, mas.index != 0x1000); + MT_BUG_ON(mt, mas.last != 0x1500); + MT_BUG_ON(mt, !mas_active(mas)); + + /* find: start ->active */ + mas_set(&mas, 0); + entry = mas_find(&mas, ULONG_MAX); + MT_BUG_ON(mt, entry != ptr); + MT_BUG_ON(mt, mas.index != 0x1000); + MT_BUG_ON(mt, mas.last != 0x1500); + MT_BUG_ON(mt, !mas_active(mas)); + + /* find: pause ->active */ + mas_set(&mas, 0); + mas_pause(&mas); + entry = mas_find(&mas, ULONG_MAX); + MT_BUG_ON(mt, entry != ptr); + MT_BUG_ON(mt, mas.index != 0x1000); + MT_BUG_ON(mt, mas.last != 0x1500); + MT_BUG_ON(mt, !mas_active(mas)); + + /* find: start ->active on value */; + mas_set(&mas, 1200); + entry = mas_find(&mas, ULONG_MAX); + MT_BUG_ON(mt, entry != ptr); + MT_BUG_ON(mt, mas.index != 0x1000); + MT_BUG_ON(mt, mas.last != 0x1500); + MT_BUG_ON(mt, !mas_active(mas)); + + /* find:active ->active */ + entry = mas_find(&mas, ULONG_MAX); + MT_BUG_ON(mt, entry != ptr2); + MT_BUG_ON(mt, mas.index != 0x2000); + MT_BUG_ON(mt, mas.last != 0x2500); + MT_BUG_ON(mt, !mas_active(mas)); + + + /* find:active -> active (NULL)*/ + entry = mas_find(&mas, 0x2700); + MT_BUG_ON(mt, entry != NULL); + MT_BUG_ON(mt, mas.index != 0x2501); + MT_BUG_ON(mt, mas.last != 0x2FFF); + MT_BUG_ON(mt, !mas_active(mas)); + + /* find: none ->active */ + entry = mas_find(&mas, 0x5000); + MT_BUG_ON(mt, entry != ptr3); + MT_BUG_ON(mt, mas.index != 0x3000); + MT_BUG_ON(mt, mas.last != 0x3500); + MT_BUG_ON(mt, !mas_active(mas)); + + /* find:active -> active (NULL) end*/ + entry = mas_find(&mas, ULONG_MAX); + MT_BUG_ON(mt, entry != NULL); + MT_BUG_ON(mt, 
mas.index != 0x3501); + MT_BUG_ON(mt, mas.last != ULONG_MAX); + MT_BUG_ON(mt, !mas_active(mas)); + + /* find_rev: active (END) ->active */ + entry = mas_find_rev(&mas, 0); + MT_BUG_ON(mt, entry != ptr3); + MT_BUG_ON(mt, mas.index != 0x3000); + MT_BUG_ON(mt, mas.last != 0x3500); + MT_BUG_ON(mt, !mas_active(mas)); + + /* find_rev:active ->active */ + entry = mas_find_rev(&mas, 0); + MT_BUG_ON(mt, entry != ptr2); + MT_BUG_ON(mt, mas.index != 0x2000); + MT_BUG_ON(mt, mas.last != 0x2500); + MT_BUG_ON(mt, !mas_active(mas)); + + /* find_rev: pause ->active */ + mas_pause(&mas); + entry = mas_find_rev(&mas, 0); + MT_BUG_ON(mt, entry != ptr); + MT_BUG_ON(mt, mas.index != 0x1000); + MT_BUG_ON(mt, mas.last != 0x1500); + MT_BUG_ON(mt, !mas_active(mas)); + + /* find_rev:active -> active */ + entry = mas_find_rev(&mas, 0); + MT_BUG_ON(mt, entry != NULL); + MT_BUG_ON(mt, mas.index != 0); + MT_BUG_ON(mt, mas.last != 0x0FFF); + MT_BUG_ON(mt, !mas_active(mas)); + + /* find_rev: start ->active */ + mas_set(&mas, 0x1200); + entry = mas_find_rev(&mas, 0); + MT_BUG_ON(mt, entry != ptr); + MT_BUG_ON(mt, mas.index != 0x1000); + MT_BUG_ON(mt, mas.last != 0x1500); + MT_BUG_ON(mt, !mas_active(mas)); + + /* mas_walk start ->active */ + mas_set(&mas, 0x1200); + entry = mas_walk(&mas); + MT_BUG_ON(mt, entry != ptr); + MT_BUG_ON(mt, mas.index != 0x1000); + MT_BUG_ON(mt, mas.last != 0x1500); + MT_BUG_ON(mt, !mas_active(mas)); + + /* mas_walk start ->active */ + mas_set(&mas, 0x1600); + entry = mas_walk(&mas); + MT_BUG_ON(mt, entry != NULL); + MT_BUG_ON(mt, mas.index != 0x1501); + MT_BUG_ON(mt, mas.last != 0x1fff); + MT_BUG_ON(mt, !mas_active(mas)); + + /* mas_walk pause ->active */ + mas_set(&mas, 0x1200); + mas_pause(&mas); + entry = mas_walk(&mas); + MT_BUG_ON(mt, entry != ptr); + MT_BUG_ON(mt, mas.index != 0x1000); + MT_BUG_ON(mt, mas.last != 0x1500); + MT_BUG_ON(mt, !mas_active(mas)); + + /* mas_walk pause -> active */ + mas_set(&mas, 0x1600); + mas_pause(&mas); + entry = mas_walk(&mas); + MT_BUG_ON(mt, entry != NULL); + MT_BUG_ON(mt, mas.index != 0x1501); + MT_BUG_ON(mt, mas.last != 0x1fff); + MT_BUG_ON(mt, !mas_active(mas)); + + /* mas_walk none -> active */ + mas_set(&mas, 0x1200); + mas.node = MAS_NONE; + entry = mas_walk(&mas); + MT_BUG_ON(mt, entry != ptr); + MT_BUG_ON(mt, mas.index != 0x1000); + MT_BUG_ON(mt, mas.last != 0x1500); + MT_BUG_ON(mt, !mas_active(mas)); + + /* mas_walk none -> active */ + mas_set(&mas, 0x1600); + mas.node = MAS_NONE; + entry = mas_walk(&mas); + MT_BUG_ON(mt, entry != NULL); + MT_BUG_ON(mt, mas.index != 0x1501); + MT_BUG_ON(mt, mas.last != 0x1fff); + MT_BUG_ON(mt, !mas_active(mas)); + + /* mas_walk active -> active */ + mas.index = 0x1200; + mas.last = 0x1200; + mas.offset = 0; + entry = mas_walk(&mas); + MT_BUG_ON(mt, entry != ptr); + MT_BUG_ON(mt, mas.index != 0x1000); + MT_BUG_ON(mt, mas.last != 0x1500); + MT_BUG_ON(mt, !mas_active(mas)); + + /* mas_walk active -> active */ + mas.index = 0x1600; + mas.last = 0x1600; + entry = mas_walk(&mas); + MT_BUG_ON(mt, entry != NULL); + MT_BUG_ON(mt, mas.index != 0x1501); + MT_BUG_ON(mt, mas.last != 0x1fff); + MT_BUG_ON(mt, !mas_active(mas)); + + mas_unlock(&mas); +} + static DEFINE_MTREE(tree); -static int maple_tree_seed(void) +static int __init maple_tree_seed(void) { - unsigned long set[] = {5015, 5014, 5017, 25, 1000, - 1001, 1002, 1003, 1005, 0, - 5003, 5002}; + unsigned long set[] = { 5015, 5014, 5017, 25, 1000, + 1001, 1002, 1003, 1005, 0, + 5003, 5002}; void *ptr = &set; pr_info("\nTEST STARTING\n\n"); @@ -2974,6 +3635,10 @@ static 
int maple_tree_seed(void) mtree_destroy(&tree); + mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE); + check_state_handling(&tree); + mtree_destroy(&tree); + #if defined(BENCH) skip: #endif @@ -2988,7 +3653,7 @@ skip: return -EINVAL; } -static void maple_tree_harvest(void) +static void __exit maple_tree_harvest(void) { } diff --git a/lib/test_vmalloc.c b/lib/test_vmalloc.c index 9dd9745d365f..3718d9886407 100644 --- a/lib/test_vmalloc.c +++ b/lib/test_vmalloc.c @@ -369,7 +369,7 @@ vm_map_ram_test(void) int i; map_nr_pages = nr_pages > 0 ? nr_pages:1; - pages = kmalloc(map_nr_pages * sizeof(struct page), GFP_KERNEL); + pages = kcalloc(map_nr_pages, sizeof(struct page *), GFP_KERNEL); if (!pages) return -1; diff --git a/lib/ubsan.c b/lib/ubsan.c index e2cc4a799312..3f90810f9f42 100644 --- a/lib/ubsan.c +++ b/lib/ubsan.c @@ -425,9 +425,6 @@ EXPORT_SYMBOL(__ubsan_handle_load_invalid_value); void __ubsan_handle_alignment_assumption(void *_data, unsigned long ptr, unsigned long align, - unsigned long offset); -void __ubsan_handle_alignment_assumption(void *_data, unsigned long ptr, - unsigned long align, unsigned long offset) { struct alignment_assumption_data *data = _data; diff --git a/lib/ubsan.h b/lib/ubsan.h index cc5cb94895a6..5d99ab81913b 100644 --- a/lib/ubsan.h +++ b/lib/ubsan.h @@ -124,4 +124,15 @@ typedef s64 s_max; typedef u64 u_max; #endif +void __ubsan_handle_divrem_overflow(void *_data, void *lhs, void *rhs); +void __ubsan_handle_type_mismatch(struct type_mismatch_data *data, void *ptr); +void __ubsan_handle_type_mismatch_v1(void *_data, void *ptr); +void __ubsan_handle_out_of_bounds(void *_data, void *index); +void __ubsan_handle_shift_out_of_bounds(void *_data, void *lhs, void *rhs); +void __ubsan_handle_builtin_unreachable(void *_data); +void __ubsan_handle_load_invalid_value(void *_data, void *val); +void __ubsan_handle_alignment_assumption(void *_data, unsigned long ptr, + unsigned long align, + unsigned long offset); + #endif diff --git a/lib/xarray.c b/lib/xarray.c index ea9ce1f0b386..2071a3718f4e 100644 --- a/lib/xarray.c +++ b/lib/xarray.c @@ -12,6 +12,8 @@ #include <linux/slab.h> #include <linux/xarray.h> +#include "radix-tree.h" + /* * Coding conventions in this file: * @@ -247,10 +249,6 @@ void *xas_load(struct xa_state *xas) } EXPORT_SYMBOL_GPL(xas_load); -/* Move the radix tree node cache here */ -extern struct kmem_cache *radix_tree_node_cachep; -extern void radix_tree_node_rcu_free(struct rcu_head *head); - #define XA_RCU_FREE ((struct xarray *)1) static void xa_node_free(struct xa_node *node) |
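
Context for the new state table and check_state_handling() test above: the MAS_PAUSE transitions it documents are the ones a caller hits when dropping and retaking the lock during a long walk. The sketch below is illustrative only, is not part of this patch, and assumes nothing beyond the public helpers declared in include/linux/maple_tree.h (MA_STATE(), mas_for_each(), mas_pause(), mas_lock()/mas_unlock()); the tree and function names are invented for the example.

#include <linux/maple_tree.h>
#include <linux/sched.h>

static DEFINE_MTREE(example_tree);

/*
 * Walk every entry, periodically dropping the lock.  mas_pause() moves the
 * iterator to MAS_PAUSE; the next mas_find() issued by mas_for_each()
 * re-walks from mas.index and returns to the active state, matching the
 * "pause -> active" rows in the table above.
 */
static void example_walk(void)
{
	MA_STATE(mas, &example_tree, 0, 0);
	void *entry;

	mas_lock(&mas);
	mas_for_each(&mas, entry, ULONG_MAX) {
		/* ... use entry ... */
		if (need_resched()) {
			mas_pause(&mas);	/* active -> MAS_PAUSE */
			mas_unlock(&mas);
			cond_resched();
			mas_lock(&mas);		/* next find: MAS_PAUSE -> active */
		}
	}
	mas_unlock(&mas);
}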
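
On the check_empty_area_window() expectation flipping from -EBUSY to -EINVAL: the updated assertion distinguishes a request that can never fit in the search window from a window that is merely occupied. This reading is inferred from the assertions in the patch rather than quoted from it; a minimal sketch against the tree the test populates:

	MA_STATE(mas, mt, 0, 0);
	int ret;

	/* size 10 cannot fit anywhere inside [0, 8]: rejected as -EINVAL */
	ret = mas_empty_area(&mas, 0, 8, 10);

	/* window is large enough but holds no free run of 6: -EBUSY */
	ret = mas_empty_area(&mas, 5, 100, 6);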
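
The lib/test_vmalloc.c hunk fixes two things in one line: the array stores struct page pointers, so the element size is sizeof(struct page *) rather than sizeof(struct page), and kcalloc() checks the count * size multiplication for overflow and zero-initialises the buffer. A generic sketch of the same allocation pattern (illustrative, not taken verbatim from the file):

	struct page **pages;

	/* overflow-checked, zeroed array of map_nr_pages page pointers */
	pages = kcalloc(map_nr_pages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -1;

	/* ... vm_map_ram() exercise ... */

	kfree(pages);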