Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig                     |   10
-rw-r--r--  lib/Kconfig.debug               |   22
-rw-r--r--  lib/Kconfig.kasan               |   34
-rw-r--r--  lib/Kconfig.kfence              |    2
-rw-r--r--  lib/Kconfig.kmsan               |    2
-rw-r--r--  lib/crc-ccitt.c                 |   55
-rw-r--r--  lib/crypto/aesgcm.c             |   13
-rw-r--r--  lib/crypto/mpi/ec.c             |    3
-rw-r--r--  lib/debugobjects.c              |  200
-rw-r--r--  lib/fortify_kunit.c             |    5
-rw-r--r--  lib/fw_table.c                  |   30
-rw-r--r--  lib/idr.c                       |    2
-rw-r--r--  lib/iov_iter.c                  |   13
-rw-r--r--  lib/kunit/Makefile              |    3
-rw-r--r--  lib/kunit/attributes.c          |   60
-rw-r--r--  lib/kunit/debugfs.c             |  102
-rw-r--r--  lib/kunit/device-impl.h         |   17
-rw-r--r--  lib/kunit/device.c              |  181
-rw-r--r--  lib/kunit/executor.c            |   68
-rw-r--r--  lib/kunit/kunit-example-test.c  |   87
-rw-r--r--  lib/kunit/kunit-test.c          |  139
-rw-r--r--  lib/kunit/string-stream-test.c  |    2
-rw-r--r--  lib/kunit/string-stream.c       |    2
-rw-r--r--  lib/kunit/test.c                |   48
-rw-r--r--  lib/maple_tree.c                | 1097
-rw-r--r--  lib/overflow_kunit.c            |    5
-rw-r--r--  lib/raid6/s390vx.uc             |    2
-rw-r--r--  lib/stackdepot.c                |  461
-rw-r--r--  lib/test_bpf.c                  |   20
-rw-r--r--  lib/test_firmware.c             |    1
-rw-r--r--  lib/test_ida.c                  |   42
-rw-r--r--  lib/test_maple_tree.c           |  331
-rw-r--r--  lib/test_meminit.c              |    2
-rw-r--r--  lib/test_rhashtable.c           |    1
-rw-r--r--  lib/test_sysctl.c               |   31
-rw-r--r--  lib/trace_readwrite.c           |    2
-rw-r--r--  lib/ubsan.c                     |    7
-rw-r--r--  lib/vsprintf.c                  |   11
38 files changed, 2102 insertions(+), 1011 deletions(-)
diff --git a/lib/Kconfig b/lib/Kconfig
index 3ea1c830efab..5ddda7c2ed9b 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -713,10 +713,20 @@ config ARCH_STACKWALK
config STACKDEPOT
bool
select STACKTRACE
+ help
+ Stack depot: stack trace storage that avoids duplication
config STACKDEPOT_ALWAYS_INIT
bool
select STACKDEPOT
+ help
+ Always initialize stack depot during early boot
+
+config STACKDEPOT_MAX_FRAMES
+ int "Maximum number of frames in trace saved in stack depot"
+ range 1 256
+ default 64
+ depends on STACKDEPOT
config REF_TRACKER
bool
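
[Editor's note, not part of the patch: a minimal sketch, assuming the existing stack_trace_save() and stack_depot_save() APIs are used unchanged, of how a stack depot consumer stores a trace. The new CONFIG_STACKDEPOT_MAX_FRAMES knob added above caps how many of the captured frames the depot keeps per stored record; record_current_stack() is a hypothetical helper.]

    #include <linux/kernel.h>
    #include <linux/stackdepot.h>
    #include <linux/stacktrace.h>

    static depot_stack_handle_t record_current_stack(gfp_t gfp)
    {
    	unsigned long entries[32];
    	unsigned int nr;

    	/* Capture up to 32 frames of the current call chain. */
    	nr = stack_trace_save(entries, ARRAY_SIZE(entries), 0);

    	/*
    	 * Deduplicate and store the trace; the depot truncates each record
    	 * to at most CONFIG_STACKDEPOT_MAX_FRAMES frames.
    	 */
    	return stack_depot_save(entries, nr, gfp);
    }
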
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index cc7d53d9dc01..97ce28f4d154 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -763,6 +763,8 @@ config DEBUG_STACK_USAGE
help
Enables the display of the minimum amount of free stack which each
task has ever had available in the sysrq-T and sysrq-P debug output.
+ Also emits a message to dmesg when a process exits if that process
+ used more stack space than previously exiting processes.
This option will slow down process creation somewhat.
@@ -1739,21 +1741,6 @@ config DEBUG_MAPLE_TREE
endmenu
-config DEBUG_CREDENTIALS
- bool "Debug credential management"
- depends on DEBUG_KERNEL
- help
- Enable this to turn on some debug checking for credential
- management. The additional code keeps track of the number of
- pointers from task_structs to any given cred struct, and checks to
- see that this number never exceeds the usage count of the cred
- struct.
-
- Furthermore, if SELinux is enabled, this also checks that the
- security pointer in the cred struct is never seen to be invalid.
-
- If unsure, say N.
-
source "kernel/rcu/Kconfig.debug"
config DEBUG_WQ_FORCE_RR_CPU
@@ -1985,7 +1972,6 @@ config FAULT_INJECTION
config FAILSLAB
bool "Fault-injection capability for kmalloc"
depends on FAULT_INJECTION
- depends on SLAB || SLUB
help
Provide fault-injection capability for kmalloc.
@@ -2103,10 +2089,6 @@ config KCOV
KCOV exposes kernel code coverage information in a form suitable
for coverage-guided fuzzing (randomized testing).
- If RANDOMIZE_BASE is enabled, PC values will not be stable across
- different machines and across reboots. If you need stable PC values,
- disable RANDOMIZE_BASE.
-
For more details, see Documentation/dev-tools/kcov.rst.
config KCOV_ENABLE_COMPARISONS
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
index fdca89c05745..e6eda054ab27 100644
--- a/lib/Kconfig.kasan
+++ b/lib/Kconfig.kasan
@@ -37,7 +37,7 @@ menuconfig KASAN
(HAVE_ARCH_KASAN_SW_TAGS && CC_HAS_KASAN_SW_TAGS)) && \
CC_HAS_WORKING_NOSANITIZE_ADDRESS) || \
HAVE_ARCH_KASAN_HW_TAGS
- depends on (SLUB && SYSFS && !SLUB_TINY) || (SLAB && !DEBUG_SLAB)
+ depends on SYSFS && !SLUB_TINY
select STACKDEPOT_ALWAYS_INIT
help
Enables KASAN (Kernel Address Sanitizer) - a dynamic memory safety
@@ -78,7 +78,7 @@ config KASAN_GENERIC
bool "Generic KASAN"
depends on HAVE_ARCH_KASAN && CC_HAS_KASAN_GENERIC
depends on CC_HAS_WORKING_NOSANITIZE_ADDRESS
- select SLUB_DEBUG if SLUB
+ select SLUB_DEBUG
select CONSTRUCTORS
help
Enables Generic KASAN.
@@ -89,13 +89,11 @@ config KASAN_GENERIC
overhead of ~50% for dynamic allocations.
The performance slowdown is ~x3.
- (Incompatible with CONFIG_DEBUG_SLAB: the kernel does not boot.)
-
config KASAN_SW_TAGS
bool "Software Tag-Based KASAN"
depends on HAVE_ARCH_KASAN_SW_TAGS && CC_HAS_KASAN_SW_TAGS
depends on CC_HAS_WORKING_NOSANITIZE_ADDRESS
- select SLUB_DEBUG if SLUB
+ select SLUB_DEBUG
select CONSTRUCTORS
help
Enables Software Tag-Based KASAN.
@@ -110,12 +108,9 @@ config KASAN_SW_TAGS
May potentially introduce problems related to pointer casting and
comparison, as it embeds a tag into the top byte of each pointer.
- (Incompatible with CONFIG_DEBUG_SLAB: the kernel does not boot.)
-
config KASAN_HW_TAGS
bool "Hardware Tag-Based KASAN"
depends on HAVE_ARCH_KASAN_HW_TAGS
- depends on SLUB
help
Enables Hardware Tag-Based KASAN.
@@ -134,7 +129,7 @@ endchoice
choice
prompt "Instrumentation type"
depends on KASAN_GENERIC || KASAN_SW_TAGS
- default KASAN_OUTLINE
+ default KASAN_INLINE if !ARCH_DISABLE_KASAN_INLINE
config KASAN_OUTLINE
bool "Outline instrumentation"
@@ -207,4 +202,25 @@ config KASAN_MODULE_TEST
A part of the KASAN test suite that is not integrated with KUnit.
Incompatible with Hardware Tag-Based KASAN.
+config KASAN_EXTRA_INFO
+ bool "Record and report more information"
+ depends on KASAN
+ help
+ Record and report more information to help us find the cause of the
+ bug and to help us correlate the error with other system events.
+
+ Currently, the CPU number and timestamp are additionally
+ recorded for each heap block at allocation and free time, and
+ 8 bytes will be added to each metadata structure that records
+ allocation or free information.
+
+ In Generic KASAN, each kmalloc-8 and kmalloc-16 object will add
+ 16 bytes of additional memory consumption, and each kmalloc-32
+ object will add 8 bytes of additional memory consumption, not
+ affecting other larger objects.
+
+ In SW_TAGS KASAN and HW_TAGS KASAN, depending on the stack_ring_size
+ boot parameter, it will add 8 * stack_ring_size bytes of additional
+ memory consumption.
+
endif # KASAN
diff --git a/lib/Kconfig.kfence b/lib/Kconfig.kfence
index 459dda9ef619..6fbbebec683a 100644
--- a/lib/Kconfig.kfence
+++ b/lib/Kconfig.kfence
@@ -5,7 +5,7 @@ config HAVE_ARCH_KFENCE
menuconfig KFENCE
bool "KFENCE: low-overhead sampling-based memory safety error detector"
- depends on HAVE_ARCH_KFENCE && (SLAB || SLUB)
+ depends on HAVE_ARCH_KFENCE
select STACKTRACE
select IRQ_WORK
help
diff --git a/lib/Kconfig.kmsan b/lib/Kconfig.kmsan
index ef2c8f256c57..0541d7b079cc 100644
--- a/lib/Kconfig.kmsan
+++ b/lib/Kconfig.kmsan
@@ -11,7 +11,7 @@ config HAVE_KMSAN_COMPILER
config KMSAN
bool "KMSAN: detector of uninitialized values use"
depends on HAVE_ARCH_KMSAN && HAVE_KMSAN_COMPILER
- depends on SLUB && DEBUG_KERNEL && !KASAN && !KCSAN
+ depends on DEBUG_KERNEL && !KASAN && !KCSAN
depends on !PREEMPT_RT
select STACKDEPOT
select STACKDEPOT_ALWAYS_INIT
diff --git a/lib/crc-ccitt.c b/lib/crc-ccitt.c
index d1a7d29d2ac9..9cddf35d3b66 100644
--- a/lib/crc-ccitt.c
+++ b/lib/crc-ccitt.c
@@ -49,46 +49,6 @@ u16 const crc_ccitt_table[256] = {
};
EXPORT_SYMBOL(crc_ccitt_table);
-/*
- * Similar table to calculate CRC16 variant known as CRC-CCITT-FALSE
- * Reflected bits order, does not augment final value.
- */
-u16 const crc_ccitt_false_table[256] = {
- 0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50A5, 0x60C6, 0x70E7,
- 0x8108, 0x9129, 0xA14A, 0xB16B, 0xC18C, 0xD1AD, 0xE1CE, 0xF1EF,
- 0x1231, 0x0210, 0x3273, 0x2252, 0x52B5, 0x4294, 0x72F7, 0x62D6,
- 0x9339, 0x8318, 0xB37B, 0xA35A, 0xD3BD, 0xC39C, 0xF3FF, 0xE3DE,
- 0x2462, 0x3443, 0x0420, 0x1401, 0x64E6, 0x74C7, 0x44A4, 0x5485,
- 0xA56A, 0xB54B, 0x8528, 0x9509, 0xE5EE, 0xF5CF, 0xC5AC, 0xD58D,
- 0x3653, 0x2672, 0x1611, 0x0630, 0x76D7, 0x66F6, 0x5695, 0x46B4,
- 0xB75B, 0xA77A, 0x9719, 0x8738, 0xF7DF, 0xE7FE, 0xD79D, 0xC7BC,
- 0x48C4, 0x58E5, 0x6886, 0x78A7, 0x0840, 0x1861, 0x2802, 0x3823,
- 0xC9CC, 0xD9ED, 0xE98E, 0xF9AF, 0x8948, 0x9969, 0xA90A, 0xB92B,
- 0x5AF5, 0x4AD4, 0x7AB7, 0x6A96, 0x1A71, 0x0A50, 0x3A33, 0x2A12,
- 0xDBFD, 0xCBDC, 0xFBBF, 0xEB9E, 0x9B79, 0x8B58, 0xBB3B, 0xAB1A,
- 0x6CA6, 0x7C87, 0x4CE4, 0x5CC5, 0x2C22, 0x3C03, 0x0C60, 0x1C41,
- 0xEDAE, 0xFD8F, 0xCDEC, 0xDDCD, 0xAD2A, 0xBD0B, 0x8D68, 0x9D49,
- 0x7E97, 0x6EB6, 0x5ED5, 0x4EF4, 0x3E13, 0x2E32, 0x1E51, 0x0E70,
- 0xFF9F, 0xEFBE, 0xDFDD, 0xCFFC, 0xBF1B, 0xAF3A, 0x9F59, 0x8F78,
- 0x9188, 0x81A9, 0xB1CA, 0xA1EB, 0xD10C, 0xC12D, 0xF14E, 0xE16F,
- 0x1080, 0x00A1, 0x30C2, 0x20E3, 0x5004, 0x4025, 0x7046, 0x6067,
- 0x83B9, 0x9398, 0xA3FB, 0xB3DA, 0xC33D, 0xD31C, 0xE37F, 0xF35E,
- 0x02B1, 0x1290, 0x22F3, 0x32D2, 0x4235, 0x5214, 0x6277, 0x7256,
- 0xB5EA, 0xA5CB, 0x95A8, 0x8589, 0xF56E, 0xE54F, 0xD52C, 0xC50D,
- 0x34E2, 0x24C3, 0x14A0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405,
- 0xA7DB, 0xB7FA, 0x8799, 0x97B8, 0xE75F, 0xF77E, 0xC71D, 0xD73C,
- 0x26D3, 0x36F2, 0x0691, 0x16B0, 0x6657, 0x7676, 0x4615, 0x5634,
- 0xD94C, 0xC96D, 0xF90E, 0xE92F, 0x99C8, 0x89E9, 0xB98A, 0xA9AB,
- 0x5844, 0x4865, 0x7806, 0x6827, 0x18C0, 0x08E1, 0x3882, 0x28A3,
- 0xCB7D, 0xDB5C, 0xEB3F, 0xFB1E, 0x8BF9, 0x9BD8, 0xABBB, 0xBB9A,
- 0x4A75, 0x5A54, 0x6A37, 0x7A16, 0x0AF1, 0x1AD0, 0x2AB3, 0x3A92,
- 0xFD2E, 0xED0F, 0xDD6C, 0xCD4D, 0xBDAA, 0xAD8B, 0x9DE8, 0x8DC9,
- 0x7C26, 0x6C07, 0x5C64, 0x4C45, 0x3CA2, 0x2C83, 0x1CE0, 0x0CC1,
- 0xEF1F, 0xFF3E, 0xCF5D, 0xDF7C, 0xAF9B, 0xBFBA, 0x8FD9, 0x9FF8,
- 0x6E17, 0x7E36, 0x4E55, 0x5E74, 0x2E93, 0x3EB2, 0x0ED1, 0x1EF0
-};
-EXPORT_SYMBOL(crc_ccitt_false_table);
-
/**
* crc_ccitt - recompute the CRC (CRC-CCITT variant) for the data
* buffer
@@ -104,20 +64,5 @@ u16 crc_ccitt(u16 crc, u8 const *buffer, size_t len)
}
EXPORT_SYMBOL(crc_ccitt);
-/**
- * crc_ccitt_false - recompute the CRC (CRC-CCITT-FALSE variant)
- * for the data buffer
- * @crc: previous CRC value
- * @buffer: data pointer
- * @len: number of bytes in the buffer
- */
-u16 crc_ccitt_false(u16 crc, u8 const *buffer, size_t len)
-{
- while (len--)
- crc = crc_ccitt_false_byte(crc, *buffer++);
- return crc;
-}
-EXPORT_SYMBOL(crc_ccitt_false);
-
MODULE_DESCRIPTION("CRC-CCITT calculations");
MODULE_LICENSE("GPL");
diff --git a/lib/crypto/aesgcm.c b/lib/crypto/aesgcm.c
index c632d6e17af8..6bba6473fdf3 100644
--- a/lib/crypto/aesgcm.c
+++ b/lib/crypto/aesgcm.c
@@ -73,6 +73,19 @@ static void aesgcm_ghash(be128 *ghash, const be128 *key, const void *src,
}
}
+/**
+ * aesgcm_mac - Generates the authentication tag using AES-GCM algorithm.
+ * @ctx: The data structure that will hold the AES-GCM key schedule
+ * @src: The input source data.
+ * @src_len: Length of the source data.
+ * @assoc: Points to the associated data.
+ * @assoc_len: Length of the associated data values.
+ * @ctr: Points to the counter value.
+ * @authtag: The output buffer for the authentication tag.
+ *
+ * It takes in the AES-GCM context, source data, associated data, counter value,
+ * and an output buffer for the authentication tag.
+ */
static void aesgcm_mac(const struct aesgcm_ctx *ctx, const u8 *src, int src_len,
const u8 *assoc, int assoc_len, __be32 *ctr, u8 *authtag)
{
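
[Editor's note, not part of the patch: a minimal sketch of the public <crypto/aesgcm.h> entry points that end up calling the newly documented aesgcm_mac() internally. demo_seal() is a hypothetical wrapper, and the aesgcm_expandkey()/aesgcm_encrypt() signatures are assumed to match the current library.]

    #include <crypto/aesgcm.h>
    #include <crypto/gcm.h>

    static int demo_seal(struct aesgcm_ctx *ctx, const u8 *key, int keylen,
    		     u8 *buf, int len, const u8 iv[GCM_AES_IV_SIZE],
    		     u8 tag[16])
    {
    	int err;

    	/* Expand the AES key and fix the authentication tag size at 16 bytes. */
    	err = aesgcm_expandkey(ctx, key, keylen, 16);
    	if (err)
    		return err;

    	/* Encrypt in place; the authentication tag is written to @tag. */
    	aesgcm_encrypt(ctx, buf, buf, len, NULL, 0, iv, tag);
    	return 0;
    }
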
diff --git a/lib/crypto/mpi/ec.c b/lib/crypto/mpi/ec.c
index 40f5908e57a4..e16dca1e23d5 100644
--- a/lib/crypto/mpi/ec.c
+++ b/lib/crypto/mpi/ec.c
@@ -584,6 +584,9 @@ void mpi_ec_init(struct mpi_ec_ctx *ctx, enum gcry_mpi_ec_models model,
ctx->a = mpi_copy(a);
ctx->b = mpi_copy(b);
+ ctx->d = NULL;
+ ctx->t.two_inv_p = NULL;
+
ctx->t.p_barrett = use_barrett > 0 ? mpi_barrett_init(ctx->p, 0) : NULL;
mpi_ec_get_reset(ctx);
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 2a8e9d63fbe3..fb12a9bacd2f 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -620,9 +620,8 @@ static void debug_objects_fill_pool(void)
static void
__debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack)
{
- enum debug_obj_state state;
+ struct debug_obj *obj, o;
struct debug_bucket *db;
- struct debug_obj *obj;
unsigned long flags;
debug_objects_fill_pool();
@@ -643,24 +642,18 @@ __debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack
case ODEBUG_STATE_INIT:
case ODEBUG_STATE_INACTIVE:
obj->state = ODEBUG_STATE_INIT;
- break;
-
- case ODEBUG_STATE_ACTIVE:
- state = obj->state;
- raw_spin_unlock_irqrestore(&db->lock, flags);
- debug_print_object(obj, "init");
- debug_object_fixup(descr->fixup_init, addr, state);
- return;
-
- case ODEBUG_STATE_DESTROYED:
raw_spin_unlock_irqrestore(&db->lock, flags);
- debug_print_object(obj, "init");
return;
default:
break;
}
+ o = *obj;
raw_spin_unlock_irqrestore(&db->lock, flags);
+ debug_print_object(&o, "init");
+
+ if (o.state == ODEBUG_STATE_ACTIVE)
+ debug_object_fixup(descr->fixup_init, addr, o.state);
}
/**
@@ -701,11 +694,9 @@ EXPORT_SYMBOL_GPL(debug_object_init_on_stack);
int debug_object_activate(void *addr, const struct debug_obj_descr *descr)
{
struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
- enum debug_obj_state state;
struct debug_bucket *db;
struct debug_obj *obj;
unsigned long flags;
- int ret;
if (!debug_objects_enabled)
return 0;
@@ -717,49 +708,38 @@ int debug_object_activate(void *addr, const struct debug_obj_descr *descr)
raw_spin_lock_irqsave(&db->lock, flags);
obj = lookup_object_or_alloc(addr, db, descr, false, true);
- if (likely(!IS_ERR_OR_NULL(obj))) {
- bool print_object = false;
-
+ if (unlikely(!obj)) {
+ raw_spin_unlock_irqrestore(&db->lock, flags);
+ debug_objects_oom();
+ return 0;
+ } else if (likely(!IS_ERR(obj))) {
switch (obj->state) {
- case ODEBUG_STATE_INIT:
- case ODEBUG_STATE_INACTIVE:
- obj->state = ODEBUG_STATE_ACTIVE;
- ret = 0;
- break;
-
case ODEBUG_STATE_ACTIVE:
- state = obj->state;
- raw_spin_unlock_irqrestore(&db->lock, flags);
- debug_print_object(obj, "activate");
- ret = debug_object_fixup(descr->fixup_activate, addr, state);
- return ret ? 0 : -EINVAL;
-
case ODEBUG_STATE_DESTROYED:
- print_object = true;
- ret = -EINVAL;
+ o = *obj;
break;
+ case ODEBUG_STATE_INIT:
+ case ODEBUG_STATE_INACTIVE:
+ obj->state = ODEBUG_STATE_ACTIVE;
+ fallthrough;
default:
- ret = 0;
- break;
+ raw_spin_unlock_irqrestore(&db->lock, flags);
+ return 0;
}
- raw_spin_unlock_irqrestore(&db->lock, flags);
- if (print_object)
- debug_print_object(obj, "activate");
- return ret;
}
raw_spin_unlock_irqrestore(&db->lock, flags);
+ debug_print_object(&o, "activate");
- /* If NULL the allocation has hit OOM */
- if (!obj) {
- debug_objects_oom();
- return 0;
+ switch (o.state) {
+ case ODEBUG_STATE_ACTIVE:
+ case ODEBUG_STATE_NOTAVAILABLE:
+ if (debug_object_fixup(descr->fixup_activate, addr, o.state))
+ return 0;
+ fallthrough;
+ default:
+ return -EINVAL;
}
-
- /* Object is neither static nor tracked. It's not initialized */
- debug_print_object(&o, "activate");
- ret = debug_object_fixup(descr->fixup_activate, addr, ODEBUG_STATE_NOTAVAILABLE);
- return ret ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(debug_object_activate);
@@ -770,10 +750,10 @@ EXPORT_SYMBOL_GPL(debug_object_activate);
*/
void debug_object_deactivate(void *addr, const struct debug_obj_descr *descr)
{
+ struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
struct debug_bucket *db;
struct debug_obj *obj;
unsigned long flags;
- bool print_object = false;
if (!debug_objects_enabled)
return;
@@ -785,33 +765,24 @@ void debug_object_deactivate(void *addr, const struct debug_obj_descr *descr)
obj = lookup_object(addr, db);
if (obj) {
switch (obj->state) {
+ case ODEBUG_STATE_DESTROYED:
+ break;
case ODEBUG_STATE_INIT:
case ODEBUG_STATE_INACTIVE:
case ODEBUG_STATE_ACTIVE:
- if (!obj->astate)
- obj->state = ODEBUG_STATE_INACTIVE;
- else
- print_object = true;
- break;
-
- case ODEBUG_STATE_DESTROYED:
- print_object = true;
- break;
+ if (obj->astate)
+ break;
+ obj->state = ODEBUG_STATE_INACTIVE;
+ fallthrough;
default:
- break;
+ raw_spin_unlock_irqrestore(&db->lock, flags);
+ return;
}
+ o = *obj;
}
raw_spin_unlock_irqrestore(&db->lock, flags);
- if (!obj) {
- struct debug_obj o = { .object = addr,
- .state = ODEBUG_STATE_NOTAVAILABLE,
- .descr = descr };
-
- debug_print_object(&o, "deactivate");
- } else if (print_object) {
- debug_print_object(obj, "deactivate");
- }
+ debug_print_object(&o, "deactivate");
}
EXPORT_SYMBOL_GPL(debug_object_deactivate);
@@ -822,11 +793,9 @@ EXPORT_SYMBOL_GPL(debug_object_deactivate);
*/
void debug_object_destroy(void *addr, const struct debug_obj_descr *descr)
{
- enum debug_obj_state state;
+ struct debug_obj *obj, o;
struct debug_bucket *db;
- struct debug_obj *obj;
unsigned long flags;
- bool print_object = false;
if (!debug_objects_enabled)
return;
@@ -836,32 +805,31 @@ void debug_object_destroy(void *addr, const struct debug_obj_descr *descr)
raw_spin_lock_irqsave(&db->lock, flags);
obj = lookup_object(addr, db);
- if (!obj)
- goto out_unlock;
+ if (!obj) {
+ raw_spin_unlock_irqrestore(&db->lock, flags);
+ return;
+ }
switch (obj->state) {
+ case ODEBUG_STATE_ACTIVE:
+ case ODEBUG_STATE_DESTROYED:
+ break;
case ODEBUG_STATE_NONE:
case ODEBUG_STATE_INIT:
case ODEBUG_STATE_INACTIVE:
obj->state = ODEBUG_STATE_DESTROYED;
- break;
- case ODEBUG_STATE_ACTIVE:
- state = obj->state;
+ fallthrough;
+ default:
raw_spin_unlock_irqrestore(&db->lock, flags);
- debug_print_object(obj, "destroy");
- debug_object_fixup(descr->fixup_destroy, addr, state);
return;
-
- case ODEBUG_STATE_DESTROYED:
- print_object = true;
- break;
- default:
- break;
}
-out_unlock:
+
+ o = *obj;
raw_spin_unlock_irqrestore(&db->lock, flags);
- if (print_object)
- debug_print_object(obj, "destroy");
+ debug_print_object(&o, "destroy");
+
+ if (o.state == ODEBUG_STATE_ACTIVE)
+ debug_object_fixup(descr->fixup_destroy, addr, o.state);
}
EXPORT_SYMBOL_GPL(debug_object_destroy);
@@ -872,9 +840,8 @@ EXPORT_SYMBOL_GPL(debug_object_destroy);
*/
void debug_object_free(void *addr, const struct debug_obj_descr *descr)
{
- enum debug_obj_state state;
+ struct debug_obj *obj, o;
struct debug_bucket *db;
- struct debug_obj *obj;
unsigned long flags;
if (!debug_objects_enabled)
@@ -885,24 +852,26 @@ void debug_object_free(void *addr, const struct debug_obj_descr *descr)
raw_spin_lock_irqsave(&db->lock, flags);
obj = lookup_object(addr, db);
- if (!obj)
- goto out_unlock;
+ if (!obj) {
+ raw_spin_unlock_irqrestore(&db->lock, flags);
+ return;
+ }
switch (obj->state) {
case ODEBUG_STATE_ACTIVE:
- state = obj->state;
- raw_spin_unlock_irqrestore(&db->lock, flags);
- debug_print_object(obj, "free");
- debug_object_fixup(descr->fixup_free, addr, state);
- return;
+ break;
default:
hlist_del(&obj->node);
raw_spin_unlock_irqrestore(&db->lock, flags);
free_object(obj);
return;
}
-out_unlock:
+
+ o = *obj;
raw_spin_unlock_irqrestore(&db->lock, flags);
+ debug_print_object(&o, "free");
+
+ debug_object_fixup(descr->fixup_free, addr, o.state);
}
EXPORT_SYMBOL_GPL(debug_object_free);
@@ -954,10 +923,10 @@ void
debug_object_active_state(void *addr, const struct debug_obj_descr *descr,
unsigned int expect, unsigned int next)
{
+ struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
struct debug_bucket *db;
struct debug_obj *obj;
unsigned long flags;
- bool print_object = false;
if (!debug_objects_enabled)
return;
@@ -970,28 +939,19 @@ debug_object_active_state(void *addr, const struct debug_obj_descr *descr,
if (obj) {
switch (obj->state) {
case ODEBUG_STATE_ACTIVE:
- if (obj->astate == expect)
- obj->astate = next;
- else
- print_object = true;
- break;
-
+ if (obj->astate != expect)
+ break;
+ obj->astate = next;
+ raw_spin_unlock_irqrestore(&db->lock, flags);
+ return;
default:
- print_object = true;
break;
}
+ o = *obj;
}
raw_spin_unlock_irqrestore(&db->lock, flags);
- if (!obj) {
- struct debug_obj o = { .object = addr,
- .state = ODEBUG_STATE_NOTAVAILABLE,
- .descr = descr };
-
- debug_print_object(&o, "active_state");
- } else if (print_object) {
- debug_print_object(obj, "active_state");
- }
+ debug_print_object(&o, "active_state");
}
EXPORT_SYMBOL_GPL(debug_object_active_state);
@@ -999,12 +959,10 @@ EXPORT_SYMBOL_GPL(debug_object_active_state);
static void __debug_check_no_obj_freed(const void *address, unsigned long size)
{
unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
- const struct debug_obj_descr *descr;
- enum debug_obj_state state;
+ int cnt, objs_checked = 0;
+ struct debug_obj *obj, o;
struct debug_bucket *db;
struct hlist_node *tmp;
- struct debug_obj *obj;
- int cnt, objs_checked = 0;
saddr = (unsigned long) address;
eaddr = saddr + size;
@@ -1026,12 +984,10 @@ repeat:
switch (obj->state) {
case ODEBUG_STATE_ACTIVE:
- descr = obj->descr;
- state = obj->state;
+ o = *obj;
raw_spin_unlock_irqrestore(&db->lock, flags);
- debug_print_object(obj, "free");
- debug_object_fixup(descr->fixup_free,
- (void *) oaddr, state);
+ debug_print_object(&o, "free");
+ debug_object_fixup(o.descr->fixup_free, (void *)oaddr, o.state);
goto repeat;
default:
hlist_del(&obj->node);
diff --git a/lib/fortify_kunit.c b/lib/fortify_kunit.c
index c8c33cbaae9e..2e4fedc81621 100644
--- a/lib/fortify_kunit.c
+++ b/lib/fortify_kunit.c
@@ -15,6 +15,7 @@
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <kunit/device.h>
#include <kunit/test.h>
#include <linux/device.h>
#include <linux/slab.h>
@@ -269,7 +270,7 @@ DEFINE_ALLOC_SIZE_TEST_PAIR(kvmalloc)
size_t len; \
\
/* Create dummy device for devm_kmalloc()-family tests. */ \
- dev = root_device_register(dev_name); \
+ dev = kunit_device_register(test, dev_name); \
KUNIT_ASSERT_FALSE_MSG(test, IS_ERR(dev), \
"Cannot register test device\n"); \
\
@@ -303,7 +304,7 @@ DEFINE_ALLOC_SIZE_TEST_PAIR(kvmalloc)
checker(len, devm_kmemdup(dev, "Ohai", len, gfp), \
devm_kfree(dev, p)); \
\
- device_unregister(dev); \
+ kunit_device_unregister(test, dev); \
} while (0)
DEFINE_ALLOC_SIZE_TEST_PAIR(devm_kmalloc)
diff --git a/lib/fw_table.c b/lib/fw_table.c
index 294df54e33b6..c49a09ee3853 100644
--- a/lib/fw_table.c
+++ b/lib/fw_table.c
@@ -85,11 +85,6 @@ acpi_get_subtable_type(char *id)
return ACPI_SUBTABLE_COMMON;
}
-static __init_or_acpilib bool has_handler(struct acpi_subtable_proc *proc)
-{
- return proc->handler || proc->handler_arg;
-}
-
static __init_or_acpilib int call_handler(struct acpi_subtable_proc *proc,
union acpi_subtable_headers *hdr,
unsigned long end)
@@ -133,7 +128,6 @@ acpi_parse_entries_array(char *id, unsigned long table_size,
unsigned long table_end, subtable_len, entry_len;
struct acpi_subtable_entry entry;
int count = 0;
- int errs = 0;
int i;
table_end = (unsigned long)table_header + table_header->length;
@@ -145,25 +139,19 @@ acpi_parse_entries_array(char *id, unsigned long table_size,
((unsigned long)table_header + table_size);
subtable_len = acpi_get_subtable_header_length(&entry);
- while (((unsigned long)entry.hdr) + subtable_len < table_end) {
- if (max_entries && count >= max_entries)
- break;
-
+ while (((unsigned long)entry.hdr) + subtable_len < table_end) {
for (i = 0; i < proc_num; i++) {
if (acpi_get_entry_type(&entry) != proc[i].id)
continue;
- if (!has_handler(&proc[i]) ||
- (!errs &&
- call_handler(&proc[i], entry.hdr, table_end))) {
- errs++;
- continue;
- }
+
+ if (!max_entries || count < max_entries)
+ if (call_handler(&proc[i], entry.hdr, table_end))
+ return -EINVAL;
proc[i].count++;
+ count++;
break;
}
- if (i != proc_num)
- count++;
/*
* If entry->length is 0, break from this loop to avoid
@@ -180,9 +168,9 @@ acpi_parse_entries_array(char *id, unsigned long table_size,
}
if (max_entries && count > max_entries) {
- pr_warn("[%4.4s:0x%02x] found the maximum %i entries\n",
- id, proc->id, count);
+ pr_warn("[%4.4s:0x%02x] ignored %i entries of %i found\n",
+ id, proc->id, count - max_entries, count);
}
- return errs ? -EINVAL : count;
+ return count;
}
diff --git a/lib/idr.c b/lib/idr.c
index 13f2758c2377..da36054c3ca0 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -508,7 +508,7 @@ void ida_free(struct ida *ida, unsigned int id)
goto delete;
xas_store(&xas, xa_mk_value(v));
} else {
- if (!test_bit(bit, bitmap->bitmap))
+ if (!bitmap || !test_bit(bit, bitmap->bitmap))
goto err;
__clear_bit(bit, bitmap->bitmap);
xas_set_mark(&xas, XA_FREE_MARK);
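
[Editor's note, not part of the patch: a minimal sketch of the caller bug the added "!bitmap" check defends against; demo_ida and demo_bad_free() are hypothetical.]

    #include <linux/idr.h>

    static DEFINE_IDA(demo_ida);

    static void demo_bad_free(void)
    {
    	int id = ida_alloc(&demo_ida, GFP_KERNEL);

    	if (id < 0)
    		return;

    	ida_free(&demo_ida, id);
    	/*
    	 * A second ida_free(&demo_ida, id) here is a bug in the caller;
    	 * with the check above it takes the error path (which warns)
    	 * instead of dereferencing a missing bitmap.
    	 */
    }
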
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 8ff6824a1005..e0aa6b440ca5 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -1369,19 +1369,6 @@ ssize_t import_iovec(int type, const struct iovec __user *uvec,
}
EXPORT_SYMBOL(import_iovec);
-int import_single_range(int rw, void __user *buf, size_t len,
- struct iovec *iov, struct iov_iter *i)
-{
- if (len > MAX_RW_COUNT)
- len = MAX_RW_COUNT;
- if (unlikely(!access_ok(buf, len)))
- return -EFAULT;
-
- iov_iter_ubuf(i, rw, buf, len);
- return 0;
-}
-EXPORT_SYMBOL(import_single_range);
-
int import_ubuf(int rw, void __user *buf, size_t len, struct iov_iter *i)
{
if (len > MAX_RW_COUNT)
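
[Editor's note, not part of the patch: with import_single_range() removed, callers are expected to use import_ubuf(), which drops the scratch struct iovec while keeping the MAX_RW_COUNT clamp and access_ok() check. A hedged sketch of the replacement pattern; demo_import() is hypothetical.]

    #include <linux/uio.h>

    static int demo_import(void __user *ubuf, size_t len, struct iov_iter *iter)
    {
    	/*
    	 * Old pattern:
    	 *	struct iovec iov;
    	 *	import_single_range(ITER_DEST, ubuf, len, &iov, iter);
    	 *
    	 * New pattern: no iovec needed.
    	 */
    	return import_ubuf(ITER_DEST, ubuf, len, iter);
    }
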
diff --git a/lib/kunit/Makefile b/lib/kunit/Makefile
index 46f75f23dfe4..309659a32a78 100644
--- a/lib/kunit/Makefile
+++ b/lib/kunit/Makefile
@@ -7,7 +7,8 @@ kunit-objs += test.o \
assert.o \
try-catch.o \
executor.o \
- attributes.o
+ attributes.o \
+ device.o
ifeq ($(CONFIG_KUNIT_DEBUGFS),y)
kunit-objs += debugfs.o
diff --git a/lib/kunit/attributes.c b/lib/kunit/attributes.c
index 1b512f7e1838..2cf04cc09372 100644
--- a/lib/kunit/attributes.c
+++ b/lib/kunit/attributes.c
@@ -58,6 +58,16 @@ static const char *attr_enum_to_string(void *attr, const char * const str_list[]
return str_list[val];
}
+static const char *attr_bool_to_string(void *attr, bool *to_free)
+{
+ bool val = (bool)attr;
+
+ *to_free = false;
+ if (val)
+ return "true";
+ return "false";
+}
+
static const char *attr_speed_to_string(void *attr, bool *to_free)
{
return attr_enum_to_string(attr, speed_str_list, to_free);
@@ -166,6 +176,37 @@ static int attr_string_filter(void *attr, const char *input, int *err)
return false;
}
+static int attr_bool_filter(void *attr, const char *input, int *err)
+{
+ int i, input_int = -1;
+ long val = (long)attr;
+ const char *input_str = NULL;
+
+ for (i = 0; input[i]; i++) {
+ if (!strchr(op_list, input[i])) {
+ input_str = input + i;
+ break;
+ }
+ }
+
+ if (!input_str) {
+ *err = -EINVAL;
+ pr_err("kunit executor: filter value not found: %s\n", input);
+ return false;
+ }
+
+ if (!strcmp(input_str, "true"))
+ input_int = (int)true;
+ else if (!strcmp(input_str, "false"))
+ input_int = (int)false;
+ else {
+ *err = -EINVAL;
+ pr_err("kunit executor: invalid filter input: %s\n", input);
+ return false;
+ }
+
+ return int_filter(val, input, input_int, err);
+}
/* Get Attribute Methods */
@@ -194,6 +235,17 @@ static void *attr_module_get(void *test_or_suite, bool is_test)
return (void *) "";
}
+static void *attr_is_init_get(void *test_or_suite, bool is_test)
+{
+ struct kunit_suite *suite = is_test ? NULL : test_or_suite;
+ struct kunit_case *test = is_test ? test_or_suite : NULL;
+
+ if (test)
+ return ((void *) NULL);
+ else
+ return ((void *) suite->is_init);
+}
+
/* List of all Test Attributes */
static struct kunit_attr kunit_attr_list[] = {
@@ -212,6 +264,14 @@ static struct kunit_attr kunit_attr_list[] = {
.filter = attr_string_filter,
.attr_default = (void *)"",
.print = PRINT_SUITE,
+ },
+ {
+ .name = "is_init",
+ .get_attr = attr_is_init_get,
+ .to_string = attr_bool_to_string,
+ .filter = attr_bool_filter,
+ .attr_default = (void *)false,
+ .print = PRINT_SUITE,
}
};
diff --git a/lib/kunit/debugfs.c b/lib/kunit/debugfs.c
index 270d185737e6..d548750a325a 100644
--- a/lib/kunit/debugfs.c
+++ b/lib/kunit/debugfs.c
@@ -8,12 +8,14 @@
#include <linux/module.h>
#include <kunit/test.h>
+#include <kunit/test-bug.h>
#include "string-stream.h"
#include "debugfs.h"
#define KUNIT_DEBUGFS_ROOT "kunit"
#define KUNIT_DEBUGFS_RESULTS "results"
+#define KUNIT_DEBUGFS_RUN "run"
/*
* Create a debugfs representation of test suites:
@@ -21,6 +23,8 @@
* Path Semantics
* /sys/kernel/debug/kunit/<testsuite>/results Show results of last run for
* testsuite
+ * /sys/kernel/debug/kunit/<testsuite>/run Write to this file to trigger
+ * testsuite to run
*
*/
@@ -60,12 +64,14 @@ static void debugfs_print_result(struct seq_file *seq, struct string_stream *log
static int debugfs_print_results(struct seq_file *seq, void *v)
{
struct kunit_suite *suite = (struct kunit_suite *)seq->private;
- enum kunit_status success = kunit_suite_has_succeeded(suite);
+ enum kunit_status success;
struct kunit_case *test_case;
if (!suite)
return 0;
+ success = kunit_suite_has_succeeded(suite);
+
/* Print KTAP header so the debugfs log can be parsed as valid KTAP. */
seq_puts(seq, "KTAP version 1\n");
seq_puts(seq, "1..1\n");
@@ -99,6 +105,51 @@ static int debugfs_results_open(struct inode *inode, struct file *file)
return single_open(file, debugfs_print_results, suite);
}
+/*
+ * Print a usage message to the debugfs "run" file
+ * (/sys/kernel/debug/kunit/<testsuite>/run) if opened.
+ */
+static int debugfs_print_run(struct seq_file *seq, void *v)
+{
+ struct kunit_suite *suite = (struct kunit_suite *)seq->private;
+
+ seq_puts(seq, "Write to this file to trigger the test suite to run.\n");
+ seq_printf(seq, "usage: echo \"any string\" > /sys/kernel/debugfs/kunit/%s/run\n",
+ suite->name);
+ return 0;
+}
+
+/*
+ * The debugfs "run" file (/sys/kernel/debug/kunit/<testsuite>/run)
+ * contains no information. Write to the file to trigger the test suite
+ * to run.
+ */
+static int debugfs_run_open(struct inode *inode, struct file *file)
+{
+ struct kunit_suite *suite;
+
+ suite = (struct kunit_suite *)inode->i_private;
+
+ return single_open(file, debugfs_print_run, suite);
+}
+
+/*
+ * Trigger a test suite to run by writing to the suite's "run" debugfs
+ * file found at: /sys/kernel/debug/kunit/<testsuite>/run
+ *
+ * Note: what is written to this file will not be saved.
+ */
+static ssize_t debugfs_run(struct file *file,
+ const char __user *buf, size_t count, loff_t *ppos)
+{
+ struct inode *f_inode = file->f_inode;
+ struct kunit_suite *suite = (struct kunit_suite *) f_inode->i_private;
+
+ __kunit_test_suites_init(&suite, 1);
+
+ return count;
+}
+
static const struct file_operations debugfs_results_fops = {
.open = debugfs_results_open,
.read = seq_read,
@@ -106,17 +157,43 @@ static const struct file_operations debugfs_results_fops = {
.release = debugfs_release,
};
+static const struct file_operations debugfs_run_fops = {
+ .open = debugfs_run_open,
+ .read = seq_read,
+ .write = debugfs_run,
+ .llseek = seq_lseek,
+ .release = debugfs_release,
+};
+
void kunit_debugfs_create_suite(struct kunit_suite *suite)
{
struct kunit_case *test_case;
+ struct string_stream *stream;
+
+ /* If suite log already allocated, do not create new debugfs files. */
+ if (suite->log)
+ return;
- /* Allocate logs before creating debugfs representation. */
- suite->log = alloc_string_stream(GFP_KERNEL);
- string_stream_set_append_newlines(suite->log, true);
+ /*
+ * Allocate logs before creating debugfs representation.
+ * The suite->log and test_case->log pointer are expected to be NULL
+ * if there isn't a log, so only set it if the log stream was created
+ * successfully.
+ */
+ stream = alloc_string_stream(GFP_KERNEL);
+ if (IS_ERR_OR_NULL(stream))
+ return;
+
+ string_stream_set_append_newlines(stream, true);
+ suite->log = stream;
kunit_suite_for_each_test_case(suite, test_case) {
- test_case->log = alloc_string_stream(GFP_KERNEL);
- string_stream_set_append_newlines(test_case->log, true);
+ stream = alloc_string_stream(GFP_KERNEL);
+ if (IS_ERR_OR_NULL(stream))
+ goto err;
+
+ string_stream_set_append_newlines(stream, true);
+ test_case->log = stream;
}
suite->debugfs = debugfs_create_dir(suite->name, debugfs_rootdir);
@@ -124,6 +201,19 @@ void kunit_debugfs_create_suite(struct kunit_suite *suite)
debugfs_create_file(KUNIT_DEBUGFS_RESULTS, S_IFREG | 0444,
suite->debugfs,
suite, &debugfs_results_fops);
+
+ /* Do not create file to re-run test if test runs on init */
+ if (!suite->is_init) {
+ debugfs_create_file(KUNIT_DEBUGFS_RUN, S_IFREG | 0644,
+ suite->debugfs,
+ suite, &debugfs_run_fops);
+ }
+ return;
+
+err:
+ string_stream_destroy(suite->log);
+ kunit_suite_for_each_test_case(suite, test_case)
+ string_stream_destroy(test_case->log);
}
void kunit_debugfs_destroy_suite(struct kunit_suite *suite)
diff --git a/lib/kunit/device-impl.h b/lib/kunit/device-impl.h
new file mode 100644
index 000000000000..54bd55836405
--- /dev/null
+++ b/lib/kunit/device-impl.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * KUnit internal header for device helpers
+ *
+ * Header for KUnit-internal driver / bus management.
+ *
+ * Copyright (C) 2023, Google LLC.
+ * Author: David Gow <davidgow@google.com>
+ */
+
+#ifndef _KUNIT_DEVICE_IMPL_H
+#define _KUNIT_DEVICE_IMPL_H
+
+// For internal use only -- registers the kunit_bus.
+int kunit_bus_init(void);
+
+#endif //_KUNIT_DEVICE_IMPL_H
diff --git a/lib/kunit/device.c b/lib/kunit/device.c
new file mode 100644
index 000000000000..f5371287b375
--- /dev/null
+++ b/lib/kunit/device.c
@@ -0,0 +1,181 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * KUnit-managed device implementation
+ *
+ * Implementation of struct kunit_device helpers for fake devices whose
+ * lifecycle is managed by KUnit.
+ *
+ * Copyright (C) 2023, Google LLC.
+ * Author: David Gow <davidgow@google.com>
+ */
+
+#include <linux/device.h>
+
+#include <kunit/test.h>
+#include <kunit/device.h>
+#include <kunit/resource.h>
+
+#include "device-impl.h"
+
+/* Wrappers for use with kunit_add_action() */
+KUNIT_DEFINE_ACTION_WRAPPER(device_unregister_wrapper, device_unregister, struct device *);
+KUNIT_DEFINE_ACTION_WRAPPER(driver_unregister_wrapper, driver_unregister, struct device_driver *);
+
+/* The root device for the KUnit bus, parent of all kunit_devices. */
+static struct device *kunit_bus_device;
+
+/* A device owned by a KUnit test. */
+struct kunit_device {
+ struct device dev;
+ /* The KUnit test which owns this device. */
+ struct kunit *owner;
+ /* If the driver is managed by KUnit and unique to this device. */
+ const struct device_driver *driver;
+};
+
+#define to_kunit_device(d) container_of_const(d, struct kunit_device, dev)
+
+static struct bus_type kunit_bus_type = {
+ .name = "kunit",
+};
+
+/* Register the 'kunit_bus' used for fake devices. */
+int kunit_bus_init(void)
+{
+ int error;
+
+ kunit_bus_device = root_device_register("kunit");
+ if (!kunit_bus_device)
+ return -ENOMEM;
+
+ error = bus_register(&kunit_bus_type);
+ if (error)
+ bus_unregister(&kunit_bus_type);
+ return error;
+}
+
+/* Release a 'fake' KUnit device. */
+static void kunit_device_release(struct device *d)
+{
+ kfree(to_kunit_device(d));
+}
+
+/*
+ * Create and register a KUnit-managed struct device_driver on the kunit_bus.
+ * Returns an error pointer on failure.
+ */
+struct device_driver *kunit_driver_create(struct kunit *test, const char *name)
+{
+ struct device_driver *driver;
+ int err = -ENOMEM;
+
+ driver = kunit_kzalloc(test, sizeof(*driver), GFP_KERNEL);
+
+ if (!driver)
+ return ERR_PTR(err);
+
+ driver->name = name;
+ driver->bus = &kunit_bus_type;
+ driver->owner = THIS_MODULE;
+
+ err = driver_register(driver);
+ if (err) {
+ kunit_kfree(test, driver);
+ return ERR_PTR(err);
+ }
+
+ kunit_add_action(test, driver_unregister_wrapper, driver);
+ return driver;
+}
+EXPORT_SYMBOL_GPL(kunit_driver_create);
+
+/* Helper which creates a kunit_device, attaches it to the kunit_bus*/
+static struct kunit_device *kunit_device_register_internal(struct kunit *test,
+ const char *name,
+ const struct device_driver *drv)
+{
+ struct kunit_device *kunit_dev;
+ int err = -ENOMEM;
+
+ kunit_dev = kzalloc(sizeof(*kunit_dev), GFP_KERNEL);
+ if (!kunit_dev)
+ return ERR_PTR(err);
+
+ kunit_dev->owner = test;
+
+ err = dev_set_name(&kunit_dev->dev, "%s.%s", test->name, name);
+ if (err) {
+ kfree(kunit_dev);
+ return ERR_PTR(err);
+ }
+
+ kunit_dev->dev.release = kunit_device_release;
+ kunit_dev->dev.bus = &kunit_bus_type;
+ kunit_dev->dev.parent = kunit_bus_device;
+
+ err = device_register(&kunit_dev->dev);
+ if (err) {
+ put_device(&kunit_dev->dev);
+ return ERR_PTR(err);
+ }
+
+ kunit_add_action(test, device_unregister_wrapper, &kunit_dev->dev);
+
+ return kunit_dev;
+}
+
+/*
+ * Create and register a new KUnit-managed device, using the user-supplied device_driver.
+ * On failure, returns an error pointer.
+ */
+struct device *kunit_device_register_with_driver(struct kunit *test,
+ const char *name,
+ const struct device_driver *drv)
+{
+ struct kunit_device *kunit_dev = kunit_device_register_internal(test, name, drv);
+
+ if (IS_ERR_OR_NULL(kunit_dev))
+ return ERR_CAST(kunit_dev);
+
+ return &kunit_dev->dev;
+}
+EXPORT_SYMBOL_GPL(kunit_device_register_with_driver);
+
+/*
+ * Create and register a new KUnit-managed device, including a matching device_driver.
+ * On failure, returns an error pointer.
+ */
+struct device *kunit_device_register(struct kunit *test, const char *name)
+{
+ struct device_driver *drv;
+ struct kunit_device *dev;
+
+ drv = kunit_driver_create(test, name);
+ if (IS_ERR(drv))
+ return ERR_CAST(drv);
+
+ dev = kunit_device_register_internal(test, name, drv);
+ if (IS_ERR(dev)) {
+ kunit_release_action(test, driver_unregister_wrapper, (void *)drv);
+ return ERR_CAST(dev);
+ }
+
+ /* Request the driver be freed. */
+ dev->driver = drv;
+
+
+ return &dev->dev;
+}
+EXPORT_SYMBOL_GPL(kunit_device_register);
+
+/* Unregisters a KUnit-managed device early (including the driver, if automatically created). */
+void kunit_device_unregister(struct kunit *test, struct device *dev)
+{
+ const struct device_driver *driver = to_kunit_device(dev)->driver;
+
+ kunit_release_action(test, device_unregister_wrapper, dev);
+ if (driver)
+ kunit_release_action(test, driver_unregister_wrapper, (void *)driver);
+}
+EXPORT_SYMBOL_GPL(kunit_device_unregister);
+
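
[Editor's note, not part of the patch: a minimal sketch of how a test might use the new helpers, mirroring the fortify_kunit.c conversion earlier in this diff; demo_fake_device_test() is hypothetical.]

    #include <kunit/device.h>
    #include <kunit/test.h>
    #include <linux/device.h>

    static void demo_fake_device_test(struct kunit *test)
    {
    	struct device *dev = kunit_device_register(test, "demo_device");

    	/* Returns an error pointer on failure, never NULL. */
    	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);

    	/*
    	 * devm_* allocations attach to the fake device and are released
    	 * automatically when the test (or the device) is cleaned up.
    	 */
    	KUNIT_EXPECT_NOT_NULL(test, devm_kzalloc(dev, 16, GFP_KERNEL));
    }
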
diff --git a/lib/kunit/executor.c b/lib/kunit/executor.c
index 1236b3cd2fbb..717b9599036b 100644
--- a/lib/kunit/executor.c
+++ b/lib/kunit/executor.c
@@ -12,6 +12,8 @@
*/
extern struct kunit_suite * const __kunit_suites_start[];
extern struct kunit_suite * const __kunit_suites_end[];
+extern struct kunit_suite * const __kunit_init_suites_start[];
+extern struct kunit_suite * const __kunit_init_suites_end[];
static char *action_param;
@@ -292,6 +294,37 @@ void kunit_exec_list_tests(struct kunit_suite_set *suite_set, bool include_attr)
}
}
+struct kunit_suite_set kunit_merge_suite_sets(struct kunit_suite_set init_suite_set,
+ struct kunit_suite_set suite_set)
+{
+ struct kunit_suite_set total_suite_set = {NULL, NULL};
+ struct kunit_suite **total_suite_start = NULL;
+ size_t init_num_suites, num_suites, suite_size;
+ int i = 0;
+
+ init_num_suites = init_suite_set.end - init_suite_set.start;
+ num_suites = suite_set.end - suite_set.start;
+ suite_size = sizeof(suite_set.start);
+
+ /* Allocate memory for array of all kunit suites */
+ total_suite_start = kmalloc_array(init_num_suites + num_suites, suite_size, GFP_KERNEL);
+ if (!total_suite_start)
+ return total_suite_set;
+
+ /* Append and mark init suites and then append all other kunit suites */
+ memcpy(total_suite_start, init_suite_set.start, init_num_suites * suite_size);
+ for (i = 0; i < init_num_suites; i++)
+ total_suite_start[i]->is_init = true;
+
+ memcpy(total_suite_start + init_num_suites, suite_set.start, num_suites * suite_size);
+
+ /* Set kunit suite set start and end */
+ total_suite_set.start = total_suite_start;
+ total_suite_set.end = total_suite_start + (init_num_suites + num_suites);
+
+ return total_suite_set;
+}
+
#if IS_BUILTIN(CONFIG_KUNIT)
static char *kunit_shutdown;
@@ -313,21 +346,41 @@ static void kunit_handle_shutdown(void)
int kunit_run_all_tests(void)
{
- struct kunit_suite_set suite_set = {
+ struct kunit_suite_set suite_set = {NULL, NULL};
+ struct kunit_suite_set filtered_suite_set = {NULL, NULL};
+ struct kunit_suite_set init_suite_set = {
+ __kunit_init_suites_start, __kunit_init_suites_end,
+ };
+ struct kunit_suite_set normal_suite_set = {
__kunit_suites_start, __kunit_suites_end,
};
+ size_t init_num_suites = init_suite_set.end - init_suite_set.start;
int err = 0;
+
+ if (init_num_suites > 0) {
+ suite_set = kunit_merge_suite_sets(init_suite_set, normal_suite_set);
+ if (!suite_set.start)
+ goto out;
+ } else
+ suite_set = normal_suite_set;
+
if (!kunit_enabled()) {
pr_info("kunit: disabled\n");
- goto out;
+ goto free_out;
}
if (filter_glob_param || filter_param) {
- suite_set = kunit_filter_suites(&suite_set, filter_glob_param,
+ filtered_suite_set = kunit_filter_suites(&suite_set, filter_glob_param,
filter_param, filter_action_param, &err);
+
+ /* Free original suite set before using filtered suite set */
+ if (init_num_suites > 0)
+ kfree(suite_set.start);
+ suite_set = filtered_suite_set;
+
if (err) {
pr_err("kunit executor: error filtering suites: %d\n", err);
- goto out;
+ goto free_out;
}
}
@@ -340,9 +393,12 @@ int kunit_run_all_tests(void)
else
pr_err("kunit executor: unknown action '%s'\n", action_param);
- if (filter_glob_param || filter_param) { /* a copy was made of each suite */
+free_out:
+ if (filter_glob_param || filter_param)
kunit_free_suite_set(suite_set);
- }
+ else if (init_num_suites > 0)
+ /* Don't use kunit_free_suite_set because suites aren't individually allocated */
+ kfree(suite_set.start);
out:
kunit_handle_shutdown();
diff --git a/lib/kunit/kunit-example-test.c b/lib/kunit/kunit-example-test.c
index 6bb5c2ef6696..798924f7cc86 100644
--- a/lib/kunit/kunit-example-test.c
+++ b/lib/kunit/kunit-example-test.c
@@ -169,6 +169,16 @@ static int subtract_one(int i)
}
/*
+ * If the function to be replaced is static within a module it is
+ * useful to export a pointer to that function instead of having
+ * to change the static function to a non-static exported function.
+ *
+ * This pointer simulates a module exporting a pointer to a static
+ * function.
+ */
+static int (* const add_one_fn_ptr)(int i) = add_one;
+
+/*
* This test shows the use of static stubs.
*/
static void example_static_stub_test(struct kunit *test)
@@ -187,6 +197,30 @@ static void example_static_stub_test(struct kunit *test)
KUNIT_EXPECT_EQ(test, add_one(1), 2);
}
+/*
+ * This test shows the use of static stubs when the function being
+ * replaced is provided as a pointer-to-function instead of the
+ * actual function. This is useful for providing access to static
+ * functions in a module by exporting a pointer to that function
+ * instead of having to change the static function to a non-static
+ * exported function.
+ */
+static void example_static_stub_using_fn_ptr_test(struct kunit *test)
+{
+ /* By default, function is not stubbed. */
+ KUNIT_EXPECT_EQ(test, add_one(1), 2);
+
+ /* Replace add_one() with subtract_one(). */
+ kunit_activate_static_stub(test, add_one_fn_ptr, subtract_one);
+
+ /* add_one() is now replaced. */
+ KUNIT_EXPECT_EQ(test, add_one(1), 0);
+
+ /* Return add_one() to normal. */
+ kunit_deactivate_static_stub(test, add_one_fn_ptr);
+ KUNIT_EXPECT_EQ(test, add_one(1), 2);
+}
+
static const struct example_param {
int value;
} example_params_array[] = {
@@ -222,6 +256,20 @@ static void example_params_test(struct kunit *test)
}
/*
+ * This test shows the use of test->priv.
+ */
+static void example_priv_test(struct kunit *test)
+{
+ /* unless setup in suite->init(), test->priv is NULL */
+ KUNIT_ASSERT_NULL(test, test->priv);
+
+ /* but can be used to pass arbitrary data to other functions */
+ test->priv = kunit_kzalloc(test, 1, GFP_KERNEL);
+ KUNIT_EXPECT_NOT_NULL(test, test->priv);
+ KUNIT_ASSERT_PTR_EQ(test, test->priv, kunit_get_current_test()->priv);
+}
+
+/*
* This test should always pass. Can be used to practice filtering attributes.
*/
static void example_slow_test(struct kunit *test)
@@ -245,6 +293,8 @@ static struct kunit_case example_test_cases[] = {
KUNIT_CASE(example_mark_skipped_test),
KUNIT_CASE(example_all_expect_macros_test),
KUNIT_CASE(example_static_stub_test),
+ KUNIT_CASE(example_static_stub_using_fn_ptr_test),
+ KUNIT_CASE(example_priv_test),
KUNIT_CASE_PARAM(example_params_test, example_gen_params),
KUNIT_CASE_SLOW(example_slow_test),
{}
@@ -287,4 +337,41 @@ static struct kunit_suite example_test_suite = {
*/
kunit_test_suites(&example_test_suite);
+static int __init init_add(int x, int y)
+{
+ return (x + y);
+}
+
+/*
+ * This test should always pass. Can be used to test init suites.
+ */
+static void __init example_init_test(struct kunit *test)
+{
+ KUNIT_EXPECT_EQ(test, init_add(1, 1), 2);
+}
+
+/*
+ * The kunit_case struct cannot be marked as __initdata as this will be
+ * used in debugfs to retrieve results after test has run
+ */
+static struct kunit_case __refdata example_init_test_cases[] = {
+ KUNIT_CASE(example_init_test),
+ {}
+};
+
+/*
+ * The kunit_suite struct cannot be marked as __initdata as this will be
+ * used in debugfs to retrieve results after test has run
+ */
+static struct kunit_suite example_init_test_suite = {
+ .name = "example_init",
+ .test_cases = example_init_test_cases,
+};
+
+/*
+ * This registers the test suite and marks the suite as using init data
+ * and/or functions.
+ */
+kunit_test_init_section_suites(&example_init_test_suite);
+
MODULE_LICENSE("GPL v2");
diff --git a/lib/kunit/kunit-test.c b/lib/kunit/kunit-test.c
index de2113a58fa0..c4259d910356 100644
--- a/lib/kunit/kunit-test.c
+++ b/lib/kunit/kunit-test.c
@@ -5,9 +5,13 @@
* Copyright (C) 2019, Google LLC.
* Author: Brendan Higgins <brendanhiggins@google.com>
*/
+#include "linux/gfp_types.h"
#include <kunit/test.h>
#include <kunit/test-bug.h>
+#include <linux/device.h>
+#include <kunit/device.h>
+
#include "string-stream.h"
#include "try-catch-impl.h"
@@ -538,10 +542,7 @@ static struct kunit_suite kunit_resource_test_suite = {
#if IS_BUILTIN(CONFIG_KUNIT_TEST)
/* This avoids a cast warning if kfree() is passed direct to kunit_add_action(). */
-static void kfree_wrapper(void *p)
-{
- kfree(p);
-}
+KUNIT_DEFINE_ACTION_WRAPPER(kfree_wrapper, kfree, const void *);
static void kunit_log_test(struct kunit *test)
{
@@ -690,6 +691,134 @@ static struct kunit_case kunit_current_test_cases[] = {
{}
};
+static void test_dev_action(void *priv)
+{
+ *(void **)priv = (void *)1;
+}
+
+static void kunit_device_test(struct kunit *test)
+{
+ struct device *test_device;
+ long action_was_run = 0;
+
+ test_device = kunit_device_register(test, "my_device");
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, test_device);
+
+ // Add an action to verify cleanup.
+ devm_add_action(test_device, test_dev_action, &action_was_run);
+
+ KUNIT_EXPECT_EQ(test, action_was_run, 0);
+
+ kunit_device_unregister(test, test_device);
+
+ KUNIT_EXPECT_EQ(test, action_was_run, 1);
+}
+
+static void kunit_device_cleanup_test(struct kunit *test)
+{
+ struct device *test_device;
+ long action_was_run = 0;
+
+ test_device = kunit_device_register(test, "my_device");
+ KUNIT_ASSERT_NOT_NULL(test, test_device);
+
+ /* Add an action to verify cleanup. */
+ devm_add_action(test_device, test_dev_action, &action_was_run);
+
+ KUNIT_EXPECT_EQ(test, action_was_run, 0);
+
+ /* Force KUnit to run cleanup early. */
+ kunit_cleanup(test);
+
+ KUNIT_EXPECT_EQ(test, action_was_run, 1);
+}
+
+struct driver_test_state {
+ bool driver_device_probed;
+ bool driver_device_removed;
+ long action_was_run;
+};
+
+static int driver_probe_hook(struct device *dev)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct driver_test_state *state = (struct driver_test_state *)test->priv;
+
+ state->driver_device_probed = true;
+ return 0;
+}
+
+static int driver_remove_hook(struct device *dev)
+{
+ struct kunit *test = kunit_get_current_test();
+ struct driver_test_state *state = (struct driver_test_state *)test->priv;
+
+ state->driver_device_removed = true;
+ return 0;
+}
+
+static void kunit_device_driver_test(struct kunit *test)
+{
+ struct device_driver *test_driver;
+ struct device *test_device;
+ struct driver_test_state *test_state = kunit_kzalloc(test, sizeof(*test_state), GFP_KERNEL);
+
+ test->priv = test_state;
+ test_driver = kunit_driver_create(test, "my_driver");
+
+ // This can fail with an error pointer.
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, test_driver);
+
+ test_driver->probe = driver_probe_hook;
+ test_driver->remove = driver_remove_hook;
+
+ test_device = kunit_device_register_with_driver(test, "my_device", test_driver);
+
+ // This can fail with an error pointer.
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, test_device);
+
+ // Make sure the probe function was called.
+ KUNIT_ASSERT_TRUE(test, test_state->driver_device_probed);
+
+ // Add an action to verify cleanup.
+ devm_add_action(test_device, test_dev_action, &test_state->action_was_run);
+
+ KUNIT_EXPECT_EQ(test, test_state->action_was_run, 0);
+
+ kunit_device_unregister(test, test_device);
+ test_device = NULL;
+
+ // Make sure the remove hook was called.
+ KUNIT_ASSERT_TRUE(test, test_state->driver_device_removed);
+
+ // We're going to test this again.
+ test_state->driver_device_probed = false;
+
+ // The driver should not automatically be destroyed by
+ // kunit_device_unregister, so we can re-use it.
+ test_device = kunit_device_register_with_driver(test, "my_device", test_driver);
+
+ // This can fail with an error pointer.
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, test_device);
+
+ // Probe was called again.
+ KUNIT_ASSERT_TRUE(test, test_state->driver_device_probed);
+
+ // Everything is automatically freed here.
+}
+
+static struct kunit_case kunit_device_test_cases[] = {
+ KUNIT_CASE(kunit_device_test),
+ KUNIT_CASE(kunit_device_cleanup_test),
+ KUNIT_CASE(kunit_device_driver_test),
+ {}
+};
+
+static struct kunit_suite kunit_device_test_suite = {
+ .name = "kunit_device",
+ .test_cases = kunit_device_test_cases,
+};
+
static struct kunit_suite kunit_current_test_suite = {
.name = "kunit_current",
.test_cases = kunit_current_test_cases,
@@ -697,6 +826,6 @@ static struct kunit_suite kunit_current_test_suite = {
kunit_test_suites(&kunit_try_catch_test_suite, &kunit_resource_test_suite,
&kunit_log_test_suite, &kunit_status_test_suite,
- &kunit_current_test_suite);
+ &kunit_current_test_suite, &kunit_device_test_suite);
MODULE_LICENSE("GPL v2");
diff --git a/lib/kunit/string-stream-test.c b/lib/kunit/string-stream-test.c
index 06822766f29a..03fb511826f7 100644
--- a/lib/kunit/string-stream-test.c
+++ b/lib/kunit/string-stream-test.c
@@ -72,7 +72,7 @@ static void string_stream_unmanaged_init_test(struct kunit *test)
KUNIT_EXPECT_EQ(test, stream->length, 0);
KUNIT_EXPECT_TRUE(test, list_empty(&stream->fragments));
- KUNIT_EXPECT_EQ(test, stream->gfp, GFP_KERNEL);
+ KUNIT_EXPECT_TRUE(test, (stream->gfp == GFP_KERNEL));
KUNIT_EXPECT_FALSE(test, stream->append_newlines);
KUNIT_EXPECT_TRUE(test, string_stream_is_empty(stream));
diff --git a/lib/kunit/string-stream.c b/lib/kunit/string-stream.c
index a6f3616c2048..54f4fdcbfac8 100644
--- a/lib/kunit/string-stream.c
+++ b/lib/kunit/string-stream.c
@@ -173,7 +173,7 @@ void string_stream_destroy(struct string_stream *stream)
{
KUNIT_STATIC_STUB_REDIRECT(string_stream_destroy, stream);
- if (!stream)
+ if (IS_ERR_OR_NULL(stream))
return;
string_stream_clear(stream);
diff --git a/lib/kunit/test.c b/lib/kunit/test.c
index 7aceb07a1af9..f95d2093a0aa 100644
--- a/lib/kunit/test.c
+++ b/lib/kunit/test.c
@@ -13,15 +13,19 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
+#include <linux/mutex.h>
#include <linux/panic.h>
#include <linux/sched/debug.h>
#include <linux/sched.h>
#include "debugfs.h"
+#include "device-impl.h"
#include "hooks-impl.h"
#include "string-stream.h"
#include "try-catch-impl.h"
+static DEFINE_MUTEX(kunit_run_lock);
+
/*
* Hook to fail the current test and print an error message to the log.
*/
@@ -660,6 +664,7 @@ int kunit_run_tests(struct kunit_suite *suite)
test.param_index++;
test.status = KUNIT_SUCCESS;
test.status_comment[0] = '\0';
+ test.priv = NULL;
}
}
@@ -692,6 +697,9 @@ static void kunit_init_suite(struct kunit_suite *suite)
kunit_debugfs_create_suite(suite);
suite->status_comment[0] = '\0';
suite->suite_init_err = 0;
+
+ if (suite->log)
+ string_stream_clear(suite->log);
}
bool kunit_enabled(void)
@@ -710,6 +718,11 @@ int __kunit_test_suites_init(struct kunit_suite * const * const suites, int num_
kunit_suite_counter = 1;
+ /* Use mutex lock to guard against running tests concurrently. */
+ if (mutex_lock_interruptible(&kunit_run_lock)) {
+ pr_err("kunit: test interrupted\n");
+ return -EINTR;
+ }
static_branch_inc(&kunit_running);
for (i = 0; i < num_suites; i++) {
@@ -718,6 +731,7 @@ int __kunit_test_suites_init(struct kunit_suite * const * const suites, int num_
}
static_branch_dec(&kunit_running);
+ mutex_unlock(&kunit_run_lock);
return 0;
}
EXPORT_SYMBOL_GPL(__kunit_test_suites_init);
@@ -742,28 +756,40 @@ EXPORT_SYMBOL_GPL(__kunit_test_suites_exit);
#ifdef CONFIG_MODULES
static void kunit_module_init(struct module *mod)
{
- struct kunit_suite_set suite_set = {
+ struct kunit_suite_set suite_set, filtered_set;
+ struct kunit_suite_set normal_suite_set = {
mod->kunit_suites, mod->kunit_suites + mod->num_kunit_suites,
};
+ struct kunit_suite_set init_suite_set = {
+ mod->kunit_init_suites, mod->kunit_init_suites + mod->num_kunit_init_suites,
+ };
const char *action = kunit_action();
int err = 0;
- suite_set = kunit_filter_suites(&suite_set,
+ if (mod->num_kunit_init_suites > 0)
+ suite_set = kunit_merge_suite_sets(init_suite_set, normal_suite_set);
+ else
+ suite_set = normal_suite_set;
+
+ filtered_set = kunit_filter_suites(&suite_set,
kunit_filter_glob() ?: "*.*",
kunit_filter(), kunit_filter_action(),
&err);
if (err)
pr_err("kunit module: error filtering suites: %d\n", err);
- mod->kunit_suites = (struct kunit_suite **)suite_set.start;
- mod->num_kunit_suites = suite_set.end - suite_set.start;
+ mod->kunit_suites = (struct kunit_suite **)filtered_set.start;
+ mod->num_kunit_suites = filtered_set.end - filtered_set.start;
+
+ if (mod->num_kunit_init_suites > 0)
+ kfree(suite_set.start);
if (!action)
- kunit_exec_run_tests(&suite_set, false);
+ kunit_exec_run_tests(&filtered_set, false);
else if (!strcmp(action, "list"))
- kunit_exec_list_tests(&suite_set, false);
+ kunit_exec_list_tests(&filtered_set, false);
else if (!strcmp(action, "list_attr"))
- kunit_exec_list_tests(&suite_set, true);
+ kunit_exec_list_tests(&filtered_set, true);
else
pr_err("kunit: unknown action '%s'\n", action);
}
@@ -810,6 +836,8 @@ static struct notifier_block kunit_mod_nb = {
};
#endif
+KUNIT_DEFINE_ACTION_WRAPPER(kfree_action_wrapper, kfree, const void *)
+
void *kunit_kmalloc_array(struct kunit *test, size_t n, size_t size, gfp_t gfp)
{
void *data;
@@ -819,7 +847,7 @@ void *kunit_kmalloc_array(struct kunit *test, size_t n, size_t size, gfp_t gfp)
if (!data)
return NULL;
- if (kunit_add_action_or_reset(test, (kunit_action_t *)kfree, data) != 0)
+ if (kunit_add_action_or_reset(test, kfree_action_wrapper, data) != 0)
return NULL;
return data;
@@ -831,7 +859,7 @@ void kunit_kfree(struct kunit *test, const void *ptr)
if (!ptr)
return;
- kunit_release_action(test, (kunit_action_t *)kfree, (void *)ptr);
+ kunit_release_action(test, kfree_action_wrapper, (void *)ptr);
}
EXPORT_SYMBOL_GPL(kunit_kfree);
@@ -876,6 +904,8 @@ static int __init kunit_init(void)
kunit_install_hooks();
kunit_debugfs_init();
+
+ kunit_bus_init();
#ifdef CONFIG_MODULES
return register_module_notifier(&kunit_mod_nb);
#else
diff --git a/lib/maple_tree.c b/lib/maple_tree.c
index bb24d84a4922..6f241bb38799 100644
--- a/lib/maple_tree.c
+++ b/lib/maple_tree.c
@@ -4,6 +4,8 @@
* Copyright (c) 2018-2022 Oracle Corporation
* Authors: Liam R. Howlett <Liam.Howlett@oracle.com>
* Matthew Wilcox <willy@infradead.org>
+ * Copyright (c) 2023 ByteDance
+ * Author: Peng Zhang <zhangpeng.00@bytedance.com>
*/
/*
@@ -14,8 +16,8 @@
* and are simply the slot index + the minimum of the node.
*
* In regular B-Tree terms, pivots are called keys. The term pivot is used to
- * indicate that the tree is specifying ranges, Pivots may appear in the
- * subtree with an entry attached to the value where as keys are unique to a
+ * indicate that the tree is specifying ranges. Pivots may appear in the
+ * subtree with an entry attached to the value whereas keys are unique to a
* specific position of a B-tree. Pivot values are inclusive of the slot with
* the same index.
*
@@ -165,6 +167,11 @@ static inline int mt_alloc_bulk(gfp_t gfp, size_t size, void **nodes)
return kmem_cache_alloc_bulk(maple_node_cache, gfp, size, nodes);
}
+static inline void mt_free_one(struct maple_node *node)
+{
+ kmem_cache_free(maple_node_cache, node);
+}
+
static inline void mt_free_bulk(size_t size, void __rcu **nodes)
{
kmem_cache_free_bulk(maple_node_cache, size, (void **)nodes);
@@ -205,23 +212,29 @@ static unsigned int mas_mt_height(struct ma_state *mas)
return mt_height(mas->tree);
}
-static inline enum maple_type mte_node_type(const struct maple_enode *entry)
+static inline unsigned int mt_attr(struct maple_tree *mt)
+{
+ return mt->ma_flags & ~MT_FLAGS_HEIGHT_MASK;
+}
+
+static __always_inline enum maple_type mte_node_type(
+ const struct maple_enode *entry)
{
return ((unsigned long)entry >> MAPLE_NODE_TYPE_SHIFT) &
MAPLE_NODE_TYPE_MASK;
}
-static inline bool ma_is_dense(const enum maple_type type)
+static __always_inline bool ma_is_dense(const enum maple_type type)
{
return type < maple_leaf_64;
}
-static inline bool ma_is_leaf(const enum maple_type type)
+static __always_inline bool ma_is_leaf(const enum maple_type type)
{
return type < maple_range_64;
}
-static inline bool mte_is_leaf(const struct maple_enode *entry)
+static __always_inline bool mte_is_leaf(const struct maple_enode *entry)
{
return ma_is_leaf(mte_node_type(entry));
}
@@ -230,60 +243,50 @@ static inline bool mte_is_leaf(const struct maple_enode *entry)
* We also reserve values with the bottom two bits set to '10' which are
* below 4096
*/
-static inline bool mt_is_reserved(const void *entry)
+static __always_inline bool mt_is_reserved(const void *entry)
{
return ((unsigned long)entry < MAPLE_RESERVED_RANGE) &&
xa_is_internal(entry);
}
-static inline void mas_set_err(struct ma_state *mas, long err)
+static __always_inline void mas_set_err(struct ma_state *mas, long err)
{
mas->node = MA_ERROR(err);
+ mas->status = ma_error;
}
-static inline bool mas_is_ptr(const struct ma_state *mas)
+static __always_inline bool mas_is_ptr(const struct ma_state *mas)
{
- return mas->node == MAS_ROOT;
+ return mas->status == ma_root;
}
-static inline bool mas_is_start(const struct ma_state *mas)
+static __always_inline bool mas_is_start(const struct ma_state *mas)
{
- return mas->node == MAS_START;
+ return mas->status == ma_start;
}
-bool mas_is_err(struct ma_state *mas)
+static __always_inline bool mas_is_none(const struct ma_state *mas)
{
- return xa_is_err(mas->node);
+ return mas->status == ma_none;
}
-static __always_inline bool mas_is_overflow(struct ma_state *mas)
+static __always_inline bool mas_is_paused(const struct ma_state *mas)
{
- if (unlikely(mas->node == MAS_OVERFLOW))
- return true;
-
- return false;
+ return mas->status == ma_pause;
}
-static __always_inline bool mas_is_underflow(struct ma_state *mas)
+static __always_inline bool mas_is_overflow(struct ma_state *mas)
{
- if (unlikely(mas->node == MAS_UNDERFLOW))
- return true;
-
- return false;
+ return mas->status == ma_overflow;
}
-static inline bool mas_searchable(struct ma_state *mas)
+static inline bool mas_is_underflow(struct ma_state *mas)
{
- if (mas_is_none(mas))
- return false;
-
- if (mas_is_ptr(mas))
- return false;
-
- return true;
+ return mas->status == ma_underflow;
}
-static inline struct maple_node *mte_to_node(const struct maple_enode *entry)
+static __always_inline struct maple_node *mte_to_node(
+ const struct maple_enode *entry)
{
return (struct maple_node *)((unsigned long)entry & ~MAPLE_NODE_MASK);
}
@@ -360,12 +363,12 @@ static inline bool mte_has_null(const struct maple_enode *node)
return (unsigned long)node & MAPLE_ENODE_NULL;
}
-static inline bool ma_is_root(struct maple_node *node)
+static __always_inline bool ma_is_root(struct maple_node *node)
{
return ((unsigned long)node->parent & MA_ROOT_PARENT);
}
-static inline bool mte_is_root(const struct maple_enode *node)
+static __always_inline bool mte_is_root(const struct maple_enode *node)
{
return ma_is_root(mte_to_node(node));
}
@@ -375,7 +378,7 @@ static inline bool mas_is_root_limits(const struct ma_state *mas)
return !mas->min && mas->max == ULONG_MAX;
}
-static inline bool mt_is_alloc(struct maple_tree *mt)
+static __always_inline bool mt_is_alloc(struct maple_tree *mt)
{
return (mt->ma_flags & MT_FLAGS_ALLOC_RANGE);
}
@@ -514,11 +517,12 @@ void mas_set_parent(struct ma_state *mas, struct maple_enode *enode,
*
* Return: The slot in the parent node where @enode resides.
*/
-static inline unsigned int mte_parent_slot(const struct maple_enode *enode)
+static __always_inline
+unsigned int mte_parent_slot(const struct maple_enode *enode)
{
unsigned long val = (unsigned long)mte_to_node(enode)->parent;
- if (val & MA_ROOT_PARENT)
+ if (unlikely(val & MA_ROOT_PARENT))
return 0;
/*
@@ -534,7 +538,8 @@ static inline unsigned int mte_parent_slot(const struct maple_enode *enode)
*
* Return: The parent maple node.
*/
-static inline struct maple_node *mte_parent(const struct maple_enode *enode)
+static __always_inline
+struct maple_node *mte_parent(const struct maple_enode *enode)
{
return (void *)((unsigned long)
(mte_to_node(enode)->parent) & ~MAPLE_NODE_MASK);
@@ -546,7 +551,7 @@ static inline struct maple_node *mte_parent(const struct maple_enode *enode)
*
* Return: true if dead, false otherwise.
*/
-static inline bool ma_dead_node(const struct maple_node *node)
+static __always_inline bool ma_dead_node(const struct maple_node *node)
{
struct maple_node *parent;
@@ -562,7 +567,7 @@ static inline bool ma_dead_node(const struct maple_node *node)
*
* Return: true if dead, false otherwise.
*/
-static inline bool mte_dead_node(const struct maple_enode *enode)
+static __always_inline bool mte_dead_node(const struct maple_enode *enode)
{
struct maple_node *parent, *node;
@@ -680,35 +685,6 @@ static inline unsigned long *ma_gaps(struct maple_node *node,
}
/*
- * mas_pivot() - Get the pivot at @piv of the maple encoded node.
- * @mas: The maple state.
- * @piv: The pivot.
- *
- * Return: the pivot at @piv of @mn.
- */
-static inline unsigned long mas_pivot(struct ma_state *mas, unsigned char piv)
-{
- struct maple_node *node = mas_mn(mas);
- enum maple_type type = mte_node_type(mas->node);
-
- if (MAS_WARN_ON(mas, piv >= mt_pivots[type])) {
- mas_set_err(mas, -EIO);
- return 0;
- }
-
- switch (type) {
- case maple_arange_64:
- return node->ma64.pivot[piv];
- case maple_range_64:
- case maple_leaf_64:
- return node->mr64.pivot[piv];
- case maple_dense:
- return 0;
- }
- return 0;
-}
-
-/*
* mas_safe_pivot() - get the pivot at @piv or mas->max.
* @mas: The maple state
* @pivots: The pointer to the maple node pivots
@@ -718,7 +694,7 @@ static inline unsigned long mas_pivot(struct ma_state *mas, unsigned char piv)
* Return: The pivot at @piv within the limit of the @pivots array, @mas->max
* otherwise.
*/
-static inline unsigned long
+static __always_inline unsigned long
mas_safe_pivot(const struct ma_state *mas, unsigned long *pivots,
unsigned char piv, enum maple_type type)
{
@@ -759,7 +735,6 @@ static inline void mte_set_pivot(struct maple_enode *mn, unsigned char piv,
BUG_ON(piv >= mt_pivots[type]);
switch (type) {
- default:
case maple_range_64:
case maple_leaf_64:
node->mr64.pivot[piv] = val;
@@ -783,7 +758,6 @@ static inline void mte_set_pivot(struct maple_enode *mn, unsigned char piv,
static inline void __rcu **ma_slots(struct maple_node *mn, enum maple_type mt)
{
switch (mt) {
- default:
case maple_arange_64:
return mn->ma64.slot;
case maple_range_64:
@@ -792,6 +766,8 @@ static inline void __rcu **ma_slots(struct maple_node *mn, enum maple_type mt)
case maple_dense:
return mn->slot;
}
+
+ return NULL;
}
static inline bool mt_write_locked(const struct maple_tree *mt)
@@ -800,20 +776,20 @@ static inline bool mt_write_locked(const struct maple_tree *mt)
lockdep_is_held(&mt->ma_lock);
}
-static inline bool mt_locked(const struct maple_tree *mt)
+static __always_inline bool mt_locked(const struct maple_tree *mt)
{
return mt_external_lock(mt) ? mt_lock_is_held(mt) :
lockdep_is_held(&mt->ma_lock);
}
-static inline void *mt_slot(const struct maple_tree *mt,
+static __always_inline void *mt_slot(const struct maple_tree *mt,
void __rcu **slots, unsigned char offset)
{
return rcu_dereference_check(slots[offset], mt_locked(mt));
}
-static inline void *mt_slot_locked(struct maple_tree *mt, void __rcu **slots,
- unsigned char offset)
+static __always_inline void *mt_slot_locked(struct maple_tree *mt,
+ void __rcu **slots, unsigned char offset)
{
return rcu_dereference_protected(slots[offset], mt_write_locked(mt));
}
@@ -825,8 +801,8 @@ static inline void *mt_slot_locked(struct maple_tree *mt, void __rcu **slots,
*
* Return: The entry stored in @slots at the @offset.
*/
-static inline void *mas_slot_locked(struct ma_state *mas, void __rcu **slots,
- unsigned char offset)
+static __always_inline void *mas_slot_locked(struct ma_state *mas,
+ void __rcu **slots, unsigned char offset)
{
return mt_slot_locked(mas->tree, slots, offset);
}
@@ -839,8 +815,8 @@ static inline void *mas_slot_locked(struct ma_state *mas, void __rcu **slots,
*
* Return: The entry stored in @slots at the @offset
*/
-static inline void *mas_slot(struct ma_state *mas, void __rcu **slots,
- unsigned char offset)
+static __always_inline void *mas_slot(struct ma_state *mas, void __rcu **slots,
+ unsigned char offset)
{
return mt_slot(mas->tree, slots, offset);
}
@@ -851,7 +827,7 @@ static inline void *mas_slot(struct ma_state *mas, void __rcu **slots,
*
* Return: The pointer to the root of the tree
*/
-static inline void *mas_root(struct ma_state *mas)
+static __always_inline void *mas_root(struct ma_state *mas)
{
return rcu_dereference_check(mas->tree->ma_root, mt_locked(mas->tree));
}
@@ -954,10 +930,8 @@ static inline unsigned char ma_meta_end(struct maple_node *mn,
/*
* ma_meta_gap() - Get the largest gap location of a node from the metadata
* @mn: The maple node
- * @mt: The maple node type
*/
-static inline unsigned char ma_meta_gap(struct maple_node *mn,
- enum maple_type mt)
+static inline unsigned char ma_meta_gap(struct maple_node *mn)
{
return mn->ma64.meta.gap;
}
@@ -1112,14 +1086,16 @@ static int mas_ascend(struct ma_state *mas)
return 0;
}
- if (!mas->min)
+ min = 0;
+ max = ULONG_MAX;
+ if (!mas->offset) {
+ min = mas->min;
set_min = true;
+ }
if (mas->max == ULONG_MAX)
set_max = true;
- min = 0;
- max = ULONG_MAX;
do {
p_enode = a_enode;
a_type = mas_parent_type(mas, p_enode);
@@ -1258,6 +1234,7 @@ static inline void mas_alloc_nodes(struct ma_state *mas, gfp_t gfp)
if (mas->mas_flags & MA_STATE_PREALLOC) {
if (allocated)
return;
+ BUG_ON(!allocated);
WARN_ON(!allocated);
}
@@ -1363,14 +1340,14 @@ static void mas_node_count(struct ma_state *mas, int count)
* mas_start() - Sets up maple state for operations.
* @mas: The maple state.
*
- * If mas->node == MAS_START, then set the min, max and depth to
+ * If mas->status == ma_start, then set the min, max and depth to
* defaults.
*
* Return:
- * - If mas->node is an error or not MAS_START, return NULL.
- * - If it's an empty tree: NULL & mas->node == MAS_NONE
- * - If it's a single entry: The entry & mas->node == MAS_ROOT
- * - If it's a tree: NULL & mas->node == safe root node.
+ * - If mas->node is an error or not ma_start, return NULL.
+ * - If it's an empty tree: NULL & mas->status == ma_none
+ * - If it's a single entry: The entry & mas->status == ma_root
+ * - If it's a tree: NULL & mas->node == safe root node.
*/
static inline struct maple_enode *mas_start(struct ma_state *mas)
{
@@ -1386,6 +1363,7 @@ retry:
/* Tree with nodes */
if (likely(xa_is_node(root))) {
mas->depth = 1;
+ mas->status = ma_active;
mas->node = mte_safe_root(root);
mas->offset = 0;
if (mte_dead_node(mas->node))
@@ -1396,13 +1374,14 @@ retry:
/* empty tree */
if (unlikely(!root)) {
- mas->node = MAS_NONE;
+ mas->node = NULL;
+ mas->status = ma_none;
mas->offset = MAPLE_NODE_SLOTS;
return NULL;
}
/* Single entry tree */
- mas->node = MAS_ROOT;
+ mas->status = ma_root;
mas->offset = MAPLE_NODE_SLOTS;
/* Single entry tree. */
@@ -1425,10 +1404,8 @@ retry:
* Uses metadata to find the end of the data when possible.
* Return: The zero indexed last slot with data (may be null).
*/
-static inline unsigned char ma_data_end(struct maple_node *node,
- enum maple_type type,
- unsigned long *pivots,
- unsigned long max)
+static __always_inline unsigned char ma_data_end(struct maple_node *node,
+ enum maple_type type, unsigned long *pivots, unsigned long max)
{
unsigned char offset;
@@ -1541,6 +1518,9 @@ static unsigned long mas_leaf_max_gap(struct ma_state *mas)
gap = ULONG_MAX - pivots[max_piv];
if (gap > max_gap)
max_gap = gap;
+
+ if (max_gap > pivots[max_piv] - mas->min)
+ return max_gap;
}
for (; i <= max_piv; i++) {
@@ -1608,7 +1588,7 @@ static inline unsigned long mas_max_gap(struct ma_state *mas)
node = mas_mn(mas);
MAS_BUG_ON(mas, mt != maple_arange_64);
- offset = ma_meta_gap(node, mt);
+ offset = ma_meta_gap(node);
gaps = ma_gaps(node, mt);
return gaps[offset];
}
@@ -1639,7 +1619,7 @@ static inline void mas_parent_gap(struct ma_state *mas, unsigned char offset,
ascend:
MAS_BUG_ON(mas, pmt != maple_arange_64);
- meta_offset = ma_meta_gap(pnode, pmt);
+ meta_offset = ma_meta_gap(pnode);
meta_gap = pgaps[meta_offset];
pgaps[offset] = new;
@@ -1987,27 +1967,13 @@ complete:
/*
* mas_leaf_set_meta() - Set the metadata of a leaf if possible.
- * @mas: The maple state
* @node: The maple node
- * @pivots: pointer to the maple node pivots
* @mt: The maple type
- * @end: The assumed end
- *
- * Note, end may be incremented within this function but not modified at the
- * source. This is fine since the metadata is the last thing to be stored in a
- * node during a write.
+ * @end: The node end
*/
-static inline void mas_leaf_set_meta(struct ma_state *mas,
- struct maple_node *node, unsigned long *pivots,
+static inline void mas_leaf_set_meta(struct maple_node *node,
enum maple_type mt, unsigned char end)
{
- /* There is no room for metadata already */
- if (mt_pivots[mt] <= end)
- return;
-
- if (pivots[end] && pivots[end] < mas->max)
- end++;
-
if (end < mt_slots[mt] - 1)
ma_set_meta(node, mt, 0, end);
}
@@ -2064,7 +2030,7 @@ static inline void mab_mas_cp(struct maple_big_node *b_node,
ma_set_meta(node, mt, offset, end);
} else {
- mas_leaf_set_meta(mas, node, pivots, mt, end);
+ mas_leaf_set_meta(node, mt, end);
}
}
@@ -2152,11 +2118,11 @@ static noinline_for_kasan void mas_store_b_node(struct ma_wr_state *wr_mas,
}
slot = offset_end + 1;
- if (slot > wr_mas->node_end)
+ if (slot > mas->end)
goto b_end;
/* Copy end data to the end of the node. */
- mas_mab_cp(mas, slot, wr_mas->node_end + 1, b_node, ++b_end);
+ mas_mab_cp(mas, slot, mas->end + 1, b_node, ++b_end);
b_node->b_end--;
return;
@@ -2211,19 +2177,21 @@ static inline bool mas_next_sibling(struct ma_state *mas)
}
/*
- * mte_node_or_node() - Return the encoded node or MAS_NONE.
+ * mas_node_or_none() - Set the enode and state.
* @enode: The encoded maple node.
*
- * Shorthand to avoid setting %NULLs in the tree or maple_subtree_state.
- *
- * Return: @enode or MAS_NONE
+ * Set the node to the enode and the status.
*/
-static inline struct maple_enode *mte_node_or_none(struct maple_enode *enode)
+static inline void mas_node_or_none(struct ma_state *mas,
+ struct maple_enode *enode)
{
- if (enode)
- return enode;
-
- return ma_enode_ptr(MAS_NONE);
+ if (enode) {
+ mas->node = enode;
+ mas->status = ma_active;
+ } else {
+ mas->node = NULL;
+ mas->status = ma_none;
+ }
}
/*
@@ -2245,8 +2213,8 @@ static inline void mas_wr_node_walk(struct ma_wr_state *wr_mas)
wr_mas->node = mas_mn(wr_mas->mas);
wr_mas->pivots = ma_pivots(wr_mas->node, wr_mas->type);
- count = wr_mas->node_end = ma_data_end(wr_mas->node, wr_mas->type,
- wr_mas->pivots, mas->max);
+ count = mas->end = ma_data_end(wr_mas->node, wr_mas->type,
+ wr_mas->pivots, mas->max);
offset = mas->offset;
while (offset < count && mas->index > wr_mas->pivots[offset])
@@ -2535,7 +2503,7 @@ static inline void mast_set_split_parents(struct maple_subtree_state *mast,
}
/*
- * mas_topiary_node() - Dispose of a singe node
+ * mas_topiary_node() - Dispose of a single node
* @mas: The maple state for pushing nodes
* @enode: The encoded maple node
* @in_rcu: If the tree is in rcu mode
@@ -2543,13 +2511,15 @@ static inline void mast_set_split_parents(struct maple_subtree_state *mast,
* The node will either be RCU freed or pushed back on the maple state.
*/
static inline void mas_topiary_node(struct ma_state *mas,
- struct maple_enode *enode, bool in_rcu)
+ struct ma_state *tmp_mas, bool in_rcu)
{
struct maple_node *tmp;
+ struct maple_enode *enode;
- if (enode == MAS_NONE)
+ if (mas_is_none(tmp_mas))
return;
+ enode = tmp_mas->node;
tmp = mte_to_node(enode);
mte_set_node_dead(enode);
if (in_rcu)
@@ -2589,8 +2559,8 @@ static inline void mas_topiary_replace(struct ma_state *mas,
/* Update the parent pointers in the tree */
tmp[0] = *mas;
tmp[0].offset = 0;
- tmp[1].node = MAS_NONE;
- tmp[2].node = MAS_NONE;
+ tmp[1].status = ma_none;
+ tmp[2].status = ma_none;
while (!mte_is_leaf(tmp[0].node)) {
n = 0;
for (i = 0; i < 3; i++) {
@@ -2610,7 +2580,7 @@ static inline void mas_topiary_replace(struct ma_state *mas,
break;
while (n < 3)
- tmp_next[n++].node = MAS_NONE;
+ tmp_next[n++].status = ma_none;
for (i = 0; i < 3; i++)
tmp[i] = tmp_next[i];
@@ -2623,8 +2593,8 @@ static inline void mas_topiary_replace(struct ma_state *mas,
tmp[0] = *mas;
tmp[0].offset = 0;
tmp[0].node = old_enode;
- tmp[1].node = MAS_NONE;
- tmp[2].node = MAS_NONE;
+ tmp[1].status = ma_none;
+ tmp[2].status = ma_none;
in_rcu = mt_in_rcu(mas->tree);
do {
n = 0;
@@ -2639,7 +2609,7 @@ static inline void mas_topiary_replace(struct ma_state *mas,
if ((tmp_next[n].min >= tmp_next->index) &&
(tmp_next[n].max <= tmp_next->last)) {
mat_add(&subtrees, tmp_next[n].node);
- tmp_next[n].node = MAS_NONE;
+ tmp_next[n].status = ma_none;
} else {
n++;
}
@@ -2650,16 +2620,16 @@ static inline void mas_topiary_replace(struct ma_state *mas,
break;
while (n < 3)
- tmp_next[n++].node = MAS_NONE;
+ tmp_next[n++].status = ma_none;
for (i = 0; i < 3; i++) {
- mas_topiary_node(mas, tmp[i].node, in_rcu);
+ mas_topiary_node(mas, &tmp[i], in_rcu);
tmp[i] = tmp_next[i];
}
} while (!mte_is_leaf(tmp[0].node));
for (i = 0; i < 3; i++)
- mas_topiary_node(mas, tmp[i].node, in_rcu);
+ mas_topiary_node(mas, &tmp[i], in_rcu);
mas_mat_destroy(mas, &subtrees);
}
@@ -2698,9 +2668,9 @@ static inline void mast_cp_to_nodes(struct maple_subtree_state *mast,
{
bool new_lmax = true;
- mast->l->node = mte_node_or_none(left);
- mast->m->node = mte_node_or_none(middle);
- mast->r->node = mte_node_or_none(right);
+ mas_node_or_none(mast->l, left);
+ mas_node_or_none(mast->m, middle);
+ mas_node_or_none(mast->r, right);
mast->l->min = mast->orig_l->min;
if (split == mast->bn->b_end) {
@@ -2796,32 +2766,29 @@ static inline void *mtree_range_walk(struct ma_state *mas)
min = mas->min;
max = mas->max;
do {
- offset = 0;
last = next;
node = mte_to_node(next);
type = mte_node_type(next);
pivots = ma_pivots(node, type);
end = ma_data_end(node, type, pivots, max);
- if (unlikely(ma_dead_node(node)))
- goto dead_node;
-
- if (pivots[offset] >= mas->index) {
- prev_max = max;
- prev_min = min;
- max = pivots[offset];
+ prev_min = min;
+ prev_max = max;
+ if (pivots[0] >= mas->index) {
+ offset = 0;
+ max = pivots[0];
goto next;
}
- do {
+ offset = 1;
+ while (offset < end) {
+ if (pivots[offset] >= mas->index) {
+ max = pivots[offset];
+ break;
+ }
offset++;
- } while ((offset < end) && (pivots[offset] < mas->index));
+ }
- prev_min = min;
min = pivots[offset - 1] + 1;
- prev_max = max;
- if (likely(offset < end && pivots[offset]))
- max = pivots[offset];
-
next:
slots = ma_slots(node, type);
next = mt_slot(mas->tree, slots, offset);
@@ -2829,6 +2796,7 @@ next:
goto dead_node;
} while (!ma_is_leaf(type));
+ mas->end = end;
mas->offset = offset;
mas->index = min;
mas->last = max;
@@ -2879,7 +2847,7 @@ static int mas_spanning_rebalance(struct ma_state *mas,
mast->l = &l_mas;
mast->m = &m_mas;
mast->r = &r_mas;
- l_mas.node = r_mas.node = m_mas.node = MAS_NONE;
+ l_mas.status = r_mas.status = m_mas.status = ma_none;
/* Check if this is not root and has sufficient data. */
if (((mast->orig_l->min != 0) || (mast->orig_r->max != ULONG_MAX)) &&
@@ -3167,7 +3135,7 @@ done:
* @mas: The maple state
* @height: The height of the tree in case it's a new root.
*/
-static inline bool mas_split_final_node(struct maple_subtree_state *mast,
+static inline void mas_split_final_node(struct maple_subtree_state *mast,
struct ma_state *mas, int height)
{
struct maple_enode *ancestor;
@@ -3191,7 +3159,6 @@ static inline bool mas_split_final_node(struct maple_subtree_state *mast,
mast->l->node = ancestor;
mab_mas_cp(mast->bn, 0, mt_slots[mast->bn->type] - 1, mast->l, true);
mas->offset = mast->bn->b_end - 1;
- return true;
}
/*
@@ -3406,7 +3373,6 @@ static int mas_split(struct ma_state *mas, struct maple_big_node *b_node)
/* Try to push left. */
if (mas_push_data(mas, height, &mast, true))
break;
-
/* Try to push right. */
if (mas_push_data(mas, height, &mast, false))
break;
@@ -3495,6 +3461,7 @@ static noinline_for_kasan int mas_commit_b_node(struct ma_wr_state *wr_mas,
mas_replace_node(wr_mas->mas, old_enode);
reuse_node:
mas_update_gap(wr_mas->mas);
+ wr_mas->mas->end = b_end;
return 1;
}
@@ -3521,6 +3488,7 @@ static inline int mas_root_expand(struct ma_state *mas, void *entry)
slots = ma_slots(node, type);
node->parent = ma_parent_ptr(mas_tree_parent(mas));
mas->node = mt_mk_node(node, type);
+ mas->status = ma_active;
if (mas->index) {
if (contents) {
@@ -3553,7 +3521,7 @@ static inline void mas_store_root(struct ma_state *mas, void *entry)
mas_root_expand(mas, entry);
else {
rcu_assign_pointer(mas->tree->ma_root, entry);
- mas->node = MAS_START;
+ mas->status = ma_start;
}
}
@@ -3730,23 +3698,17 @@ static inline void *mtree_lookup_walk(struct ma_state *mas)
enum maple_type type;
void __rcu **slots;
unsigned char end;
- unsigned long max;
next = mas->node;
- max = ULONG_MAX;
do {
- offset = 0;
node = mte_to_node(next);
type = mte_node_type(next);
pivots = ma_pivots(node, type);
- end = ma_data_end(node, type, pivots, max);
- if (unlikely(ma_dead_node(node)))
- goto dead_node;
+ end = mt_pivots[type];
+ offset = 0;
do {
- if (pivots[offset] >= mas->index) {
- max = pivots[offset];
+ if (pivots[offset] >= mas->index)
break;
- }
} while (++offset < end);
slots = ma_slots(node, type);
@@ -3785,7 +3747,7 @@ static inline int mas_new_root(struct ma_state *mas, void *entry)
mas->depth = 0;
mas_set_height(mas);
rcu_assign_pointer(mas->tree->ma_root, entry);
- mas->node = MAS_START;
+ mas->status = ma_start;
goto done;
}
@@ -3798,6 +3760,7 @@ static inline int mas_new_root(struct ma_state *mas, void *entry)
slots = ma_slots(node, type);
node->parent = ma_parent_ptr(mas_tree_parent(mas));
mas->node = mt_mk_node(node, type);
+ mas->status = ma_active;
rcu_assign_pointer(slots[0], entry);
pivots[0] = mas->last;
mas->depth = 1;
@@ -3891,10 +3854,10 @@ static inline int mas_wr_spanning_store(struct ma_wr_state *wr_mas)
memset(&b_node, 0, sizeof(struct maple_big_node));
/* Copy l_mas and store the value in b_node. */
- mas_store_b_node(&l_wr_mas, &b_node, l_wr_mas.node_end);
+ mas_store_b_node(&l_wr_mas, &b_node, l_mas.end);
/* Copy r_mas into b_node. */
- if (r_mas.offset <= r_wr_mas.node_end)
- mas_mab_cp(&r_mas, r_mas.offset, r_wr_mas.node_end,
+ if (r_mas.offset <= r_mas.end)
+ mas_mab_cp(&r_mas, r_mas.offset, r_mas.end,
&b_node, b_node.b_end + 1);
else
b_node.b_end++;
@@ -3936,7 +3899,7 @@ static inline bool mas_wr_node_store(struct ma_wr_state *wr_mas,
if (mas->last == wr_mas->end_piv)
offset_end++; /* don't copy this offset */
else if (unlikely(wr_mas->r_max == ULONG_MAX))
- mas_bulk_rebalance(mas, wr_mas->node_end, wr_mas->type);
+ mas_bulk_rebalance(mas, mas->end, wr_mas->type);
/* set up node. */
if (in_rcu) {
@@ -3972,12 +3935,12 @@ static inline bool mas_wr_node_store(struct ma_wr_state *wr_mas,
* this range wrote to the end of the node or it overwrote the rest of
* the data
*/
- if (offset_end > wr_mas->node_end)
+ if (offset_end > mas->end)
goto done;
dst_offset = mas->offset + 1;
/* Copy to the end of node if necessary. */
- copy_size = wr_mas->node_end - offset_end + 1;
+ copy_size = mas->end - offset_end + 1;
memcpy(dst_slots + dst_offset, wr_mas->slots + offset_end,
sizeof(void *) * copy_size);
memcpy(dst_pivots + dst_offset, wr_mas->pivots + offset_end,
@@ -3987,7 +3950,7 @@ static inline bool mas_wr_node_store(struct ma_wr_state *wr_mas,
dst_pivots[new_end] = mas->max;
done:
- mas_leaf_set_meta(mas, newnode, dst_pivots, maple_leaf_64, new_end);
+ mas_leaf_set_meta(newnode, maple_leaf_64, new_end);
if (in_rcu) {
struct maple_enode *old_enode = mas->node;
@@ -3998,6 +3961,7 @@ done:
}
trace_ma_write(__func__, mas, 0, wr_mas->entry);
mas_update_gap(mas);
+ mas->end = new_end;
return true;
}
@@ -4063,10 +4027,10 @@ static inline void mas_wr_extend_null(struct ma_wr_state *wr_mas)
} else {
/* Check next slot(s) if we are overwriting the end */
if ((mas->last == wr_mas->end_piv) &&
- (wr_mas->node_end != wr_mas->offset_end) &&
+ (mas->end != wr_mas->offset_end) &&
!wr_mas->slots[wr_mas->offset_end + 1]) {
wr_mas->offset_end++;
- if (wr_mas->offset_end == wr_mas->node_end)
+ if (wr_mas->offset_end == mas->end)
mas->last = mas->max;
else
mas->last = wr_mas->pivots[wr_mas->offset_end];
@@ -4091,11 +4055,11 @@ static inline void mas_wr_extend_null(struct ma_wr_state *wr_mas)
static inline void mas_wr_end_piv(struct ma_wr_state *wr_mas)
{
- while ((wr_mas->offset_end < wr_mas->node_end) &&
+ while ((wr_mas->offset_end < wr_mas->mas->end) &&
(wr_mas->mas->last > wr_mas->pivots[wr_mas->offset_end]))
wr_mas->offset_end++;
- if (wr_mas->offset_end < wr_mas->node_end)
+ if (wr_mas->offset_end < wr_mas->mas->end)
wr_mas->end_piv = wr_mas->pivots[wr_mas->offset_end];
else
wr_mas->end_piv = wr_mas->mas->max;
@@ -4107,7 +4071,7 @@ static inline void mas_wr_end_piv(struct ma_wr_state *wr_mas)
static inline unsigned char mas_wr_new_end(struct ma_wr_state *wr_mas)
{
struct ma_state *mas = wr_mas->mas;
- unsigned char new_end = wr_mas->node_end + 2;
+ unsigned char new_end = mas->end + 2;
new_end -= wr_mas->offset_end - mas->offset;
if (wr_mas->r_min == mas->index)
@@ -4141,10 +4105,7 @@ static inline bool mas_wr_append(struct ma_wr_state *wr_mas,
if (mt_in_rcu(mas->tree))
return false;
- if (mas->offset != wr_mas->node_end)
- return false;
-
- end = wr_mas->node_end;
+ end = mas->end;
if (mas->offset != end)
return false;
@@ -4178,6 +4139,7 @@ static inline bool mas_wr_append(struct ma_wr_state *wr_mas,
if (!wr_mas->content || !wr_mas->entry)
mas_update_gap(mas);
+ mas->end = new_end;
trace_ma_write(__func__, mas, new_end, wr_mas->entry);
return true;
}
@@ -4195,7 +4157,7 @@ static void mas_wr_bnode(struct ma_wr_state *wr_mas)
trace_ma_write(__func__, wr_mas->mas, 0, wr_mas->entry);
memset(&b_node, 0, sizeof(struct maple_big_node));
mas_store_b_node(wr_mas, &b_node, wr_mas->offset_end);
- mas_commit_b_node(wr_mas, &b_node, wr_mas->node_end);
+ mas_commit_b_node(wr_mas, &b_node, wr_mas->mas->end);
}
static inline void mas_wr_modify(struct ma_wr_state *wr_mas)
@@ -4223,7 +4185,7 @@ static inline void mas_wr_modify(struct ma_wr_state *wr_mas)
if (mas_wr_append(wr_mas, new_end))
return;
- if (new_end == wr_mas->node_end && mas_wr_slot_store(wr_mas))
+ if (new_end == mas->end && mas_wr_slot_store(wr_mas))
return;
if (mas_wr_node_store(wr_mas, new_end))
@@ -4328,7 +4290,7 @@ exists:
}
-static inline void mas_rewalk(struct ma_state *mas, unsigned long index)
+static __always_inline void mas_rewalk(struct ma_state *mas, unsigned long index)
{
retry:
mas_set(mas, index);
@@ -4337,7 +4299,7 @@ retry:
goto retry;
}
-static inline bool mas_rewalk_if_dead(struct ma_state *mas,
+static __always_inline bool mas_rewalk_if_dead(struct ma_state *mas,
struct maple_node *node, const unsigned long index)
{
if (unlikely(ma_dead_node(node))) {
@@ -4349,14 +4311,16 @@ static inline bool mas_rewalk_if_dead(struct ma_state *mas,
/*
* mas_prev_node() - Find the prev non-null entry at the same level in the
- * tree. The prev value will be mas->node[mas->offset] or MAS_NONE.
+ * tree. The prev value will be mas->node[mas->offset] or the status will be
+ * ma_underflow.
* @mas: The maple state
* @min: The lower limit to search
*
- * The prev node value will be mas->node[mas->offset] or MAS_NONE.
+ * The prev node value will be mas->node[mas->offset] or the status will be
+ * ma_underflow.
* Return: 1 if the node is dead, 0 otherwise.
*/
-static inline int mas_prev_node(struct ma_state *mas, unsigned long min)
+static int mas_prev_node(struct ma_state *mas, unsigned long min)
{
enum maple_type mt;
int offset, level;
@@ -4416,13 +4380,14 @@ static inline int mas_prev_node(struct ma_state *mas, unsigned long min)
if (unlikely(mte_dead_node(mas->node)))
return 1;
+ mas->end = mas->offset;
return 0;
no_entry:
if (unlikely(ma_dead_node(node)))
return 1;
- mas->node = MAS_NONE;
+ mas->status = ma_underflow;
return 0;
}
@@ -4436,8 +4401,7 @@ no_entry:
*
* Return: The entry in the previous slot which is possibly NULL
*/
-static void *mas_prev_slot(struct ma_state *mas, unsigned long min, bool empty,
- bool set_underflow)
+static void *mas_prev_slot(struct ma_state *mas, unsigned long min, bool empty)
{
void *entry;
void __rcu **slots;
@@ -4470,13 +4434,16 @@ again:
mas->last = mas->index - 1;
mas->index = mas_safe_min(mas, pivots, mas->offset);
} else {
+ if (mas->index <= min)
+ goto underflow;
+
if (mas_prev_node(mas, min)) {
mas_rewalk(mas, save_point);
goto retry;
}
- if (mas_is_none(mas))
- goto underflow;
+ if (WARN_ON_ONCE(mas_is_underflow(mas)))
+ return NULL;
mas->last = mas->max;
node = mas_mn(mas);
@@ -4490,12 +4457,15 @@ again:
if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
goto retry;
+
if (likely(entry))
return entry;
if (!empty) {
- if (mas->index <= min)
- goto underflow;
+ if (mas->index <= min) {
+ mas->status = ma_underflow;
+ return NULL;
+ }
goto again;
}
@@ -4503,8 +4473,7 @@ again:
return entry;
underflow:
- if (set_underflow)
- mas->node = MAS_UNDERFLOW;
+ mas->status = ma_underflow;
return NULL;
}
@@ -4513,28 +4482,30 @@ underflow:
* @mas: The maple state
* @max: The maximum pivot value to check.
*
- * The next value will be mas->node[mas->offset] or MAS_NONE.
+ * The next value will be mas->node[mas->offset] or the status will be
+ * ma_overflow.
* Return: 1 on dead node, 0 otherwise.
*/
-static inline int mas_next_node(struct ma_state *mas, struct maple_node *node,
- unsigned long max)
+static int mas_next_node(struct ma_state *mas, struct maple_node *node,
+ unsigned long max)
{
unsigned long min;
unsigned long *pivots;
struct maple_enode *enode;
+ struct maple_node *tmp;
int level = 0;
unsigned char node_end;
enum maple_type mt;
void __rcu **slots;
if (mas->max >= max)
- goto no_entry;
+ goto overflow;
min = mas->max + 1;
level = 0;
do {
if (ma_is_root(node))
- goto no_entry;
+ goto overflow;
/* Walk up. */
if (unlikely(mas_ascend(mas)))
@@ -4574,6 +4545,10 @@ static inline int mas_next_node(struct ma_state *mas, struct maple_node *node,
pivots = ma_pivots(node, mt);
mas->max = mas_safe_pivot(mas, pivots, mas->offset, mt);
+ tmp = mte_to_node(enode);
+ mt = mte_node_type(enode);
+ pivots = ma_pivots(tmp, mt);
+ mas->end = ma_data_end(tmp, mt, pivots, mas->max);
if (unlikely(ma_dead_node(node)))
return 1;
@@ -4581,11 +4556,11 @@ static inline int mas_next_node(struct ma_state *mas, struct maple_node *node,
mas->min = min;
return 0;
-no_entry:
+overflow:
if (unlikely(ma_dead_node(node)))
return 1;
- mas->node = MAS_NONE;
+ mas->status = ma_overflow;
return 0;
}
@@ -4600,15 +4575,13 @@ no_entry:
*
* Return: The entry in the next slot which is possibly NULL
*/
-static void *mas_next_slot(struct ma_state *mas, unsigned long max, bool empty,
- bool set_overflow)
+static void *mas_next_slot(struct ma_state *mas, unsigned long max, bool empty)
{
void __rcu **slots;
unsigned long *pivots;
unsigned long pivot;
enum maple_type type;
struct maple_node *node;
- unsigned char data_end;
unsigned long save_point = mas->last;
void *entry;
@@ -4616,42 +4589,45 @@ retry:
node = mas_mn(mas);
type = mte_node_type(mas->node);
pivots = ma_pivots(node, type);
- data_end = ma_data_end(node, type, pivots, mas->max);
if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
goto retry;
if (mas->max >= max) {
- if (likely(mas->offset < data_end))
+ if (likely(mas->offset < mas->end))
pivot = pivots[mas->offset];
else
- goto overflow;
+ pivot = mas->max;
if (unlikely(mas_rewalk_if_dead(mas, node, save_point)))
goto retry;
- if (pivot >= max)
- goto overflow;
+ if (pivot >= max) { /* Was at the limit, next will extend beyond */
+ mas->status = ma_overflow;
+ return NULL;
+ }
}
- if (likely(mas->offset < data_end)) {
+ if (likely(mas->offset < mas->end)) {
mas->index = pivots[mas->offset] + 1;
again:
mas->offset++;
- if (likely(mas->offset < data_end))
+ if (likely(mas->offset < mas->end))
mas->last = pivots[mas->offset];
else
mas->last = mas->max;
} else {
+ if (mas->last >= max) {
+ mas->status = ma_overflow;
+ return NULL;
+ }
+
if (mas_next_node(mas, node, max)) {
mas_rewalk(mas, save_point);
goto retry;
}
- if (WARN_ON_ONCE(mas_is_none(mas))) {
- mas->node = MAS_OVERFLOW;
+ if (WARN_ON_ONCE(mas_is_overflow(mas)))
return NULL;
- goto overflow;
- }
mas->offset = 0;
mas->index = mas->min;
@@ -4669,21 +4645,18 @@ again:
if (entry)
return entry;
+
if (!empty) {
- if (mas->last >= max)
- goto overflow;
+ if (mas->last >= max) {
+ mas->status = ma_overflow;
+ return NULL;
+ }
mas->index = mas->last + 1;
- /* Node cannot end on NULL, so it's safe to short-cut here */
goto again;
}
return entry;
-
-overflow:
- if (set_overflow)
- mas->node = MAS_OVERFLOW;
- return NULL;
}
/*
@@ -4702,11 +4675,11 @@ overflow:
static inline void *mas_next_entry(struct ma_state *mas, unsigned long limit)
{
if (mas->last >= limit) {
- mas->node = MAS_OVERFLOW;
+ mas->status = ma_overflow;
return NULL;
}
- return mas_next_slot(mas, limit, false, true);
+ return mas_next_slot(mas, limit, false);
}
/*
@@ -4874,7 +4847,7 @@ done:
* @mas: The maple state.
*
* mas->index and mas->last will be set to the range if there is a value. If
- * mas->node is MAS_NONE, reset to MAS_START.
+ * mas->status is ma_none, reset to ma_start.
*
* Return: the entry at the location or %NULL.
*/
@@ -4883,7 +4856,7 @@ void *mas_walk(struct ma_state *mas)
void *entry;
if (!mas_is_active(mas) || !mas_is_start(mas))
- mas->node = MAS_START;
+ mas->status = ma_start;
retry:
entry = mas_state_walk(mas);
if (mas_is_start(mas)) {
@@ -4899,7 +4872,7 @@ retry:
mas->index = 1;
mas->last = ULONG_MAX;
- mas->node = MAS_NONE;
+ mas->status = ma_none;
return NULL;
}
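As a reminder of the contract described in the comment above, here is a minimal lookup sketch; lookup_with_range(), its tree and its index are hypothetical:

#include <linux/maple_tree.h>
#include <linux/rcupdate.h>

/* Sketch: return the entry covering @index and report the range it spans. */
static void *lookup_with_range(struct maple_tree *mt, unsigned long index,
			       unsigned long *first, unsigned long *last)
{
	MA_STATE(mas, mt, index, index);
	void *entry;

	rcu_read_lock();
	entry = mas_walk(&mas);	/* NULL or the entry covering @index */
	*first = mas.index;	/* the walked range; set even for NULL */
	*last = mas.last;
	rcu_read_unlock();

	return entry;
}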
@@ -5026,6 +4999,7 @@ int mas_empty_area(struct ma_state *mas, unsigned long min,
unsigned char offset;
unsigned long *pivots;
enum maple_type mt;
+ struct maple_node *node;
if (min > max)
return -EINVAL;
@@ -5056,12 +5030,14 @@ int mas_empty_area(struct ma_state *mas, unsigned long min,
if (unlikely(offset == MAPLE_NODE_SLOTS))
return -EBUSY;
+ node = mas_mn(mas);
mt = mte_node_type(mas->node);
- pivots = ma_pivots(mas_mn(mas), mt);
+ pivots = ma_pivots(node, mt);
min = mas_safe_min(mas, pivots, offset);
if (mas->index < min)
mas->index = min;
mas->last = mas->index + size - 1;
+ mas->end = ma_data_end(node, mt, pivots, mas->max);
return 0;
}
EXPORT_SYMBOL_GPL(mas_empty_area);
@@ -5122,6 +5098,7 @@ int mas_empty_area_rev(struct ma_state *mas, unsigned long min,
mas->last = max;
mas->index = mas->last - size + 1;
+ mas->end = mas_data_end(mas);
return 0;
}
EXPORT_SYMBOL_GPL(mas_empty_area_rev);
@@ -5501,13 +5478,24 @@ int mas_preallocate(struct ma_state *mas, void *entry, gfp_t gfp)
mas_wr_end_piv(&wr_mas);
node_size = mas_wr_new_end(&wr_mas);
+
+ /* Slot store, does not require additional nodes */
+ if (node_size == mas->end) {
+ /* reuse node */
+ if (!mt_in_rcu(mas->tree))
+ return 0;
+ /* shifting boundary */
+ if (wr_mas.offset_end - mas->offset == 1)
+ return 0;
+ }
+
if (node_size >= mt_slots[wr_mas.type]) {
/* Split, worst case for now. */
request = 1 + mas_mt_height(mas) * 2;
goto ask_now;
}
- /* New root needs a singe node */
+ /* New root needs a single node */
if (unlikely(mte_is_root(mas->node)))
goto ask_now;
@@ -5555,7 +5543,7 @@ void mas_destroy(struct ma_state *mas)
mas_start(mas);
mtree_range_walk(mas);
- end = mas_data_end(mas) + 1;
+ end = mas->end + 1;
if (end < mt_min_slot_count(mas->node) - 1)
mas_destroy_rebalance(mas, end);
@@ -5573,7 +5561,7 @@ void mas_destroy(struct ma_state *mas)
mt_free_bulk(count, (void __rcu **)&node->slot[1]);
total -= count;
}
- kmem_cache_free(maple_node_cache, node);
+ mt_free_one(ma_mnode_ptr(node));
total--;
}
@@ -5643,33 +5631,46 @@ int mas_expected_entries(struct ma_state *mas, unsigned long nr_entries)
}
EXPORT_SYMBOL_GPL(mas_expected_entries);
-static inline bool mas_next_setup(struct ma_state *mas, unsigned long max,
+static bool mas_next_setup(struct ma_state *mas, unsigned long max,
void **entry)
{
bool was_none = mas_is_none(mas);
if (unlikely(mas->last >= max)) {
- mas->node = MAS_OVERFLOW;
+ mas->status = ma_overflow;
return true;
}
- if (mas_is_active(mas))
+ switch (mas->status) {
+ case ma_active:
return false;
-
- if (mas_is_none(mas) || mas_is_paused(mas)) {
- mas->node = MAS_START;
- } else if (mas_is_overflow(mas)) {
+ case ma_none:
+ fallthrough;
+ case ma_pause:
+ mas->status = ma_start;
+ fallthrough;
+ case ma_start:
+ mas_walk(mas); /* Retries on dead nodes handled by mas_walk */
+ break;
+ case ma_overflow:
/* Overflowed before, but the max changed */
- mas->node = MAS_START;
- } else if (mas_is_underflow(mas)) {
- mas->node = MAS_START;
+ mas->status = ma_active;
+ break;
+ case ma_underflow:
+ /* The user expects the mas to be one before where it is */
+ mas->status = ma_active;
*entry = mas_walk(mas);
if (*entry)
return true;
+ break;
+ case ma_root:
+ break;
+ case ma_error:
+ return true;
}
- if (mas_is_start(mas))
- *entry = mas_walk(mas); /* Retries on dead nodes handled by mas_walk */
+ if (likely(mas_is_active(mas))) /* Fast path */
+ return false;
if (mas_is_ptr(mas)) {
*entry = NULL;
@@ -5679,7 +5680,7 @@ static inline bool mas_next_setup(struct ma_state *mas, unsigned long max,
}
mas->index = 1;
mas->last = ULONG_MAX;
- mas->node = MAS_NONE;
+ mas->status = ma_none;
return true;
}
@@ -5708,7 +5709,7 @@ void *mas_next(struct ma_state *mas, unsigned long max)
return entry;
/* Retries on dead nodes handled by mas_next_slot */
- return mas_next_slot(mas, max, false, true);
+ return mas_next_slot(mas, max, false);
}
EXPORT_SYMBOL_GPL(mas_next);
@@ -5731,7 +5732,7 @@ void *mas_next_range(struct ma_state *mas, unsigned long max)
return entry;
/* Retries on dead nodes handled by mas_next_slot */
- return mas_next_slot(mas, max, true, true);
+ return mas_next_slot(mas, max, true);
}
EXPORT_SYMBOL_GPL(mas_next_range);
@@ -5759,37 +5760,48 @@ void *mt_next(struct maple_tree *mt, unsigned long index, unsigned long max)
}
EXPORT_SYMBOL_GPL(mt_next);
-static inline bool mas_prev_setup(struct ma_state *mas, unsigned long min,
- void **entry)
+static bool mas_prev_setup(struct ma_state *mas, unsigned long min, void **entry)
{
if (unlikely(mas->index <= min)) {
- mas->node = MAS_UNDERFLOW;
+ mas->status = ma_underflow;
return true;
}
- if (mas_is_active(mas))
+ switch (mas->status) {
+ case ma_active:
return false;
-
- if (mas_is_overflow(mas)) {
- mas->node = MAS_START;
+ case ma_start:
+ break;
+ case ma_none:
+ fallthrough;
+ case ma_pause:
+ mas->status = ma_start;
+ break;
+ case ma_underflow:
+ /* underflowed before but the min changed */
+ mas->status = ma_active;
+ break;
+ case ma_overflow:
+ /* User expects mas to be one after where it is */
+ mas->status = ma_active;
*entry = mas_walk(mas);
if (*entry)
return true;
- }
-
- if (mas_is_none(mas) || mas_is_paused(mas)) {
- mas->node = MAS_START;
- } else if (mas_is_underflow(mas)) {
- /* underflowed before but the min changed */
- mas->node = MAS_START;
+ break;
+ case ma_root:
+ break;
+ case ma_error:
+ return true;
}
if (mas_is_start(mas))
mas_walk(mas);
if (unlikely(mas_is_ptr(mas))) {
- if (!mas->index)
- goto none;
+ if (!mas->index) {
+ mas->status = ma_none;
+ return true;
+ }
mas->index = mas->last = 0;
*entry = mas_root(mas);
return true;
@@ -5799,7 +5811,7 @@ static inline bool mas_prev_setup(struct ma_state *mas, unsigned long min,
if (mas->index) {
/* Walked to out-of-range pointer? */
mas->index = mas->last = 0;
- mas->node = MAS_ROOT;
+ mas->status = ma_root;
*entry = mas_root(mas);
return true;
}
@@ -5807,10 +5819,6 @@ static inline bool mas_prev_setup(struct ma_state *mas, unsigned long min,
}
return false;
-
-none:
- mas->node = MAS_NONE;
- return true;
}
/**
@@ -5819,7 +5827,7 @@ none:
* @min: The minimum value to check.
*
* Must hold rcu_read_lock or the write lock.
- * Will reset mas to MAS_START if the node is MAS_NONE. Will stop on not
+ * Will reset mas to ma_start if the status is ma_none. Will stop on not
* searchable nodes.
*
* Return: the previous value or %NULL.
@@ -5831,7 +5839,7 @@ void *mas_prev(struct ma_state *mas, unsigned long min)
if (mas_prev_setup(mas, min, &entry))
return entry;
- return mas_prev_slot(mas, min, false, true);
+ return mas_prev_slot(mas, min, false);
}
EXPORT_SYMBOL_GPL(mas_prev);
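A small sketch of mas_prev() driving a reverse scan; with this series the iterator parks in ma_underflow rather than the old MAS_UNDERFLOW node sentinel once nothing remains above the limit. walk_backwards() and its tree are hypothetical:

#include <linux/kernel.h>
#include <linux/maple_tree.h>
#include <linux/rcupdate.h>

/* Sketch: print every entry strictly before @start, highest index first. */
static void walk_backwards(struct maple_tree *mt, unsigned long start)
{
	MA_STATE(mas, mt, start, start);
	void *entry;

	rcu_read_lock();
	/* mas_prev() returns NULL (and records underflow) once no entry
	 * remains at or above the lower limit, 0 here. */
	while ((entry = mas_prev(&mas, 0)) != NULL)
		pr_info("entry %p spans [%lu, %lu]\n",
			entry, mas.index, mas.last);
	rcu_read_unlock();
}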
@@ -5842,7 +5850,7 @@ EXPORT_SYMBOL_GPL(mas_prev);
*
* Sets @mas->index and @mas->last to the range.
* Must hold rcu_read_lock or the write lock.
- * Will reset mas to MAS_START if the node is MAS_NONE. Will stop on not
+ * Will reset mas to ma_start if the status is ma_none. Will stop on not
* searchable nodes.
*
* Return: the previous value or %NULL.
@@ -5854,7 +5862,7 @@ void *mas_prev_range(struct ma_state *mas, unsigned long min)
if (mas_prev_setup(mas, min, &entry))
return entry;
- return mas_prev_slot(mas, min, true, true);
+ return mas_prev_slot(mas, min, true);
}
EXPORT_SYMBOL_GPL(mas_prev_range);
@@ -5897,7 +5905,8 @@ EXPORT_SYMBOL_GPL(mt_prev);
*/
void mas_pause(struct ma_state *mas)
{
- mas->node = MAS_PAUSE;
+ mas->status = ma_pause;
+ mas->node = NULL;
}
EXPORT_SYMBOL_GPL(mas_pause);
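mas_pause() now records ma_pause in mas->status and clears mas->node. A sketch of the pause/resume pattern it enables, assuming an RCU-protected reader; scan_with_pauses() and the per-entry work are hypothetical, and real code would normally pause only every few hundred entries rather than on each one:

#include <linux/maple_tree.h>
#include <linux/rcupdate.h>
#include <linux/sched.h>

static void scan_with_pauses(struct maple_tree *mt)
{
	MA_STATE(mas, mt, 0, 0);
	void *entry;

	rcu_read_lock();
	mas_for_each(&mas, entry, ULONG_MAX) {
		/* ... process @entry ... */

		/* Remember the position, then drop and retake the lock.
		 * The next mas_find() resumes after the last range. */
		mas_pause(&mas);
		rcu_read_unlock();
		cond_resched();
		rcu_read_lock();
	}
	rcu_read_unlock();
}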
@@ -5909,35 +5918,54 @@ EXPORT_SYMBOL_GPL(mas_pause);
*
* Returns: True if entry is the answer, false otherwise.
*/
-static inline bool mas_find_setup(struct ma_state *mas, unsigned long max,
- void **entry)
+static __always_inline bool mas_find_setup(struct ma_state *mas, unsigned long max, void **entry)
{
- if (mas_is_active(mas)) {
+ switch (mas->status) {
+ case ma_active:
if (mas->last < max)
return false;
-
return true;
- }
-
- if (mas_is_paused(mas)) {
+ case ma_start:
+ break;
+ case ma_pause:
if (unlikely(mas->last >= max))
return true;
mas->index = ++mas->last;
- mas->node = MAS_START;
- } else if (mas_is_none(mas)) {
+ mas->status = ma_start;
+ break;
+ case ma_none:
if (unlikely(mas->last >= max))
return true;
mas->index = mas->last;
- mas->node = MAS_START;
- } else if (mas_is_overflow(mas) || mas_is_underflow(mas)) {
- if (mas->index > max) {
- mas->node = MAS_OVERFLOW;
+ mas->status = ma_start;
+ break;
+ case ma_underflow:
+ /* mas is pointing at the entry before, unable to go lower */
+ if (unlikely(mas->index >= max)) {
+ mas->status = ma_overflow;
return true;
}
- mas->node = MAS_START;
+ mas->status = ma_active;
+ *entry = mas_walk(mas);
+ if (*entry)
+ return true;
+ break;
+ case ma_overflow:
+ if (unlikely(mas->last >= max))
+ return true;
+
+ mas->status = ma_active;
+ *entry = mas_walk(mas);
+ if (*entry)
+ return true;
+ break;
+ case ma_root:
+ break;
+ case ma_error:
+ return true;
}
if (mas_is_start(mas)) {
@@ -5951,12 +5979,11 @@ static inline bool mas_find_setup(struct ma_state *mas, unsigned long max,
}
- if (unlikely(!mas_searchable(mas))) {
- if (unlikely(mas_is_ptr(mas)))
- goto ptr_out_of_range;
+ if (unlikely(mas_is_ptr(mas)))
+ goto ptr_out_of_range;
+ if (unlikely(mas_is_none(mas)))
return true;
- }
if (mas->index == max)
return true;
@@ -5964,7 +5991,7 @@ static inline bool mas_find_setup(struct ma_state *mas, unsigned long max,
return false;
ptr_out_of_range:
- mas->node = MAS_NONE;
+ mas->status = ma_none;
mas->index = 1;
mas->last = ULONG_MAX;
return true;
@@ -5978,7 +6005,7 @@ ptr_out_of_range:
*
* Must hold rcu_read_lock or the write lock.
* If an entry exists, last and index are updated accordingly.
- * May set @mas->node to MAS_NONE.
+ * May set @mas->status to ma_overflow.
*
* Return: The entry or %NULL.
*/
@@ -5990,7 +6017,10 @@ void *mas_find(struct ma_state *mas, unsigned long max)
return entry;
/* Retries on dead nodes handled by mas_next_slot */
- return mas_next_slot(mas, max, false, false);
+ entry = mas_next_slot(mas, max, false);
+ /* Ignore overflow */
+ mas->status = ma_active;
+ return entry;
}
EXPORT_SYMBOL_GPL(mas_find);
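mas_find() keeps its contract of returning the first entry at or after mas->index, up to @max; the change above only means the transient overflow state is now cleared internally. A minimal sketch, assuming start <= end; first_in_range() and its tree are hypothetical:

#include <linux/maple_tree.h>
#include <linux/rcupdate.h>

/* Sketch: return the first entry overlapping [start, end], or NULL. */
static void *first_in_range(struct maple_tree *mt, unsigned long start,
			    unsigned long end)
{
	MA_STATE(mas, mt, start, start);
	void *entry;

	rcu_read_lock();
	entry = mas_find(&mas, end);	/* mas.index/mas.last hold its range */
	rcu_read_unlock();

	return entry;
}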
@@ -6002,7 +6032,7 @@ EXPORT_SYMBOL_GPL(mas_find);
*
* Must hold rcu_read_lock or the write lock.
* If an entry exists, last and index are updated accordingly.
- * May set @mas->node to MAS_NONE.
+ * May set @mas->status to ma_overflow.
*
* Return: The entry or %NULL.
*/
@@ -6014,7 +6044,7 @@ void *mas_find_range(struct ma_state *mas, unsigned long max)
return entry;
/* Retries on dead nodes handled by mas_next_slot */
- return mas_next_slot(mas, max, true, false);
+ return mas_next_slot(mas, max, true);
}
EXPORT_SYMBOL_GPL(mas_find_range);
@@ -6026,36 +6056,48 @@ EXPORT_SYMBOL_GPL(mas_find_range);
*
* Returns: True if entry is the answer, false otherwise.
*/
-static inline bool mas_find_rev_setup(struct ma_state *mas, unsigned long min,
+static bool mas_find_rev_setup(struct ma_state *mas, unsigned long min,
void **entry)
{
- if (mas_is_active(mas)) {
- if (mas->index > min)
- return false;
- return true;
- }
-
- if (mas_is_paused(mas)) {
+ switch (mas->status) {
+ case ma_active:
+ goto active;
+ case ma_start:
+ break;
+ case ma_pause:
if (unlikely(mas->index <= min)) {
- mas->node = MAS_NONE;
+ mas->status = ma_underflow;
return true;
}
- mas->node = MAS_START;
mas->last = --mas->index;
- } else if (mas_is_none(mas)) {
+ mas->status = ma_start;
+ break;
+ case ma_none:
if (mas->index <= min)
goto none;
mas->last = mas->index;
- mas->node = MAS_START;
- } else if (mas_is_underflow(mas) || mas_is_overflow(mas)) {
- if (mas->last <= min) {
- mas->node = MAS_UNDERFLOW;
+ mas->status = ma_start;
+ break;
+ case ma_overflow: /* user expects the mas to be one after where it is */
+ if (unlikely(mas->index <= min)) {
+ mas->status = ma_underflow;
return true;
}
- mas->node = MAS_START;
+ mas->status = ma_active;
+ break;
+ case ma_underflow: /* user expects the mas to be one before where it is */
+ if (unlikely(mas->index <= min))
+ return true;
+
+ mas->status = ma_active;
+ break;
+ case ma_root:
+ break;
+ case ma_error:
+ return true;
}
if (mas_is_start(mas)) {
@@ -6068,29 +6110,28 @@ static inline bool mas_find_rev_setup(struct ma_state *mas, unsigned long min,
return true;
}
- if (unlikely(!mas_searchable(mas))) {
- if (mas_is_ptr(mas))
- goto none;
+ if (unlikely(mas_is_ptr(mas)))
+ goto none;
- if (mas_is_none(mas)) {
- /*
- * Walked to the location, and there was nothing so the
- * previous location is 0.
- */
- mas->last = mas->index = 0;
- mas->node = MAS_ROOT;
- *entry = mas_root(mas);
- return true;
- }
+ if (unlikely(mas_is_none(mas))) {
+ /*
+ * Walked to the location, and there was nothing so the previous
+ * location is 0.
+ */
+ mas->last = mas->index = 0;
+ mas->status = ma_root;
+ *entry = mas_root(mas);
+ return true;
}
+active:
if (mas->index < min)
return true;
return false;
none:
- mas->node = MAS_NONE;
+ mas->status = ma_none;
return true;
}
@@ -6103,7 +6144,7 @@ none:
*
* Must hold rcu_read_lock or the write lock.
* If an entry exists, last and index are updated accordingly.
- * May set @mas->node to MAS_NONE.
+ * May set @mas->status to ma_underflow.
*
* Return: The entry or %NULL.
*/
@@ -6115,7 +6156,7 @@ void *mas_find_rev(struct ma_state *mas, unsigned long min)
return entry;
/* Retries on dead nodes handled by mas_prev_slot */
- return mas_prev_slot(mas, min, false, false);
+ return mas_prev_slot(mas, min, false);
}
EXPORT_SYMBOL_GPL(mas_find_rev);
@@ -6129,7 +6170,7 @@ EXPORT_SYMBOL_GPL(mas_find_rev);
*
* Must hold rcu_read_lock or the write lock.
* If an entry exists, last and index are updated accordingly.
- * May set @mas->node to MAS_NONE.
+ * May set @mas->status to ma_underflow.
*
* Return: The entry or %NULL.
*/
@@ -6141,7 +6182,7 @@ void *mas_find_range_rev(struct ma_state *mas, unsigned long min)
return entry;
/* Retries on dead nodes handled by mas_prev_slot */
- return mas_prev_slot(mas, min, true, false);
+ return mas_prev_slot(mas, min, true);
}
EXPORT_SYMBOL_GPL(mas_find_range_rev);
@@ -6161,8 +6202,8 @@ void *mas_erase(struct ma_state *mas)
void *entry;
MA_WR_STATE(wr_mas, mas, NULL);
- if (mas_is_none(mas) || mas_is_paused(mas))
- mas->node = MAS_START;
+ if (!mas_is_active(mas) || !mas_is_start(mas))
+ mas->status = ma_start;
/* Retry unnecessary when holding the write lock. */
entry = mas_state_walk(mas);
@@ -6207,7 +6248,7 @@ bool mas_nomem(struct ma_state *mas, gfp_t gfp)
if (!mas_allocated(mas))
return false;
- mas->node = MAS_START;
+ mas->status = ma_start;
return true;
}
@@ -6465,6 +6506,278 @@ void *mtree_erase(struct maple_tree *mt, unsigned long index)
}
EXPORT_SYMBOL(mtree_erase);
+/*
+ * mas_dup_free() - Free an incomplete duplication of a tree.
+ * @mas: The maple state of an incomplete tree.
+ *
+ * On entry, @mas->node is the node on which the allocation failed. This
+ * function frees all nodes starting from @mas->node in the
+ * reverse order of mas_dup_build(). There is no need to hold the source tree
+ * lock at this time.
+ */
+static void mas_dup_free(struct ma_state *mas)
+{
+ struct maple_node *node;
+ enum maple_type type;
+ void __rcu **slots;
+ unsigned char count, i;
+
+ /* Maybe the first node allocation failed. */
+ if (mas_is_none(mas))
+ return;
+
+ while (!mte_is_root(mas->node)) {
+ mas_ascend(mas);
+ if (mas->offset) {
+ mas->offset--;
+ do {
+ mas_descend(mas);
+ mas->offset = mas_data_end(mas);
+ } while (!mte_is_leaf(mas->node));
+
+ mas_ascend(mas);
+ }
+
+ node = mte_to_node(mas->node);
+ type = mte_node_type(mas->node);
+ slots = ma_slots(node, type);
+ count = mas_data_end(mas) + 1;
+ for (i = 0; i < count; i++)
+ ((unsigned long *)slots)[i] &= ~MAPLE_NODE_MASK;
+ mt_free_bulk(count, slots);
+ }
+
+ node = mte_to_node(mas->node);
+ mt_free_one(node);
+}
+
+/*
+ * mas_copy_node() - Copy a maple node and replace the parent.
+ * @mas: The maple state of source tree.
+ * @new_mas: The maple state of new tree.
+ * @parent: The parent of the new node.
+ *
+ * Copy @mas->node to @new_mas->node, set @parent to be the parent of
+ * @new_mas->node.
+ */
+static inline void mas_copy_node(struct ma_state *mas, struct ma_state *new_mas,
+ struct maple_pnode *parent)
+{
+ struct maple_node *node = mte_to_node(mas->node);
+ struct maple_node *new_node = mte_to_node(new_mas->node);
+ unsigned long val;
+
+ /* Copy the node completely. */
+ memcpy(new_node, node, sizeof(struct maple_node));
+ /* Update the parent node pointer. */
+ val = (unsigned long)node->parent & MAPLE_NODE_MASK;
+ new_node->parent = ma_parent_ptr(val | (unsigned long)parent);
+}
+
+/*
+ * mas_dup_alloc() - Allocate child nodes for a maple node.
+ * @mas: The maple state of source tree.
+ * @new_mas: The maple state of new tree.
+ * @gfp: The GFP_FLAGS to use for allocations.
+ *
+ * This function allocates child nodes for @new_mas->node during the duplication
+ * process. If memory allocation fails, @mas is set to -ENOMEM.
+ */
+static inline void mas_dup_alloc(struct ma_state *mas, struct ma_state *new_mas,
+ gfp_t gfp)
+{
+ struct maple_node *node = mte_to_node(mas->node);
+ struct maple_node *new_node = mte_to_node(new_mas->node);
+ enum maple_type type;
+ unsigned char request, count, i;
+ void __rcu **slots;
+ void __rcu **new_slots;
+ unsigned long val;
+
+ /* Allocate memory for child nodes. */
+ type = mte_node_type(mas->node);
+ new_slots = ma_slots(new_node, type);
+ request = mas_data_end(mas) + 1;
+ count = mt_alloc_bulk(gfp, request, (void **)new_slots);
+ if (unlikely(count < request)) {
+ memset(new_slots, 0, request * sizeof(void *));
+ mas_set_err(mas, -ENOMEM);
+ return;
+ }
+
+ /* Restore node type information in slots. */
+ slots = ma_slots(node, type);
+ for (i = 0; i < count; i++) {
+ val = (unsigned long)mt_slot_locked(mas->tree, slots, i);
+ val &= MAPLE_NODE_MASK;
+ ((unsigned long *)new_slots)[i] |= val;
+ }
+}
+
+/*
+ * mas_dup_build() - Build a new maple tree from a source tree
+ * @mas: The maple state of the source tree; must be in the ma_start state.
+ * @new_mas: The maple state of the new tree; must be in the ma_start state.
+ * @gfp: The GFP_FLAGS to use for allocations.
+ *
+ * This function builds a new tree in DFS preorder. If the memory allocation
+ * fails, the error code -ENOMEM will be set in @mas, and @new_mas points to the
+ * last node. mas_dup_free() will free the incomplete duplication of a tree.
+ *
+ * Note that the attributes of the two trees need to be exactly the same, and the
+ * new tree needs to be empty, otherwise -EINVAL will be set in @mas.
+ */
+static inline void mas_dup_build(struct ma_state *mas, struct ma_state *new_mas,
+ gfp_t gfp)
+{
+ struct maple_node *node;
+ struct maple_pnode *parent = NULL;
+ struct maple_enode *root;
+ enum maple_type type;
+
+ if (unlikely(mt_attr(mas->tree) != mt_attr(new_mas->tree)) ||
+ unlikely(!mtree_empty(new_mas->tree))) {
+ mas_set_err(mas, -EINVAL);
+ return;
+ }
+
+ root = mas_start(mas);
+ if (mas_is_ptr(mas) || mas_is_none(mas))
+ goto set_new_tree;
+
+ node = mt_alloc_one(gfp);
+ if (!node) {
+ new_mas->status = ma_none;
+ mas_set_err(mas, -ENOMEM);
+ return;
+ }
+
+ type = mte_node_type(mas->node);
+ root = mt_mk_node(node, type);
+ new_mas->node = root;
+ new_mas->min = 0;
+ new_mas->max = ULONG_MAX;
+ root = mte_mk_root(root);
+ while (1) {
+ mas_copy_node(mas, new_mas, parent);
+ if (!mte_is_leaf(mas->node)) {
+ /* Only allocate child nodes for non-leaf nodes. */
+ mas_dup_alloc(mas, new_mas, gfp);
+ if (unlikely(mas_is_err(mas)))
+ return;
+ } else {
+ /*
+ * This is the last leaf node and duplication is
+ * completed.
+ */
+ if (mas->max == ULONG_MAX)
+ goto done;
+
+ /* This is not the last leaf node and needs to go up. */
+ do {
+ mas_ascend(mas);
+ mas_ascend(new_mas);
+ } while (mas->offset == mas_data_end(mas));
+
+ /* Move to the next subtree. */
+ mas->offset++;
+ new_mas->offset++;
+ }
+
+ mas_descend(mas);
+ parent = ma_parent_ptr(mte_to_node(new_mas->node));
+ mas_descend(new_mas);
+ mas->offset = 0;
+ new_mas->offset = 0;
+ }
+done:
+ /* Specially handle the parent of the root node. */
+ mte_to_node(root)->parent = ma_parent_ptr(mas_tree_parent(new_mas));
+set_new_tree:
+ /* Make them the same height */
+ new_mas->tree->ma_flags = mas->tree->ma_flags;
+ rcu_assign_pointer(new_mas->tree->ma_root, root);
+}
+
+/**
+ * __mt_dup(): Duplicate an entire maple tree
+ * @mt: The source maple tree
+ * @new: The new maple tree
+ * @gfp: The GFP_FLAGS to use for allocations
+ *
+ * This function duplicates a maple tree in Depth-First Search (DFS) pre-order
+ * traversal. It copies the nodes of the source tree with memcpy() and allocates
+ * new child nodes for the non-leaf nodes. Each new node is identical to its
+ * source node except for the addresses stored in it. This is faster than
+ * traversing all elements in the source tree and inserting them one by one into
+ * the new tree.
+ * The user needs to ensure that the attributes of the source tree and the new
+ * tree are the same, and the new tree needs to be an empty tree, otherwise
+ * -EINVAL will be returned.
+ * Note that the user needs to manually lock the source tree and the new tree.
+ *
+ * Return: 0 on success, -ENOMEM if memory could not be allocated, -EINVAL if
+ * the attributes of the two trees are different or the new tree is not an empty
+ * tree.
+ */
+int __mt_dup(struct maple_tree *mt, struct maple_tree *new, gfp_t gfp)
+{
+ int ret = 0;
+ MA_STATE(mas, mt, 0, 0);
+ MA_STATE(new_mas, new, 0, 0);
+
+ mas_dup_build(&mas, &new_mas, gfp);
+ if (unlikely(mas_is_err(&mas))) {
+ ret = xa_err(mas.node);
+ if (ret == -ENOMEM)
+ mas_dup_free(&new_mas);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(__mt_dup);
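Since __mt_dup() does no locking of its own, a caller is expected to look roughly like the sketch below. The function name, the fix-up comment and the assumption that both trees use external locking already held by the caller (which is what allows a sleeping allocation) are illustrative, not taken from this patch:

#include <linux/maple_tree.h>

/* Sketch: duplicate @old into the empty, identically configured @new. */
static int dup_under_callers_lock(struct maple_tree *old,
				  struct maple_tree *new)
{
	int ret;

	ret = __mt_dup(old, new, GFP_KERNEL);
	if (ret)
		return ret;	/* -ENOMEM or -EINVAL, as documented above */

	/* If the copied entries need per-copy state, walk @new here,
	 * e.g. with mas_for_each(), and replace them in place. */
	return 0;
}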
+
+/**
+ * mtree_dup(): Duplicate an entire maple tree
+ * @mt: The source maple tree
+ * @new: The new maple tree
+ * @gfp: The GFP_FLAGS to use for allocations
+ *
+ * This function duplicates a maple tree in Depth-First Search (DFS) pre-order
+ * traversal. It copies the nodes of the source tree with memcpy() and allocates
+ * new child nodes for the non-leaf nodes. Each new node is identical to its
+ * source node except for the addresses stored in it. This is faster than
+ * traversing all elements in the source tree and inserting them one by one into
+ * the new tree.
+ * The user needs to ensure that the attributes of the source tree and the new
+ * tree are the same, and the new tree needs to be an empty tree, otherwise
+ * -EINVAL will be returned.
+ *
+ * Return: 0 on success, -ENOMEM if memory could not be allocated, -EINVAL if
+ * the attributes of the two trees are different or the new tree is not an empty
+ * tree.
+ */
+int mtree_dup(struct maple_tree *mt, struct maple_tree *new, gfp_t gfp)
+{
+ int ret = 0;
+ MA_STATE(mas, mt, 0, 0);
+ MA_STATE(new_mas, new, 0, 0);
+
+ mas_lock(&new_mas);
+ mas_lock_nested(&mas, SINGLE_DEPTH_NESTING);
+ mas_dup_build(&mas, &new_mas, gfp);
+ mas_unlock(&mas);
+ if (unlikely(mas_is_err(&mas))) {
+ ret = xa_err(mas.node);
+ if (ret == -ENOMEM)
+ mas_dup_free(&new_mas);
+ }
+
+ mas_unlock(&new_mas);
+ return ret;
+}
+EXPORT_SYMBOL(mtree_dup);
+
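For orientation, here is a minimal caller sketch of the two entry points added above; the tree variables and GFP flags are illustrative, not taken from this patch. mtree_dup() takes both tree locks itself, while __mt_dup() expects the caller to already hold them, which is the pattern the reworked check_forking() test in lib/test_maple_tree.c uses further down.

#include <linux/gfp.h>
#include <linux/maple_tree.h>

/* Hedged sketch: duplicate "src" into "dst", an empty tree with the same flags. */
static int example_duplicate(struct maple_tree *src, struct maple_tree *dst)
{
	int ret;

	ret = mtree_dup(src, dst, GFP_KERNEL);
	if (ret)	/* -ENOMEM on allocation failure, -EINVAL on mismatched trees */
		return ret;

	/* "dst" now holds the same ranges and entries as "src". */
	return 0;
}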
/**
* __mt_destroy() - Walk and free all nodes of a locked maple tree.
* @mt: The maple tree
@@ -6479,7 +6792,7 @@ void __mt_destroy(struct maple_tree *mt)
if (xa_is_node(root))
mte_destroy_walk(root, mt);
- mt->ma_flags = 0;
+ mt->ma_flags = mt_attr(mt);
}
EXPORT_SYMBOL_GPL(__mt_destroy);
@@ -6538,7 +6851,7 @@ retry:
if (entry)
goto unlock;
- while (mas_searchable(&mas) && (mas.last < max)) {
+ while (mas_is_active(&mas) && (mas.last < max)) {
entry = mas_next_entry(&mas, max);
if (likely(entry && !xa_is_zero(entry)))
break;
@@ -6620,26 +6933,6 @@ unsigned int mt_nr_allocated(void)
return kmem_cache_nr_allocated(maple_node_cache);
}
-/*
- * mas_dead_node() - Check if the maple state is pointing to a dead node.
- * @mas: The maple state
- * @index: The index to restore in @mas.
- *
- * Used in test code.
- * Return: 1 if @mas has been reset to MAS_START, 0 otherwise.
- */
-static inline int mas_dead_node(struct ma_state *mas, unsigned long index)
-{
- if (unlikely(!mas_searchable(mas) || mas_is_start(mas)))
- return 0;
-
- if (likely(!mte_dead_node(mas->node)))
- return 0;
-
- mas_rewalk(mas, index);
- return 1;
-}
-
void mt_cache_shrink(void)
{
}
@@ -6678,11 +6971,11 @@ static inline struct maple_enode *mas_get_slot(struct ma_state *mas,
static void mas_dfs_postorder(struct ma_state *mas, unsigned long max)
{
- struct maple_enode *p = MAS_NONE, *mn = mas->node;
+ struct maple_enode *p, *mn = mas->node;
unsigned long p_min, p_max;
mas_next_node(mas, mas_mn(mas), max);
- if (!mas_is_none(mas))
+ if (!mas_is_overflow(mas))
return;
if (mte_is_root(mn))
@@ -6695,7 +6988,7 @@ static void mas_dfs_postorder(struct ma_state *mas, unsigned long max)
p_min = mas->min;
p_max = mas->max;
mas_prev_node(mas, 0);
- } while (!mas_is_none(mas));
+ } while (!mas_is_underflow(mas));
mas->node = p;
mas->max = p_max;
@@ -6718,7 +7011,6 @@ static void mt_dump_range(unsigned long min, unsigned long max,
else
pr_info("%.*s%lx-%lx: ", depth * 2, spaces, min, max);
break;
- default:
case mt_dump_dec:
if (min == max)
pr_info("%.*s%lu: ", depth * 2, spaces, min);
@@ -6758,7 +7050,6 @@ static void mt_dump_range64(const struct maple_tree *mt, void *entry,
case mt_dump_hex:
pr_cont("%p %lX ", node->slot[i], node->pivot[i]);
break;
- default:
case mt_dump_dec:
pr_cont("%p %lu ", node->slot[i], node->pivot[i]);
}
@@ -6788,7 +7079,6 @@ static void mt_dump_range64(const struct maple_tree *mt, void *entry,
pr_err("node %p last (%lx) > max (%lx) at pivot %d!\n",
node, last, max, i);
break;
- default:
case mt_dump_dec:
pr_err("node %p last (%lu) > max (%lu) at pivot %d!\n",
node, last, max, i);
@@ -6813,7 +7103,6 @@ static void mt_dump_arange64(const struct maple_tree *mt, void *entry,
case mt_dump_hex:
pr_cont("%lx ", node->gap[i]);
break;
- default:
case mt_dump_dec:
pr_cont("%lu ", node->gap[i]);
}
@@ -6824,7 +7113,6 @@ static void mt_dump_arange64(const struct maple_tree *mt, void *entry,
case mt_dump_hex:
pr_cont("%p %lX ", node->slot[i], node->pivot[i]);
break;
- default:
case mt_dump_dec:
pr_cont("%p %lu ", node->slot[i], node->pivot[i]);
}
@@ -6965,7 +7253,8 @@ static void mas_validate_gaps(struct ma_state *mas)
counted:
if (mt == maple_arange_64) {
- offset = ma_meta_gap(node, mt);
+ MT_BUG_ON(mas->tree, !gaps);
+ offset = ma_meta_gap(node);
if (offset > i) {
pr_err("gap offset %p[%u] is invalid\n", node, offset);
MT_BUG_ON(mas->tree, 1);
@@ -6977,7 +7266,6 @@ counted:
MT_BUG_ON(mas->tree, 1);
}
- MT_BUG_ON(mas->tree, !gaps);
for (i++ ; i < mt_slot_count(mte); i++) {
if (gaps[i] != 0) {
pr_err("gap %p[%u] beyond node limit != 0\n",
@@ -7155,7 +7443,7 @@ static void mt_validate_nulls(struct maple_tree *mt)
MA_STATE(mas, mt, 0, 0);
mas_start(&mas);
- if (mas_is_none(&mas) || (mas.node == MAS_ROOT))
+ if (mas_is_none(&mas) || (mas_is_ptr(&mas)))
return;
while (!mte_is_leaf(mas.node))
@@ -7172,7 +7460,7 @@ static void mt_validate_nulls(struct maple_tree *mt)
last = entry;
if (offset == mas_data_end(&mas)) {
mas_next_node(&mas, mas_mn(&mas), ULONG_MAX);
- if (mas_is_none(&mas))
+ if (mas_is_overflow(&mas))
return;
offset = 0;
slots = ma_slots(mte_to_node(mas.node),
@@ -7181,7 +7469,7 @@ static void mt_validate_nulls(struct maple_tree *mt)
offset++;
}
- } while (!mas_is_none(&mas));
+ } while (!mas_is_overflow(&mas));
}
/*
@@ -7196,13 +7484,13 @@ void mt_validate(struct maple_tree *mt)
MA_STATE(mas, mt, 0, 0);
rcu_read_lock();
mas_start(&mas);
- if (!mas_searchable(&mas))
+ if (!mas_is_active(&mas))
goto done;
while (!mte_is_leaf(mas.node))
mas_descend(&mas);
- while (!mas_is_none(&mas)) {
+ while (!mas_is_overflow(&mas)) {
MAS_WARN_ON(&mas, mte_dead_node(mas.node));
end = mas_data_end(&mas);
if (MAS_WARN_ON(&mas, (end < mt_min_slot_count(mas.node)) &&
@@ -7227,16 +7515,35 @@ EXPORT_SYMBOL_GPL(mt_validate);
void mas_dump(const struct ma_state *mas)
{
pr_err("MAS: tree=%p enode=%p ", mas->tree, mas->node);
- if (mas_is_none(mas))
- pr_err("(MAS_NONE) ");
- else if (mas_is_ptr(mas))
- pr_err("(MAS_ROOT) ");
- else if (mas_is_start(mas))
- pr_err("(MAS_START) ");
- else if (mas_is_paused(mas))
- pr_err("(MAS_PAUSED) ");
-
- pr_err("[%u] index=%lx last=%lx\n", mas->offset, mas->index, mas->last);
+ switch (mas->status) {
+ case ma_active:
+ pr_err("(ma_active)");
+ break;
+ case ma_none:
+ pr_err("(ma_none)");
+ break;
+ case ma_root:
+ pr_err("(ma_root)");
+ break;
+ case ma_start:
+ pr_err("(ma_start) ");
+ break;
+ case ma_pause:
+ pr_err("(ma_pause) ");
+ break;
+ case ma_overflow:
+ pr_err("(ma_overflow) ");
+ break;
+ case ma_underflow:
+ pr_err("(ma_underflow) ");
+ break;
+ case ma_error:
+ pr_err("(ma_error) ");
+ break;
+ }
+
+ pr_err("[%u/%u] index=%lx last=%lx\n", mas->offset, mas->end,
+ mas->index, mas->last);
pr_err(" min=%lx max=%lx alloc=%p, depth=%u, flags=%x\n",
mas->min, mas->max, mas->alloc, mas->depth, mas->mas_flags);
if (mas->index > mas->last)
@@ -7249,7 +7556,7 @@ void mas_wr_dump(const struct ma_wr_state *wr_mas)
pr_err("WR_MAS: node=%p r_min=%lx r_max=%lx\n",
wr_mas->node, wr_mas->r_min, wr_mas->r_max);
pr_err(" type=%u off_end=%u, node_end=%u, end_piv=%lx\n",
- wr_mas->type, wr_mas->offset_end, wr_mas->node_end,
+ wr_mas->type, wr_mas->offset_end, wr_mas->mas->end,
wr_mas->end_piv);
}
EXPORT_SYMBOL_GPL(mas_wr_dump);
diff --git a/lib/overflow_kunit.c b/lib/overflow_kunit.c
index 34db0b3aa502..c527f6b75789 100644
--- a/lib/overflow_kunit.c
+++ b/lib/overflow_kunit.c
@@ -6,6 +6,7 @@
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <kunit/device.h>
#include <kunit/test.h>
#include <linux/device.h>
#include <linux/kernel.h>
@@ -618,7 +619,7 @@ static void overflow_allocation_test(struct kunit *test)
} while (0)
/* Create dummy device for devm_kmalloc()-family tests. */
- dev = root_device_register(device_name);
+ dev = kunit_device_register(test, device_name);
KUNIT_ASSERT_FALSE_MSG(test, IS_ERR(dev),
"Cannot register test device\n");
@@ -634,8 +635,6 @@ static void overflow_allocation_test(struct kunit *test)
check_allocation_overflow(devm_kmalloc);
check_allocation_overflow(devm_kzalloc);
- device_unregister(dev);
-
kunit_info(test, "%d allocation overflow tests finished\n", count);
#undef check_allocation_overflow
}
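The switch above from root_device_register() to kunit_device_register() is also why the explicit device_unregister() call goes away: a KUnit-managed device is torn down automatically when the test finishes. A minimal sketch of the pattern, with illustrative test and device names:

#include <kunit/device.h>
#include <kunit/test.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/gfp.h>

static void example_devm_test(struct kunit *test)
{
	struct device *dev = kunit_device_register(test, "example-dev");
	void *buf;

	KUNIT_ASSERT_FALSE(test, IS_ERR(dev));

	/* devm_* allocations hang off the test-owned device... */
	buf = devm_kzalloc(dev, 128, GFP_KERNEL);
	KUNIT_ASSERT_NOT_NULL(test, buf);
	/* ...and both are released automatically when the test exits. */
}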
diff --git a/lib/raid6/s390vx.uc b/lib/raid6/s390vx.uc
index b25dfc9c7759..cd0b9e95f499 100644
--- a/lib/raid6/s390vx.uc
+++ b/lib/raid6/s390vx.uc
@@ -158,7 +158,7 @@ static void raid6_s390vx$#_xor_syndrome(int disks, int start, int stop,
static int raid6_s390vx$#_valid(void)
{
- return MACHINE_HAS_VX;
+ return cpu_has_vx();
}
const struct raid6_calls raid6_s390vx$# = {
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index 2f5aa851834e..a0be5d05c7f0 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -18,11 +18,14 @@
#include <linux/jhash.h>
#include <linux/kernel.h>
#include <linux/kmsan.h>
+#include <linux/list.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/printk.h>
+#include <linux/refcount.h>
#include <linux/slab.h>
+#include <linux/spinlock.h>
#include <linux/stacktrace.h>
#include <linux/stackdepot.h>
#include <linux/string.h>
@@ -32,14 +35,23 @@
#define DEPOT_HANDLE_BITS (sizeof(depot_stack_handle_t) * 8)
-#define DEPOT_VALID_BITS 1
#define DEPOT_POOL_ORDER 2 /* Pool size order, 4 pages */
#define DEPOT_POOL_SIZE (1LL << (PAGE_SHIFT + DEPOT_POOL_ORDER))
#define DEPOT_STACK_ALIGN 4
#define DEPOT_OFFSET_BITS (DEPOT_POOL_ORDER + PAGE_SHIFT - DEPOT_STACK_ALIGN)
-#define DEPOT_POOL_INDEX_BITS (DEPOT_HANDLE_BITS - DEPOT_VALID_BITS - \
- DEPOT_OFFSET_BITS - STACK_DEPOT_EXTRA_BITS)
+#define DEPOT_POOL_INDEX_BITS (DEPOT_HANDLE_BITS - DEPOT_OFFSET_BITS - \
+ STACK_DEPOT_EXTRA_BITS)
+#if IS_ENABLED(CONFIG_KMSAN) && CONFIG_STACKDEPOT_MAX_FRAMES >= 32
+/*
+ * KMSAN is frequently used in fuzzing scenarios and thus saves a lot of stack
+ * traces. As KMSAN does not support evicting stack traces from the stack
+ * depot, the stack depot capacity might be reached quickly with large stack
+ * records. Adjust the maximum number of stack depot pools for this case.
+ */
+#define DEPOT_POOLS_CAP (8192 * (CONFIG_STACKDEPOT_MAX_FRAMES / 16))
+#else
#define DEPOT_POOLS_CAP 8192
+#endif
#define DEPOT_MAX_POOLS \
(((1LL << (DEPOT_POOL_INDEX_BITS)) < DEPOT_POOLS_CAP) ? \
(1LL << (DEPOT_POOL_INDEX_BITS)) : DEPOT_POOLS_CAP)
@@ -50,19 +62,22 @@ union handle_parts {
struct {
u32 pool_index : DEPOT_POOL_INDEX_BITS;
u32 offset : DEPOT_OFFSET_BITS;
- u32 valid : DEPOT_VALID_BITS;
u32 extra : STACK_DEPOT_EXTRA_BITS;
};
};
struct stack_record {
- struct stack_record *next; /* Link in the hash table */
- u32 hash; /* Hash in the hash table */
+ struct list_head list; /* Links in hash table or freelist */
+ u32 hash; /* Hash in hash table */
u32 size; /* Number of stored frames */
union handle_parts handle;
- unsigned long entries[]; /* Variable-sized array of frames */
+ refcount_t count;
+ unsigned long entries[CONFIG_STACKDEPOT_MAX_FRAMES]; /* Frames */
};
+#define DEPOT_STACK_RECORD_SIZE \
+ ALIGN(sizeof(struct stack_record), 1 << DEPOT_STACK_ALIGN)
+
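To make the reworked layout concrete, the arithmetic below assumes PAGE_SHIFT == 12, a 64-bit build, STACK_DEPOT_EXTRA_BITS == 5 and the default CONFIG_STACKDEPOT_MAX_FRAMES of 64; all of these are configuration-dependent, so treat the numbers as a sketch rather than guaranteed values.

/*
 * DEPOT_POOL_SIZE       = 1 << (12 + 2)           = 16 KiB per pool
 * DEPOT_OFFSET_BITS     = 2 + 12 - 4              = 10
 * DEPOT_POOL_INDEX_BITS = 32 - 10 - 5             = 17
 * DEPOT_MAX_POOLS       = min(1 << 17, 8192)      = 8192
 *                         (with KMSAN and 64 frames, the cap grows to
 *                          8192 * (64 / 16) = 32768)
 *
 * sizeof(struct stack_record) is roughly 16 + 4 + 4 + 4 + 4 + 64 * 8 = 544
 * bytes, so DEPOT_STACK_RECORD_SIZE stays 544 (already 16-byte aligned) and
 * each 16 KiB pool holds about 30 fixed-size records.
 */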
static bool stack_depot_disabled;
static bool __stack_depot_early_init_requested __initdata = IS_ENABLED(CONFIG_STACKDEPOT_ALWAYS_INIT);
static bool __stack_depot_early_init_passed __initdata;
@@ -75,40 +90,34 @@ static bool __stack_depot_early_init_passed __initdata;
/* Initial seed for jhash2. */
#define STACK_HASH_SEED 0x9747b28c
-/* Hash table of pointers to stored stack traces. */
-static struct stack_record **stack_table;
+/* Hash table of stored stack records. */
+static struct list_head *stack_table;
/* Fixed order of the number of table buckets. Used when KASAN is enabled. */
static unsigned int stack_bucket_number_order;
/* Hash mask for indexing the table. */
static unsigned int stack_hash_mask;
-/* Array of memory regions that store stack traces. */
+/* Array of memory regions that store stack records. */
static void *stack_pools[DEPOT_MAX_POOLS];
-/* Currently used pool in stack_pools. */
-static int pool_index;
-/* Offset to the unused space in the currently used pool. */
-static size_t pool_offset;
-/* Lock that protects the variables above. */
-static DEFINE_RAW_SPINLOCK(pool_lock);
+/* Newly allocated pool that is not yet added to stack_pools. */
+static void *new_pool;
+/* Number of pools in stack_pools. */
+static int pools_num;
+/* Freelist of stack records within stack_pools. */
+static LIST_HEAD(free_stacks);
/*
* Stack depot tries to keep an extra pool allocated even before it runs out
- * of space in the currently used pool.
- * This flag marks that this next extra pool needs to be allocated and
- * initialized. It has the value 0 when either the next pool is not yet
- * initialized or the limit on the number of pools is reached.
+ * of space in the currently used pool. This flag marks whether this extra pool
+ * needs to be allocated. It is false when either a new pool has already been
+ * saved for later use or the limit on the number of pools has been reached.
*/
-static int next_pool_required = 1;
+static bool new_pool_required = true;
+/* Lock that protects the variables above. */
+static DEFINE_RWLOCK(pool_rwlock);
static int __init disable_stack_depot(char *str)
{
- int ret;
-
- ret = kstrtobool(str, &stack_depot_disabled);
- if (!ret && stack_depot_disabled) {
- pr_info("disabled\n");
- stack_table = NULL;
- }
- return 0;
+ return kstrtobool(str, &stack_depot_disabled);
}
early_param("stack_depot_disable", disable_stack_depot);
@@ -120,6 +129,15 @@ void __init stack_depot_request_early_init(void)
__stack_depot_early_init_requested = true;
}
+/* Initialize list_head's within the hash table. */
+static void init_stack_table(unsigned long entries)
+{
+ unsigned long i;
+
+ for (i = 0; i < entries; i++)
+ INIT_LIST_HEAD(&stack_table[i]);
+}
+
/* Allocates a hash table via memblock. Can only be used during early boot. */
int __init stack_depot_early_init(void)
{
@@ -131,6 +149,15 @@ int __init stack_depot_early_init(void)
__stack_depot_early_init_passed = true;
/*
+ * Print disabled message even if early init has not been requested:
+ * stack_depot_init() will not print one.
+ */
+ if (stack_depot_disabled) {
+ pr_info("disabled\n");
+ return 0;
+ }
+
+ /*
* If KASAN is enabled, use the maximum order: KASAN is frequently used
* in fuzzing scenarios, which leads to a large number of different
* stack traces being stored in stack depot.
@@ -138,21 +165,25 @@ int __init stack_depot_early_init(void)
if (kasan_enabled() && !stack_bucket_number_order)
stack_bucket_number_order = STACK_BUCKET_NUMBER_ORDER_MAX;
- if (!__stack_depot_early_init_requested || stack_depot_disabled)
+ /*
+ * Check if early init has been requested after setting
+ * stack_bucket_number_order: stack_depot_init() uses its value.
+ */
+ if (!__stack_depot_early_init_requested)
return 0;
/*
* If stack_bucket_number_order is not set, leave entries as 0 to rely
- * on the automatic calculations performed by alloc_large_system_hash.
+ * on the automatic calculations performed by alloc_large_system_hash().
*/
if (stack_bucket_number_order)
entries = 1UL << stack_bucket_number_order;
pr_info("allocating hash table via alloc_large_system_hash\n");
stack_table = alloc_large_system_hash("stackdepot",
- sizeof(struct stack_record *),
+ sizeof(struct list_head),
entries,
STACK_HASH_TABLE_SCALE,
- HASH_EARLY | HASH_ZERO,
+ HASH_EARLY,
NULL,
&stack_hash_mask,
1UL << STACK_BUCKET_NUMBER_ORDER_MIN,
@@ -162,6 +193,14 @@ int __init stack_depot_early_init(void)
stack_depot_disabled = true;
return -ENOMEM;
}
+ if (!entries) {
+ /*
+ * Obtain the number of entries that was calculated by
+ * alloc_large_system_hash().
+ */
+ entries = stack_hash_mask + 1;
+ }
+ init_stack_table(entries);
return 0;
}
@@ -202,7 +241,7 @@ int stack_depot_init(void)
entries = 1UL << STACK_BUCKET_NUMBER_ORDER_MAX;
pr_info("allocating hash table of %lu entries via kvcalloc\n", entries);
- stack_table = kvcalloc(entries, sizeof(struct stack_record *), GFP_KERNEL);
+ stack_table = kvcalloc(entries, sizeof(struct list_head), GFP_KERNEL);
if (!stack_table) {
pr_err("hash table allocation failed, disabling\n");
stack_depot_disabled = true;
@@ -210,6 +249,7 @@ int stack_depot_init(void)
goto out_unlock;
}
stack_hash_mask = entries - 1;
+ init_stack_table(entries);
out_unlock:
mutex_unlock(&stack_depot_init_mutex);
@@ -218,41 +258,103 @@ out_unlock:
}
EXPORT_SYMBOL_GPL(stack_depot_init);
-/* Uses preallocated memory to initialize a new stack depot pool. */
-static void depot_init_pool(void **prealloc)
+/* Initializes a stack depot pool. */
+static void depot_init_pool(void *pool)
{
+ int offset;
+
+ lockdep_assert_held_write(&pool_rwlock);
+
+ WARN_ON(!list_empty(&free_stacks));
+
+ /* Initialize handles and link stack records into the freelist. */
+ for (offset = 0; offset <= DEPOT_POOL_SIZE - DEPOT_STACK_RECORD_SIZE;
+ offset += DEPOT_STACK_RECORD_SIZE) {
+ struct stack_record *stack = pool + offset;
+
+ stack->handle.pool_index = pools_num;
+ stack->handle.offset = offset >> DEPOT_STACK_ALIGN;
+ stack->handle.extra = 0;
+
+ list_add(&stack->list, &free_stacks);
+ }
+
+ /* Save reference to the pool to be used by depot_fetch_stack(). */
+ stack_pools[pools_num] = pool;
+ pools_num++;
+}
+
+/* Keeps the preallocated memory to be used for a new stack depot pool. */
+static void depot_keep_new_pool(void **prealloc)
+{
+ lockdep_assert_held_write(&pool_rwlock);
+
/*
- * If the next pool is already initialized or the maximum number of
+ * If a new pool is already saved or the maximum number of
* pools is reached, do not use the preallocated memory.
- * smp_load_acquire() here pairs with smp_store_release() below and
- * in depot_alloc_stack().
*/
- if (!smp_load_acquire(&next_pool_required))
+ if (!new_pool_required)
return;
- /* Check if the current pool is not yet allocated. */
- if (stack_pools[pool_index] == NULL) {
- /* Use the preallocated memory for the current pool. */
- stack_pools[pool_index] = *prealloc;
+ /*
+ * Use the preallocated memory for the new pool
+ * as long as we do not exceed the maximum number of pools.
+ */
+ if (pools_num < DEPOT_MAX_POOLS) {
+ new_pool = *prealloc;
*prealloc = NULL;
- } else {
- /*
- * Otherwise, use the preallocated memory for the next pool
- * as long as we do not exceed the maximum number of pools.
- */
- if (pool_index + 1 < DEPOT_MAX_POOLS) {
- stack_pools[pool_index + 1] = *prealloc;
- *prealloc = NULL;
- }
- /*
- * At this point, either the next pool is initialized or the
- * maximum number of pools is reached. In either case, take
- * note that initializing another pool is not required.
- * This smp_store_release pairs with smp_load_acquire() above
- * and in stack_depot_save().
- */
- smp_store_release(&next_pool_required, 0);
}
+
+ /*
+ * At this point, either a new pool is kept or the maximum
+ * number of pools is reached. In either case, take note that
+ * keeping another pool is not required.
+ */
+ new_pool_required = false;
+}
+
+/* Updates references to the current and the next stack depot pools. */
+static bool depot_update_pools(void **prealloc)
+{
+ lockdep_assert_held_write(&pool_rwlock);
+
+ /* Check if we still have objects in the freelist. */
+ if (!list_empty(&free_stacks))
+ goto out_keep_prealloc;
+
+ /* Check if we have a new pool saved and use it. */
+ if (new_pool) {
+ depot_init_pool(new_pool);
+ new_pool = NULL;
+
+ /* Take note that we might need a new new_pool. */
+ if (pools_num < DEPOT_MAX_POOLS)
+ new_pool_required = true;
+
+ /* Try keeping the preallocated memory for new_pool. */
+ goto out_keep_prealloc;
+ }
+
+ /* Bail out if we reached the pool limit. */
+ if (unlikely(pools_num >= DEPOT_MAX_POOLS)) {
+ WARN_ONCE(1, "Stack depot reached limit capacity");
+ return false;
+ }
+
+ /* Check if we have preallocated memory and use it. */
+ if (*prealloc) {
+ depot_init_pool(*prealloc);
+ *prealloc = NULL;
+ return true;
+ }
+
+ return false;
+
+out_keep_prealloc:
+ /* Keep the preallocated memory for a new pool if required. */
+ if (*prealloc)
+ depot_keep_new_pool(prealloc);
+ return true;
}
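A compact way to read the pool management above (a descriptive sketch of the refactor, not code from the patch):

/*
 * depot_alloc_stack():
 *   free_stacks non-empty -> pop a record from the freelist
 *   free_stacks empty     -> new_pool saved?  carve it up (depot_init_pool)
 *                            else, if allowed, carve up *prealloc
 * stack_depot_put() dropping the last reference returns the record to
 * free_stacks via depot_free_stack().
 */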
/* Allocates a new stack in a stack depot pool. */
@@ -260,62 +362,72 @@ static struct stack_record *
depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
{
struct stack_record *stack;
- size_t required_size = struct_size(stack, entries, size);
- required_size = ALIGN(required_size, 1 << DEPOT_STACK_ALIGN);
+ lockdep_assert_held_write(&pool_rwlock);
- /* Check if there is not enough space in the current pool. */
- if (unlikely(pool_offset + required_size > DEPOT_POOL_SIZE)) {
- /* Bail out if we reached the pool limit. */
- if (unlikely(pool_index + 1 >= DEPOT_MAX_POOLS)) {
- WARN_ONCE(1, "Stack depot reached limit capacity");
- return NULL;
- }
+ /* Update current and new pools if required and possible. */
+ if (!depot_update_pools(prealloc))
+ return NULL;
- /*
- * Move on to the next pool.
- * WRITE_ONCE pairs with potential concurrent read in
- * stack_depot_fetch().
- */
- WRITE_ONCE(pool_index, pool_index + 1);
- pool_offset = 0;
- /*
- * If the maximum number of pools is not reached, take note
- * that the next pool needs to initialized.
- * smp_store_release() here pairs with smp_load_acquire() in
- * stack_depot_save() and depot_init_pool().
- */
- if (pool_index + 1 < DEPOT_MAX_POOLS)
- smp_store_release(&next_pool_required, 1);
- }
+ /* Check if we have a stack record to save the stack trace. */
+ if (list_empty(&free_stacks))
+ return NULL;
- /* Assign the preallocated memory to a pool if required. */
- if (*prealloc)
- depot_init_pool(prealloc);
+ /* Get and unlink the first entry from the freelist. */
+ stack = list_first_entry(&free_stacks, struct stack_record, list);
+ list_del(&stack->list);
- /* Check if we have a pool to save the stack trace. */
- if (stack_pools[pool_index] == NULL)
- return NULL;
+ /* Limit number of saved frames to CONFIG_STACKDEPOT_MAX_FRAMES. */
+ if (size > CONFIG_STACKDEPOT_MAX_FRAMES)
+ size = CONFIG_STACKDEPOT_MAX_FRAMES;
/* Save the stack trace. */
- stack = stack_pools[pool_index] + pool_offset;
stack->hash = hash;
stack->size = size;
- stack->handle.pool_index = pool_index;
- stack->handle.offset = pool_offset >> DEPOT_STACK_ALIGN;
- stack->handle.valid = 1;
- stack->handle.extra = 0;
+ /* stack->handle is already filled in by depot_init_pool(). */
+ refcount_set(&stack->count, 1);
memcpy(stack->entries, entries, flex_array_size(stack, entries, size));
- pool_offset += required_size;
+
/*
* Let KMSAN know the stored stack record is initialized. This shall
* prevent false positive reports if instrumented code accesses it.
*/
- kmsan_unpoison_memory(stack, required_size);
+ kmsan_unpoison_memory(stack, DEPOT_STACK_RECORD_SIZE);
return stack;
}
+static struct stack_record *depot_fetch_stack(depot_stack_handle_t handle)
+{
+ union handle_parts parts = { .handle = handle };
+ void *pool;
+ size_t offset = parts.offset << DEPOT_STACK_ALIGN;
+ struct stack_record *stack;
+
+ lockdep_assert_held(&pool_rwlock);
+
+ if (parts.pool_index > pools_num) {
+ WARN(1, "pool index %d out of bounds (%d) for stack id %08x\n",
+ parts.pool_index, pools_num, handle);
+ return NULL;
+ }
+
+ pool = stack_pools[parts.pool_index];
+ if (!pool)
+ return NULL;
+
+ stack = pool + offset;
+ return stack;
+}
+
+/* Links stack into the freelist. */
+static void depot_free_stack(struct stack_record *stack)
+{
+ lockdep_assert_held_write(&pool_rwlock);
+
+ list_add(&stack->list, &free_stacks);
+}
+
/* Calculates the hash for a stack. */
static inline u32 hash_stack(unsigned long *entries, unsigned int size)
{
@@ -340,13 +452,17 @@ int stackdepot_memcmp(const unsigned long *u1, const unsigned long *u2,
}
/* Finds a stack in a bucket of the hash table. */
-static inline struct stack_record *find_stack(struct stack_record *bucket,
+static inline struct stack_record *find_stack(struct list_head *bucket,
unsigned long *entries, int size,
u32 hash)
{
+ struct list_head *pos;
struct stack_record *found;
- for (found = bucket; found; found = found->next) {
+ lockdep_assert_held(&pool_rwlock);
+
+ list_for_each(pos, bucket) {
+ found = list_entry(pos, struct stack_record, list);
if (found->hash == hash &&
found->size == size &&
!stackdepot_memcmp(entries, found->entries, size))
@@ -355,17 +471,24 @@ static inline struct stack_record *find_stack(struct stack_record *bucket,
return NULL;
}
-depot_stack_handle_t __stack_depot_save(unsigned long *entries,
- unsigned int nr_entries,
- gfp_t alloc_flags, bool can_alloc)
+depot_stack_handle_t stack_depot_save_flags(unsigned long *entries,
+ unsigned int nr_entries,
+ gfp_t alloc_flags,
+ depot_flags_t depot_flags)
{
- struct stack_record *found = NULL, **bucket;
- union handle_parts retval = { .handle = 0 };
+ struct list_head *bucket;
+ struct stack_record *found = NULL;
+ depot_stack_handle_t handle = 0;
struct page *page = NULL;
void *prealloc = NULL;
+ bool can_alloc = depot_flags & STACK_DEPOT_FLAG_CAN_ALLOC;
+ bool need_alloc = false;
unsigned long flags;
u32 hash;
+ if (WARN_ON(depot_flags & ~STACK_DEPOT_FLAGS_MASK))
+ return 0;
+
/*
* If this stack trace is from an interrupt, including anything before
* interrupt entry usually leads to unbounded stack depot growth.
@@ -377,28 +500,36 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries,
nr_entries = filter_irq_stacks(entries, nr_entries);
if (unlikely(nr_entries == 0) || stack_depot_disabled)
- goto fast_exit;
+ return 0;
hash = hash_stack(entries, nr_entries);
bucket = &stack_table[hash & stack_hash_mask];
- /*
- * Fast path: look the stack trace up without locking.
- * The smp_load_acquire() here pairs with smp_store_release() to
- * |bucket| below.
- */
- found = find_stack(smp_load_acquire(bucket), entries, nr_entries, hash);
- if (found)
+ read_lock_irqsave(&pool_rwlock, flags);
+ printk_deferred_enter();
+
+ /* Fast path: look the stack trace up without full locking. */
+ found = find_stack(bucket, entries, nr_entries, hash);
+ if (found) {
+ if (depot_flags & STACK_DEPOT_FLAG_GET)
+ refcount_inc(&found->count);
+ printk_deferred_exit();
+ read_unlock_irqrestore(&pool_rwlock, flags);
goto exit;
+ }
+
+ /* Take note if another stack pool needs to be allocated. */
+ if (new_pool_required)
+ need_alloc = true;
+
+ printk_deferred_exit();
+ read_unlock_irqrestore(&pool_rwlock, flags);
/*
- * Check if another stack pool needs to be initialized. If so, allocate
- * the memory now - we won't be able to do that under the lock.
- *
- * The smp_load_acquire() here pairs with smp_store_release() to
- * |next_pool_inited| in depot_alloc_stack() and depot_init_pool().
+ * Allocate memory for a new pool if required now:
+ * we won't be able to do that under the lock.
*/
- if (unlikely(can_alloc && smp_load_acquire(&next_pool_required))) {
+ if (unlikely(can_alloc && need_alloc)) {
/*
* Zero out zone modifiers, as we don't have specific zone
* requirements. Keep the flags related to allocation in atomic
@@ -412,63 +543,56 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries,
prealloc = page_address(page);
}
- raw_spin_lock_irqsave(&pool_lock, flags);
+ write_lock_irqsave(&pool_rwlock, flags);
+ printk_deferred_enter();
- found = find_stack(*bucket, entries, nr_entries, hash);
+ found = find_stack(bucket, entries, nr_entries, hash);
if (!found) {
struct stack_record *new =
depot_alloc_stack(entries, nr_entries, hash, &prealloc);
if (new) {
- new->next = *bucket;
- /*
- * This smp_store_release() pairs with
- * smp_load_acquire() from |bucket| above.
- */
- smp_store_release(bucket, new);
+ list_add(&new->list, bucket);
found = new;
}
- } else if (prealloc) {
+ } else {
+ if (depot_flags & STACK_DEPOT_FLAG_GET)
+ refcount_inc(&found->count);
/*
* Stack depot already contains this stack trace, but let's
- * keep the preallocated memory for the future.
+ * keep the preallocated memory for future.
*/
- depot_init_pool(&prealloc);
+ if (prealloc)
+ depot_keep_new_pool(&prealloc);
}
- raw_spin_unlock_irqrestore(&pool_lock, flags);
+ printk_deferred_exit();
+ write_unlock_irqrestore(&pool_rwlock, flags);
exit:
if (prealloc) {
/* Stack depot didn't use this memory, free it. */
free_pages((unsigned long)prealloc, DEPOT_POOL_ORDER);
}
if (found)
- retval.handle = found->handle.handle;
-fast_exit:
- return retval.handle;
+ handle = found->handle.handle;
+ return handle;
}
-EXPORT_SYMBOL_GPL(__stack_depot_save);
+EXPORT_SYMBOL_GPL(stack_depot_save_flags);
depot_stack_handle_t stack_depot_save(unsigned long *entries,
unsigned int nr_entries,
gfp_t alloc_flags)
{
- return __stack_depot_save(entries, nr_entries, alloc_flags, true);
+ return stack_depot_save_flags(entries, nr_entries, alloc_flags,
+ STACK_DEPOT_FLAG_CAN_ALLOC);
}
EXPORT_SYMBOL_GPL(stack_depot_save);
unsigned int stack_depot_fetch(depot_stack_handle_t handle,
unsigned long **entries)
{
- union handle_parts parts = { .handle = handle };
- /*
- * READ_ONCE pairs with potential concurrent write in
- * depot_alloc_stack.
- */
- int pool_index_cached = READ_ONCE(pool_index);
- void *pool;
- size_t offset = parts.offset << DEPOT_STACK_ALIGN;
struct stack_record *stack;
+ unsigned long flags;
*entries = NULL;
/*
@@ -477,24 +601,51 @@ unsigned int stack_depot_fetch(depot_stack_handle_t handle,
*/
kmsan_unpoison_memory(entries, sizeof(*entries));
- if (!handle)
+ if (!handle || stack_depot_disabled)
return 0;
- if (parts.pool_index > pool_index_cached) {
- WARN(1, "pool index %d out of bounds (%d) for stack id %08x\n",
- parts.pool_index, pool_index_cached, handle);
- return 0;
- }
- pool = stack_pools[parts.pool_index];
- if (!pool)
- return 0;
- stack = pool + offset;
+ read_lock_irqsave(&pool_rwlock, flags);
+ printk_deferred_enter();
+
+ stack = depot_fetch_stack(handle);
+
+ printk_deferred_exit();
+ read_unlock_irqrestore(&pool_rwlock, flags);
*entries = stack->entries;
return stack->size;
}
EXPORT_SYMBOL_GPL(stack_depot_fetch);
+void stack_depot_put(depot_stack_handle_t handle)
+{
+ struct stack_record *stack;
+ unsigned long flags;
+
+ if (!handle || stack_depot_disabled)
+ return;
+
+ write_lock_irqsave(&pool_rwlock, flags);
+ printk_deferred_enter();
+
+ stack = depot_fetch_stack(handle);
+ if (WARN_ON(!stack))
+ goto out;
+
+ if (refcount_dec_and_test(&stack->count)) {
+ /* Unlink stack from the hash table. */
+ list_del(&stack->list);
+
+ /* Free stack. */
+ depot_free_stack(stack);
+ }
+
+out:
+ printk_deferred_exit();
+ write_unlock_irqrestore(&pool_rwlock, flags);
+}
+EXPORT_SYMBOL_GPL(stack_depot_put);
+
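A hedged usage sketch of the new reference-counted API introduced above; the local names are illustrative, while the calls themselves are the ones added by this patch plus the existing stack_trace_save():

#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>

static depot_stack_handle_t example_get_handle(void)
{
	unsigned long entries[16];
	unsigned int nr;

	nr = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
	/* STACK_DEPOT_FLAG_GET takes a reference; pair it with stack_depot_put(). */
	return stack_depot_save_flags(entries, nr, GFP_KERNEL,
				      STACK_DEPOT_FLAG_CAN_ALLOC |
				      STACK_DEPOT_FLAG_GET);
}

static void example_put_handle(depot_stack_handle_t handle)
{
	if (handle)
		stack_depot_put(handle);	/* freed once the refcount drops to zero */
}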
void stack_depot_print(depot_stack_handle_t stack)
{
unsigned long *entries;
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 7916503e6a6a..569e6d2dc55c 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -5144,22 +5144,6 @@ static struct bpf_test tests[] = {
{ },
{ { 0, 0x1 } },
},
- {
- "ALU_MOVSX | BPF_W",
- .u.insns_int = {
- BPF_LD_IMM64(R2, 0x00000000deadbeefLL),
- BPF_LD_IMM64(R3, 0xdeadbeefdeadbeefLL),
- BPF_MOVSX32_REG(R1, R3, 32),
- BPF_JMP_REG(BPF_JEQ, R2, R1, 2),
- BPF_MOV32_IMM(R0, 2),
- BPF_EXIT_INSN(),
- BPF_MOV32_IMM(R0, 1),
- BPF_EXIT_INSN(),
- },
- INTERNAL,
- { },
- { { 0, 0x1 } },
- },
/* MOVSX64 REG */
{
"ALU64_MOVSX | BPF_B",
@@ -6293,7 +6277,7 @@ static struct bpf_test tests[] = {
},
/* BPF_ALU64 | BPF_MOD | BPF_K off=1 (SMOD64) */
{
- "ALU64_SMOD_X: -7 % 2 = -1",
+ "ALU64_SMOD_K: -7 % 2 = -1",
.u.insns_int = {
BPF_LD_IMM64(R0, -7),
BPF_ALU64_IMM_OFF(BPF_MOD, R0, 2, 1),
@@ -12215,7 +12199,6 @@ static struct bpf_test tests[] = {
BPF_JMP32_IMM_ZEXT(JLE),
BPF_JMP32_IMM_ZEXT(JSGT),
BPF_JMP32_IMM_ZEXT(JSGE),
- BPF_JMP32_IMM_ZEXT(JSGT),
BPF_JMP32_IMM_ZEXT(JSLT),
BPF_JMP32_IMM_ZEXT(JSLE),
#undef BPF_JMP2_IMM_ZEXT
@@ -12251,7 +12234,6 @@ static struct bpf_test tests[] = {
BPF_JMP32_REG_ZEXT(JLE),
BPF_JMP32_REG_ZEXT(JSGT),
BPF_JMP32_REG_ZEXT(JSGE),
- BPF_JMP32_REG_ZEXT(JSGT),
BPF_JMP32_REG_ZEXT(JSLT),
BPF_JMP32_REG_ZEXT(JSLE),
#undef BPF_JMP2_REG_ZEXT
diff --git a/lib/test_firmware.c b/lib/test_firmware.c
index add4699fc6cd..9cfdcd6d21db 100644
--- a/lib/test_firmware.c
+++ b/lib/test_firmware.c
@@ -1132,6 +1132,7 @@ static const char * const fw_upload_err_str[] = {
[FW_UPLOAD_ERR_INVALID_SIZE] = "invalid-file-size",
[FW_UPLOAD_ERR_RW_ERROR] = "read-write-error",
[FW_UPLOAD_ERR_WEAROUT] = "flash-wearout",
+ [FW_UPLOAD_ERR_FW_INVALID] = "firmware-invalid",
};
static void upload_err_inject_error(struct test_firmware_upload *tst,
diff --git a/lib/test_ida.c b/lib/test_ida.c
index b06880625961..072a49897e71 100644
--- a/lib/test_ida.c
+++ b/lib/test_ida.c
@@ -13,7 +13,7 @@ static unsigned int tests_run;
static unsigned int tests_passed;
#ifdef __KERNEL__
-void ida_dump(struct ida *ida) { }
+static void ida_dump(struct ida *ida) { }
#endif
#define IDA_BUG_ON(ida, x) do { \
tests_run++; \
@@ -150,6 +150,45 @@ static void ida_check_conv(struct ida *ida)
IDA_BUG_ON(ida, !ida_is_empty(ida));
}
+/*
+ * Check various situations where we attempt to free an ID we don't own.
+ */
+static void ida_check_bad_free(struct ida *ida)
+{
+ unsigned long i;
+
+ printk("vvv Ignore \"not allocated\" warnings\n");
+ /* IDA is empty; all of these will fail */
+ ida_free(ida, 0);
+ for (i = 0; i < 31; i++)
+ ida_free(ida, 1 << i);
+
+ /* IDA contains a single value entry */
+ IDA_BUG_ON(ida, ida_alloc_min(ida, 3, GFP_KERNEL) != 3);
+ ida_free(ida, 0);
+ for (i = 0; i < 31; i++)
+ ida_free(ida, 1 << i);
+
+ /* IDA contains a single bitmap */
+ IDA_BUG_ON(ida, ida_alloc_min(ida, 1023, GFP_KERNEL) != 1023);
+ ida_free(ida, 0);
+ for (i = 0; i < 31; i++)
+ ida_free(ida, 1 << i);
+
+ /* IDA contains a tree */
+ IDA_BUG_ON(ida, ida_alloc_min(ida, (1 << 20) - 1, GFP_KERNEL) != (1 << 20) - 1);
+ ida_free(ida, 0);
+ for (i = 0; i < 31; i++)
+ ida_free(ida, 1 << i);
+ printk("^^^ \"not allocated\" warnings over\n");
+
+ ida_free(ida, 3);
+ ida_free(ida, 1023);
+ ida_free(ida, (1 << 20) - 1);
+
+ IDA_BUG_ON(ida, !ida_is_empty(ida));
+}
+
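For contrast with the bad-free cases exercised above, a minimal sketch of the correct pairing (the IDA name is illustrative): only pass ida_free() an ID previously returned by ida_alloc*().

#include <linux/gfp.h>
#include <linux/idr.h>

static DEFINE_IDA(example_ida);

static int example_ida_use(void)
{
	int id = ida_alloc(&example_ida, GFP_KERNEL);

	if (id < 0)
		return id;
	/* ... use id ... */
	ida_free(&example_ida, id);
	return 0;
}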
static DEFINE_IDA(ida);
static int ida_checks(void)
@@ -162,6 +201,7 @@ static int ida_checks(void)
ida_check_leaf(&ida, 1024 * 64);
ida_check_max(&ida);
ida_check_conv(&ida);
+ ida_check_bad_free(&ida);
printk("IDA: %u of %u tests passed\n", tests_passed, tests_run);
return (tests_run != tests_passed) ? 0 : -EINVAL;
diff --git a/lib/test_maple_tree.c b/lib/test_maple_tree.c
index 464eeb90d5ad..29185ac5c727 100644
--- a/lib/test_maple_tree.c
+++ b/lib/test_maple_tree.c
@@ -43,6 +43,7 @@ atomic_t maple_tree_tests_passed;
/* #define BENCH_NODE_STORE */
/* #define BENCH_AWALK */
/* #define BENCH_WALK */
+/* #define BENCH_LOAD */
/* #define BENCH_MT_FOR_EACH */
/* #define BENCH_FORK */
/* #define BENCH_MAS_FOR_EACH */
@@ -54,6 +55,11 @@ atomic_t maple_tree_tests_passed;
#else
#define cond_resched() do {} while (0)
#endif
+
+#define mas_is_none(x) ((x)->status == ma_none)
+#define mas_is_overflow(x) ((x)->status == ma_overflow)
+#define mas_is_underflow(x) ((x)->status == ma_underflow)
+
static int __init mtree_insert_index(struct maple_tree *mt,
unsigned long index, gfp_t gfp)
{
@@ -582,7 +588,7 @@ static noinline void __init check_find(struct maple_tree *mt)
MT_BUG_ON(mt, last != mas.last);
- mas.node = MAS_NONE;
+ mas.status = ma_none;
mas.index = ULONG_MAX;
mas.last = ULONG_MAX;
entry2 = mas_prev(&mas, 0);
@@ -1749,6 +1755,19 @@ static noinline void __init bench_walk(struct maple_tree *mt)
}
#endif
+#if defined(BENCH_LOAD)
+static noinline void __init bench_load(struct maple_tree *mt)
+{
+ int i, max = 2500, count = 550000000;
+
+ for (i = 0; i < max; i += 10)
+ mtree_store_range(mt, i, i + 5, xa_mk_value(i), GFP_KERNEL);
+
+ for (i = 0; i < count; i++)
+ mtree_load(mt, 1470);
+}
+#endif
+
#if defined(BENCH_MT_FOR_EACH)
static noinline void __init bench_mt_for_each(struct maple_tree *mt)
{
@@ -1834,47 +1853,48 @@ static noinline void __init bench_mas_prev(struct maple_tree *mt)
}
#endif
/* check_forking - simulate the kernel forking sequence with the tree. */
-static noinline void __init check_forking(struct maple_tree *mt)
+static noinline void __init check_forking(void)
{
-
- struct maple_tree newmt;
- int i, nr_entries = 134;
+ struct maple_tree mt, newmt;
+ int i, nr_entries = 134, ret;
void *val;
- MA_STATE(mas, mt, 0, 0);
- MA_STATE(newmas, mt, 0, 0);
- struct rw_semaphore newmt_lock;
+ MA_STATE(mas, &mt, 0, 0);
+ MA_STATE(newmas, &newmt, 0, 0);
+ struct rw_semaphore mt_lock, newmt_lock;
+ init_rwsem(&mt_lock);
init_rwsem(&newmt_lock);
- for (i = 0; i <= nr_entries; i++)
- mtree_store_range(mt, i*10, i*10 + 5,
- xa_mk_value(i), GFP_KERNEL);
+ mt_init_flags(&mt, MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN);
+ mt_set_external_lock(&mt, &mt_lock);
- mt_set_non_kernel(99999);
mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN);
mt_set_external_lock(&newmt, &newmt_lock);
- newmas.tree = &newmt;
- mas_reset(&newmas);
- mas_reset(&mas);
- down_write(&newmt_lock);
- mas.index = 0;
- mas.last = 0;
- if (mas_expected_entries(&newmas, nr_entries)) {
+
+ down_write(&mt_lock);
+ for (i = 0; i <= nr_entries; i++) {
+ mas_set_range(&mas, i*10, i*10 + 5);
+ mas_store_gfp(&mas, xa_mk_value(i), GFP_KERNEL);
+ }
+
+ down_write_nested(&newmt_lock, SINGLE_DEPTH_NESTING);
+ ret = __mt_dup(&mt, &newmt, GFP_KERNEL);
+ if (ret) {
pr_err("OOM!");
BUG_ON(1);
}
- rcu_read_lock();
- mas_for_each(&mas, val, ULONG_MAX) {
- newmas.index = mas.index;
- newmas.last = mas.last;
+
+ mas_set(&newmas, 0);
+ mas_for_each(&newmas, val, ULONG_MAX)
mas_store(&newmas, val);
- }
- rcu_read_unlock();
+
mas_destroy(&newmas);
+ mas_destroy(&mas);
mt_validate(&newmt);
- mt_set_non_kernel(0);
__mt_destroy(&newmt);
+ __mt_destroy(&mt);
up_write(&newmt_lock);
+ up_write(&mt_lock);
}
static noinline void __init check_iteration(struct maple_tree *mt)
@@ -1977,49 +1997,51 @@ static noinline void __init check_mas_store_gfp(struct maple_tree *mt)
}
#if defined(BENCH_FORK)
-static noinline void __init bench_forking(struct maple_tree *mt)
+static noinline void __init bench_forking(void)
{
-
- struct maple_tree newmt;
- int i, nr_entries = 134, nr_fork = 80000;
+ struct maple_tree mt, newmt;
+ int i, nr_entries = 134, nr_fork = 80000, ret;
void *val;
- MA_STATE(mas, mt, 0, 0);
- MA_STATE(newmas, mt, 0, 0);
- struct rw_semaphore newmt_lock;
+ MA_STATE(mas, &mt, 0, 0);
+ MA_STATE(newmas, &newmt, 0, 0);
+ struct rw_semaphore mt_lock, newmt_lock;
+ init_rwsem(&mt_lock);
init_rwsem(&newmt_lock);
- mt_set_external_lock(&newmt, &newmt_lock);
- for (i = 0; i <= nr_entries; i++)
- mtree_store_range(mt, i*10, i*10 + 5,
- xa_mk_value(i), GFP_KERNEL);
+ mt_init_flags(&mt, MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN);
+ mt_set_external_lock(&mt, &mt_lock);
+
+ down_write(&mt_lock);
+ for (i = 0; i <= nr_entries; i++) {
+ mas_set_range(&mas, i*10, i*10 + 5);
+ mas_store_gfp(&mas, xa_mk_value(i), GFP_KERNEL);
+ }
for (i = 0; i < nr_fork; i++) {
- mt_set_non_kernel(99999);
- mt_init_flags(&newmt, MT_FLAGS_ALLOC_RANGE);
- newmas.tree = &newmt;
- mas_reset(&newmas);
- mas_reset(&mas);
- mas.index = 0;
- mas.last = 0;
- rcu_read_lock();
- down_write(&newmt_lock);
- if (mas_expected_entries(&newmas, nr_entries)) {
- printk("OOM!");
+ mt_init_flags(&newmt,
+ MT_FLAGS_ALLOC_RANGE | MT_FLAGS_LOCK_EXTERN);
+ mt_set_external_lock(&newmt, &newmt_lock);
+
+ down_write_nested(&newmt_lock, SINGLE_DEPTH_NESTING);
+ ret = __mt_dup(&mt, &newmt, GFP_KERNEL);
+ if (ret) {
+ pr_err("OOM!");
BUG_ON(1);
}
- mas_for_each(&mas, val, ULONG_MAX) {
- newmas.index = mas.index;
- newmas.last = mas.last;
+
+ mas_set(&newmas, 0);
+ mas_for_each(&newmas, val, ULONG_MAX)
mas_store(&newmas, val);
- }
+
mas_destroy(&newmas);
- rcu_read_unlock();
mt_validate(&newmt);
- mt_set_non_kernel(0);
__mt_destroy(&newmt);
up_write(&newmt_lock);
}
+ mas_destroy(&mas);
+ __mt_destroy(&mt);
+ up_write(&mt_lock);
}
#endif
@@ -2175,7 +2197,7 @@ static noinline void __init next_prev_test(struct maple_tree *mt)
MT_BUG_ON(mt, val != NULL);
MT_BUG_ON(mt, mas.index != 0);
MT_BUG_ON(mt, mas.last != 5);
- MT_BUG_ON(mt, mas.node != MAS_UNDERFLOW);
+ MT_BUG_ON(mt, !mas_is_underflow(&mas));
mas.index = 0;
mas.last = 5;
@@ -3039,10 +3061,6 @@ static noinline void __init check_empty_area_fill(struct maple_tree *mt)
* DNE active active range of NULL
*/
-#define mas_active(x) (((x).node != MAS_ROOT) && \
- ((x).node != MAS_START) && \
- ((x).node != MAS_PAUSE) && \
- ((x).node != MAS_NONE))
static noinline void __init check_state_handling(struct maple_tree *mt)
{
MA_STATE(mas, mt, 0, 0);
@@ -3057,7 +3075,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
/* prev: Start -> underflow*/
entry = mas_prev(&mas, 0);
MT_BUG_ON(mt, entry != NULL);
- MT_BUG_ON(mt, mas.node != MAS_UNDERFLOW);
+ MT_BUG_ON(mt, mas.status != ma_underflow);
/* prev: Start -> root */
mas_set(&mas, 10);
@@ -3065,7 +3083,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0);
MT_BUG_ON(mt, mas.last != 0);
- MT_BUG_ON(mt, mas.node != MAS_ROOT);
+ MT_BUG_ON(mt, mas.status != ma_root);
/* prev: pause -> root */
mas_set(&mas, 10);
@@ -3074,7 +3092,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0);
MT_BUG_ON(mt, mas.last != 0);
- MT_BUG_ON(mt, mas.node != MAS_ROOT);
+ MT_BUG_ON(mt, mas.status != ma_root);
/* next: start -> none */
mas_set(&mas, 0);
@@ -3082,7 +3100,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, mas.index != 1);
MT_BUG_ON(mt, mas.last != ULONG_MAX);
MT_BUG_ON(mt, entry != NULL);
- MT_BUG_ON(mt, mas.node != MAS_NONE);
+ MT_BUG_ON(mt, mas.status != ma_none);
/* next: start -> none*/
mas_set(&mas, 10);
@@ -3090,7 +3108,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, mas.index != 1);
MT_BUG_ON(mt, mas.last != ULONG_MAX);
MT_BUG_ON(mt, entry != NULL);
- MT_BUG_ON(mt, mas.node != MAS_NONE);
+ MT_BUG_ON(mt, mas.status != ma_none);
/* find: start -> root */
mas_set(&mas, 0);
@@ -3098,21 +3116,21 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0);
MT_BUG_ON(mt, mas.last != 0);
- MT_BUG_ON(mt, mas.node != MAS_ROOT);
+ MT_BUG_ON(mt, mas.status != ma_root);
/* find: root -> none */
entry = mas_find(&mas, ULONG_MAX);
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 1);
MT_BUG_ON(mt, mas.last != ULONG_MAX);
- MT_BUG_ON(mt, mas.node != MAS_NONE);
+ MT_BUG_ON(mt, mas.status != ma_none);
/* find: none -> none */
entry = mas_find(&mas, ULONG_MAX);
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 1);
MT_BUG_ON(mt, mas.last != ULONG_MAX);
- MT_BUG_ON(mt, mas.node != MAS_NONE);
+ MT_BUG_ON(mt, mas.status != ma_none);
/* find: start -> none */
mas_set(&mas, 10);
@@ -3120,14 +3138,14 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 1);
MT_BUG_ON(mt, mas.last != ULONG_MAX);
- MT_BUG_ON(mt, mas.node != MAS_NONE);
+ MT_BUG_ON(mt, mas.status != ma_none);
/* find_rev: none -> root */
entry = mas_find_rev(&mas, 0);
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0);
MT_BUG_ON(mt, mas.last != 0);
- MT_BUG_ON(mt, mas.node != MAS_ROOT);
+ MT_BUG_ON(mt, mas.status != ma_root);
/* find_rev: start -> root */
mas_set(&mas, 0);
@@ -3135,21 +3153,21 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0);
MT_BUG_ON(mt, mas.last != 0);
- MT_BUG_ON(mt, mas.node != MAS_ROOT);
+ MT_BUG_ON(mt, mas.status != ma_root);
/* find_rev: root -> none */
entry = mas_find_rev(&mas, 0);
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 0);
MT_BUG_ON(mt, mas.last != 0);
- MT_BUG_ON(mt, mas.node != MAS_NONE);
+ MT_BUG_ON(mt, mas.status != ma_none);
/* find_rev: none -> none */
entry = mas_find_rev(&mas, 0);
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 0);
MT_BUG_ON(mt, mas.last != 0);
- MT_BUG_ON(mt, mas.node != MAS_NONE);
+ MT_BUG_ON(mt, mas.status != ma_none);
/* find_rev: start -> root */
mas_set(&mas, 10);
@@ -3157,7 +3175,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0);
MT_BUG_ON(mt, mas.last != 0);
- MT_BUG_ON(mt, mas.node != MAS_ROOT);
+ MT_BUG_ON(mt, mas.status != ma_root);
/* walk: start -> none */
mas_set(&mas, 10);
@@ -3165,7 +3183,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 1);
MT_BUG_ON(mt, mas.last != ULONG_MAX);
- MT_BUG_ON(mt, mas.node != MAS_NONE);
+ MT_BUG_ON(mt, mas.status != ma_none);
/* walk: pause -> none*/
mas_set(&mas, 10);
@@ -3174,7 +3192,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 1);
MT_BUG_ON(mt, mas.last != ULONG_MAX);
- MT_BUG_ON(mt, mas.node != MAS_NONE);
+ MT_BUG_ON(mt, mas.status != ma_none);
/* walk: none -> none */
mas.index = mas.last = 10;
@@ -3182,14 +3200,14 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 1);
MT_BUG_ON(mt, mas.last != ULONG_MAX);
- MT_BUG_ON(mt, mas.node != MAS_NONE);
+ MT_BUG_ON(mt, mas.status != ma_none);
/* walk: none -> none */
entry = mas_walk(&mas);
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 1);
MT_BUG_ON(mt, mas.last != ULONG_MAX);
- MT_BUG_ON(mt, mas.node != MAS_NONE);
+ MT_BUG_ON(mt, mas.status != ma_none);
/* walk: start -> root */
mas_set(&mas, 0);
@@ -3197,7 +3215,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0);
MT_BUG_ON(mt, mas.last != 0);
- MT_BUG_ON(mt, mas.node != MAS_ROOT);
+ MT_BUG_ON(mt, mas.status != ma_root);
/* walk: pause -> root */
mas_set(&mas, 0);
@@ -3206,22 +3224,22 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0);
MT_BUG_ON(mt, mas.last != 0);
- MT_BUG_ON(mt, mas.node != MAS_ROOT);
+ MT_BUG_ON(mt, mas.status != ma_root);
/* walk: none -> root */
- mas.node = MAS_NONE;
+ mas.status = ma_none;
entry = mas_walk(&mas);
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0);
MT_BUG_ON(mt, mas.last != 0);
- MT_BUG_ON(mt, mas.node != MAS_ROOT);
+ MT_BUG_ON(mt, mas.status != ma_root);
/* walk: root -> root */
entry = mas_walk(&mas);
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0);
MT_BUG_ON(mt, mas.last != 0);
- MT_BUG_ON(mt, mas.node != MAS_ROOT);
+ MT_BUG_ON(mt, mas.status != ma_root);
/* walk: root -> none */
mas_set(&mas, 10);
@@ -3229,7 +3247,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 1);
MT_BUG_ON(mt, mas.last != ULONG_MAX);
- MT_BUG_ON(mt, mas.node != MAS_NONE);
+ MT_BUG_ON(mt, mas.status != ma_none);
/* walk: none -> root */
mas.index = mas.last = 0;
@@ -3237,7 +3255,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0);
MT_BUG_ON(mt, mas.last != 0);
- MT_BUG_ON(mt, mas.node != MAS_ROOT);
+ MT_BUG_ON(mt, mas.status != ma_root);
mas_unlock(&mas);
@@ -3255,7 +3273,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0x1000);
MT_BUG_ON(mt, mas.last != 0x1500);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_active(&mas));
/* next: pause ->active */
mas_set(&mas, 0);
@@ -3264,126 +3282,132 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0x1000);
MT_BUG_ON(mt, mas.last != 0x1500);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_active(&mas));
/* next: none ->active */
mas.index = mas.last = 0;
mas.offset = 0;
- mas.node = MAS_NONE;
+ mas.status = ma_none;
entry = mas_next(&mas, ULONG_MAX);
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0x1000);
MT_BUG_ON(mt, mas.last != 0x1500);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_active(&mas));
- /* next:active ->active */
- entry = mas_next(&mas, ULONG_MAX);
+ /* next:active ->active (spanning limit) */
+ entry = mas_next(&mas, 0x2100);
MT_BUG_ON(mt, entry != ptr2);
MT_BUG_ON(mt, mas.index != 0x2000);
MT_BUG_ON(mt, mas.last != 0x2500);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_active(&mas));
- /* next:active -> active beyond data */
+ /* next:active -> overflow (limit reached) beyond data */
entry = mas_next(&mas, 0x2999);
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 0x2501);
MT_BUG_ON(mt, mas.last != 0x2fff);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_overflow(&mas));
- /* Continue after last range ends after max */
+ /* next:overflow -> active (limit changed) */
entry = mas_next(&mas, ULONG_MAX);
MT_BUG_ON(mt, entry != ptr3);
MT_BUG_ON(mt, mas.index != 0x3000);
MT_BUG_ON(mt, mas.last != 0x3500);
- MT_BUG_ON(mt, !mas_active(mas));
-
- /* next:active -> active continued */
- entry = mas_next(&mas, ULONG_MAX);
- MT_BUG_ON(mt, entry != NULL);
- MT_BUG_ON(mt, mas.index != 0x3501);
- MT_BUG_ON(mt, mas.last != ULONG_MAX);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_active(&mas));
- /* next:active -> overflow */
+ /* next:active -> overflow (limit reached) */
entry = mas_next(&mas, ULONG_MAX);
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 0x3501);
MT_BUG_ON(mt, mas.last != ULONG_MAX);
- MT_BUG_ON(mt, mas.node != MAS_OVERFLOW);
+ MT_BUG_ON(mt, !mas_is_overflow(&mas));
/* next:overflow -> overflow */
entry = mas_next(&mas, ULONG_MAX);
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 0x3501);
MT_BUG_ON(mt, mas.last != ULONG_MAX);
- MT_BUG_ON(mt, mas.node != MAS_OVERFLOW);
+ MT_BUG_ON(mt, !mas_is_overflow(&mas));
/* prev:overflow -> active */
entry = mas_prev(&mas, 0);
MT_BUG_ON(mt, entry != ptr3);
MT_BUG_ON(mt, mas.index != 0x3000);
MT_BUG_ON(mt, mas.last != 0x3500);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_active(&mas));
/* next: none -> active, skip value at location */
mas_set(&mas, 0);
entry = mas_next(&mas, ULONG_MAX);
- mas.node = MAS_NONE;
+ mas.status = ma_none;
mas.offset = 0;
entry = mas_next(&mas, ULONG_MAX);
MT_BUG_ON(mt, entry != ptr2);
MT_BUG_ON(mt, mas.index != 0x2000);
MT_BUG_ON(mt, mas.last != 0x2500);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_active(&mas));
/* prev:active ->active */
entry = mas_prev(&mas, 0);
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0x1000);
MT_BUG_ON(mt, mas.last != 0x1500);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_active(&mas));
- /* prev:active -> active spanning end range */
+ /* prev:active -> underflow (span limit) */
+ mas_next(&mas, ULONG_MAX);
+ entry = mas_prev(&mas, 0x1200);
+ MT_BUG_ON(mt, entry != ptr);
+ MT_BUG_ON(mt, mas.index != 0x1000);
+ MT_BUG_ON(mt, mas.last != 0x1500);
+ MT_BUG_ON(mt, !mas_is_active(&mas)); /* spanning limit */
+ entry = mas_prev(&mas, 0x1200); /* underflow */
+ MT_BUG_ON(mt, entry != NULL);
+ MT_BUG_ON(mt, mas.index != 0x1000);
+ MT_BUG_ON(mt, mas.last != 0x1500);
+ MT_BUG_ON(mt, !mas_is_underflow(&mas));
+
+ /* prev:underflow -> underflow (lower limit) spanning end range */
entry = mas_prev(&mas, 0x0100);
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 0);
MT_BUG_ON(mt, mas.last != 0x0FFF);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_underflow(&mas));
- /* prev:active -> underflow */
+ /* prev:underflow -> underflow */
entry = mas_prev(&mas, 0);
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 0);
MT_BUG_ON(mt, mas.last != 0x0FFF);
- MT_BUG_ON(mt, mas.node != MAS_UNDERFLOW);
+ MT_BUG_ON(mt, !mas_is_underflow(&mas));
/* prev:underflow -> underflow */
entry = mas_prev(&mas, 0);
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 0);
MT_BUG_ON(mt, mas.last != 0x0FFF);
- MT_BUG_ON(mt, mas.node != MAS_UNDERFLOW);
+ MT_BUG_ON(mt, !mas_is_underflow(&mas));
/* next:underflow -> active */
entry = mas_next(&mas, ULONG_MAX);
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0x1000);
MT_BUG_ON(mt, mas.last != 0x1500);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_active(&mas));
/* prev:first value -> underflow */
entry = mas_prev(&mas, 0x1000);
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 0x1000);
MT_BUG_ON(mt, mas.last != 0x1500);
- MT_BUG_ON(mt, mas.node != MAS_UNDERFLOW);
+ MT_BUG_ON(mt, !mas_is_underflow(&mas));
/* find:underflow -> first value */
entry = mas_find(&mas, ULONG_MAX);
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0x1000);
MT_BUG_ON(mt, mas.last != 0x1500);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_active(&mas));
/* prev: pause ->active */
mas_set(&mas, 0x3600);
@@ -3394,21 +3418,21 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, entry != ptr2);
MT_BUG_ON(mt, mas.index != 0x2000);
MT_BUG_ON(mt, mas.last != 0x2500);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_active(&mas));
- /* prev:active -> active spanning min */
+ /* prev:active -> underflow spanning min */
entry = mas_prev(&mas, 0x1600);
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 0x1501);
MT_BUG_ON(mt, mas.last != 0x1FFF);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_underflow(&mas));
/* prev: active ->active, continue */
entry = mas_prev(&mas, 0);
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0x1000);
MT_BUG_ON(mt, mas.last != 0x1500);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_active(&mas));
/* find: start ->active */
mas_set(&mas, 0);
@@ -3416,7 +3440,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0x1000);
MT_BUG_ON(mt, mas.last != 0x1500);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_active(&mas));
/* find: pause ->active */
mas_set(&mas, 0);
@@ -3425,7 +3449,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0x1000);
MT_BUG_ON(mt, mas.last != 0x1500);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_active(&mas));
/* find: start ->active on value */;
mas_set(&mas, 1200);
@@ -3433,14 +3457,14 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0x1000);
MT_BUG_ON(mt, mas.last != 0x1500);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_active(&mas));
/* find:active ->active */
entry = mas_find(&mas, ULONG_MAX);
MT_BUG_ON(mt, entry != ptr2);
MT_BUG_ON(mt, mas.index != 0x2000);
MT_BUG_ON(mt, mas.last != 0x2500);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_active(&mas));
/* find:active -> active (NULL)*/
@@ -3448,35 +3472,35 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 0x2501);
MT_BUG_ON(mt, mas.last != 0x2FFF);
- MT_BUG_ON(mt, !mas_active(mas));
+ MAS_BUG_ON(&mas, !mas_is_active(&mas));
/* find: overflow ->active */
entry = mas_find(&mas, 0x5000);
MT_BUG_ON(mt, entry != ptr3);
MT_BUG_ON(mt, mas.index != 0x3000);
MT_BUG_ON(mt, mas.last != 0x3500);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_active(&mas));
/* find:active -> active (NULL) end*/
entry = mas_find(&mas, ULONG_MAX);
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 0x3501);
MT_BUG_ON(mt, mas.last != ULONG_MAX);
- MT_BUG_ON(mt, !mas_active(mas));
+ MAS_BUG_ON(&mas, !mas_is_active(&mas));
/* find_rev: active (END) ->active */
entry = mas_find_rev(&mas, 0);
MT_BUG_ON(mt, entry != ptr3);
MT_BUG_ON(mt, mas.index != 0x3000);
MT_BUG_ON(mt, mas.last != 0x3500);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_active(&mas));
/* find_rev:active ->active */
entry = mas_find_rev(&mas, 0);
MT_BUG_ON(mt, entry != ptr2);
MT_BUG_ON(mt, mas.index != 0x2000);
MT_BUG_ON(mt, mas.last != 0x2500);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_active(&mas));
/* find_rev: pause ->active */
mas_pause(&mas);
@@ -3484,14 +3508,14 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0x1000);
MT_BUG_ON(mt, mas.last != 0x1500);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_active(&mas));
- /* find_rev:active -> active */
+ /* find_rev:active -> underflow */
entry = mas_find_rev(&mas, 0);
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 0);
MT_BUG_ON(mt, mas.last != 0x0FFF);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_underflow(&mas));
/* find_rev: start ->active */
mas_set(&mas, 0x1200);
@@ -3499,7 +3523,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0x1000);
MT_BUG_ON(mt, mas.last != 0x1500);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_active(&mas));
/* mas_walk start ->active */
mas_set(&mas, 0x1200);
@@ -3507,7 +3531,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0x1000);
MT_BUG_ON(mt, mas.last != 0x1500);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_active(&mas));
/* mas_walk start ->active */
mas_set(&mas, 0x1600);
@@ -3515,7 +3539,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 0x1501);
MT_BUG_ON(mt, mas.last != 0x1fff);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_active(&mas));
/* mas_walk pause ->active */
mas_set(&mas, 0x1200);
@@ -3524,7 +3548,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0x1000);
MT_BUG_ON(mt, mas.last != 0x1500);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_active(&mas));
/* mas_walk pause -> active */
mas_set(&mas, 0x1600);
@@ -3533,25 +3557,25 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 0x1501);
MT_BUG_ON(mt, mas.last != 0x1fff);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_active(&mas));
/* mas_walk none -> active */
mas_set(&mas, 0x1200);
- mas.node = MAS_NONE;
+ mas.status = ma_none;
entry = mas_walk(&mas);
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0x1000);
MT_BUG_ON(mt, mas.last != 0x1500);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_active(&mas));
/* mas_walk none -> active */
mas_set(&mas, 0x1600);
- mas.node = MAS_NONE;
+ mas.status = ma_none;
entry = mas_walk(&mas);
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 0x1501);
MT_BUG_ON(mt, mas.last != 0x1fff);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_active(&mas));
/* mas_walk active -> active */
mas.index = 0x1200;
@@ -3561,7 +3585,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, entry != ptr);
MT_BUG_ON(mt, mas.index != 0x1000);
MT_BUG_ON(mt, mas.last != 0x1500);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_active(&mas));
/* mas_walk active -> active */
mas.index = 0x1600;
@@ -3570,7 +3594,7 @@ static noinline void __init check_state_handling(struct maple_tree *mt)
MT_BUG_ON(mt, entry != NULL);
MT_BUG_ON(mt, mas.index != 0x1501);
MT_BUG_ON(mt, mas.last != 0x1fff);
- MT_BUG_ON(mt, !mas_active(mas));
+ MT_BUG_ON(mt, !mas_is_active(&mas));
mas_unlock(&mas);
}
@@ -3585,10 +3609,6 @@ static int __init maple_tree_seed(void)
pr_info("\nTEST STARTING\n\n");
- mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
- check_root_expand(&tree);
- mtree_destroy(&tree);
-
#if defined(BENCH_SLOT_STORE)
#define BENCH
mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
@@ -3617,13 +3637,18 @@ static int __init maple_tree_seed(void)
mtree_destroy(&tree);
goto skip;
#endif
-#if defined(BENCH_FORK)
+#if defined(BENCH_LOAD)
#define BENCH
mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
- bench_forking(&tree);
+ bench_load(&tree);
mtree_destroy(&tree);
goto skip;
#endif
+#if defined(BENCH_FORK)
+#define BENCH
+ bench_forking();
+ goto skip;
+#endif
#if defined(BENCH_MT_FOR_EACH)
#define BENCH
mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
@@ -3647,13 +3672,15 @@ static int __init maple_tree_seed(void)
#endif
mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
- check_iteration(&tree);
+ check_root_expand(&tree);
mtree_destroy(&tree);
mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
- check_forking(&tree);
+ check_iteration(&tree);
mtree_destroy(&tree);
+ check_forking();
+
mt_init_flags(&tree, MT_FLAGS_ALLOC_RANGE);
check_mas_store_gfp(&tree);
mtree_destroy(&tree);
diff --git a/lib/test_meminit.c b/lib/test_meminit.c
index 0ae35223d773..0dc173849a54 100644
--- a/lib/test_meminit.c
+++ b/lib/test_meminit.c
@@ -93,7 +93,7 @@ static int __init test_pages(int *total_failures)
int failures = 0, num_tests = 0;
int i;
- for (i = 0; i <= MAX_ORDER; i++)
+ for (i = 0; i < NR_PAGE_ORDERS; i++)
num_tests += do_alloc_pages_order(i, &failures);
REPORT_FAILURES_IN_FN();
diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
index c20f6cb4bf55..42b585208249 100644
--- a/lib/test_rhashtable.c
+++ b/lib/test_rhashtable.c
@@ -16,6 +16,7 @@
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/rcupdate.h>
+#include <linux/rcupdate_wait.h>
#include <linux/rhashtable.h>
#include <linux/slab.h>
#include <linux/sched.h>
diff --git a/lib/test_sysctl.c b/lib/test_sysctl.c
index 8036aa91a1cb..9321d850931f 100644
--- a/lib/test_sysctl.c
+++ b/lib/test_sysctl.c
@@ -35,6 +35,8 @@ static struct {
struct ctl_table_header *test_h_setup_node;
struct ctl_table_header *test_h_mnt;
struct ctl_table_header *test_h_mnterror;
+ struct ctl_table_header *empty_add;
+ struct ctl_table_header *empty;
} sysctl_test_headers;
struct test_sysctl_data {
@@ -130,7 +132,6 @@ static struct ctl_table test_table[] = {
.mode = 0644,
.proc_handler = proc_do_large_bitmap,
},
- { }
};
static void test_sysctl_calc_match_int_ok(void)
@@ -184,7 +185,6 @@ static struct ctl_table test_table_unregister[] = {
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
},
- {}
};
static int test_sysctl_run_unregister_nested(void)
@@ -220,6 +220,25 @@ static int test_sysctl_run_register_mount_point(void)
return 0;
}
+static struct ctl_table test_table_empty[] = { };
+
+static int test_sysctl_run_register_empty(void)
+{
+ /* Test that an empty dir can be created */
+ sysctl_test_headers.empty_add
+ = register_sysctl("debug/test_sysctl/empty_add", test_table_empty);
+ if (!sysctl_test_headers.empty_add)
+ return -ENOMEM;
+
+ /* Test that register on top of an empty dir works */
+ sysctl_test_headers.empty
+ = register_sysctl("debug/test_sysctl/empty_add/empty", test_table_empty);
+ if (!sysctl_test_headers.empty)
+ return -ENOMEM;
+
+ return 0;
+}
+
static int __init test_sysctl_init(void)
{
int err;
@@ -233,6 +252,10 @@ static int __init test_sysctl_init(void)
goto out;
err = test_sysctl_run_register_mount_point();
+ if (err)
+ goto out;
+
+ err = test_sysctl_run_register_empty();
out:
return err;
@@ -248,6 +271,10 @@ static void __exit test_sysctl_exit(void)
unregister_sysctl_table(sysctl_test_headers.test_h_mnt);
if (sysctl_test_headers.test_h_mnterror)
unregister_sysctl_table(sysctl_test_headers.test_h_mnterror);
+ if (sysctl_test_headers.empty)
+ unregister_sysctl_table(sysctl_test_headers.empty);
+ if (sysctl_test_headers.empty_add)
+ unregister_sysctl_table(sysctl_test_headers.empty_add);
}
module_exit(test_sysctl_exit);
diff --git a/lib/trace_readwrite.c b/lib/trace_readwrite.c
index 62b4e8b3c733..a94cd56a1e4c 100644
--- a/lib/trace_readwrite.c
+++ b/lib/trace_readwrite.c
@@ -7,7 +7,7 @@
#include <linux/ftrace.h>
#include <linux/module.h>
-#include <asm-generic/io.h>
+#include <linux/io.h>
#define CREATE_TRACE_POINTS
#include <trace/events/rwmmio.h>
diff --git a/lib/ubsan.c b/lib/ubsan.c
index 3f90810f9f42..df4f8d1354bb 100644
--- a/lib/ubsan.c
+++ b/lib/ubsan.c
@@ -204,8 +204,8 @@ static void ubsan_prologue(struct source_location *loc, const char *reason)
{
current->in_ubsan++;
- pr_err("========================================"
- "========================================\n");
+ pr_warn(CUT_HERE);
+
pr_err("UBSAN: %s in %s:%d:%d\n", reason, loc->file_name,
loc->line & LINE_MASK, loc->column & COLUMN_MASK);
@@ -215,8 +215,7 @@ static void ubsan_prologue(struct source_location *loc, const char *reason)
static void ubsan_epilogue(void)
{
dump_stack();
- pr_err("========================================"
- "========================================\n");
+ pr_warn("---[ end trace ]---\n");
current->in_ubsan--;
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 3e3733a7084f..552738f14275 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -2111,15 +2111,20 @@ char *fwnode_full_name_string(struct fwnode_handle *fwnode, char *buf,
/* Loop starting from the root node to the current node. */
for (depth = fwnode_count_parents(fwnode); depth >= 0; depth--) {
- struct fwnode_handle *__fwnode =
- fwnode_get_nth_parent(fwnode, depth);
+ /*
+ * Only get a reference for other nodes (i.e. parent nodes).
+ * fwnode refcount may be 0 here.
+ */
+ struct fwnode_handle *__fwnode = depth ?
+ fwnode_get_nth_parent(fwnode, depth) : fwnode;
buf = string(buf, end, fwnode_get_name_prefix(__fwnode),
default_str_spec);
buf = string(buf, end, fwnode_get_name(__fwnode),
default_str_spec);
- fwnode_handle_put(__fwnode);
+ if (depth)
+ fwnode_handle_put(__fwnode);
}
return buf;