author     Linus Torvalds <torvalds@linux-foundation.org>   2024-07-18 14:48:11 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2024-07-18 14:48:11 -0700
commit     b2fc97c18614f99179700be263ecbc667c91a4e8 (patch)
tree       13914a5bb2fcef7691c84796ab5a36459b949873 /tools
parent     68b59730459e5d1fe4e0bbeb04ceb9df0f002270 (diff)
parent     9364a7e40d54e6858479f0a96e1a04aa1204be16 (diff)
Merge tag 'memblock-v6.11-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rppt/memblock
Pull memblock updates from Mike Rapoport:
- 'reserve_mem' command line parameter to allow creation of named
  memory reservations at boot time.
  The driving use case is to improve the ability of pstore to retain
  ramoops data across reboots (see the illustrative command line right
  after this list).
- cleanups and small improvements in memblock and mm_init
- new test cases in the memblock test suite
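[ Illustration, not part of the original pull message: the two new
  options are intended to be used together on the kernel command line,
  roughly as below; the size, alignment and region name are made-up
  placeholder values, not values taken from this series:

      reserve_mem=2M:4096:oops ramoops.mem_name=oops

  reserve_mem=<size>:<align>:<name> creates a named memory reservation
  early during boot, and ramoops.mem_name= tells ramoops to back its
  buffer with the reservation of that name, which is what allows the
  pstore/ramoops data to be found again after a reboot. ]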
* tag 'memblock-v6.11-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rppt/memblock:
memblock tests: fix implicit declaration of function 'numa_valid_node'
memblock: Move late alloc warning down to phys alloc
pstore/ramoops: Add ramoops.mem_name= command line option
mm/memblock: Add "reserve_mem" to reserved named memory at boot up
mm/mm_init.c: don't initialize page->lru again
mm/mm_init.c: not always search next deferred_init_pfn from very beginning
mm/mm_init.c: use deferred_init_mem_pfn_range_in_zone() to decide loop condition
mm/mm_init.c: get the highest zone directly
mm/mm_init.c: move nr_initialised reset down a bit
mm/memblock: fix a typo in description of for_each_mem_region()
mm/mm_init.c: use memblock_region_memory_base_pfn() to get startpfn
mm/memblock: use PAGE_ALIGN_DOWN to get pgend in free_memmap
mm/memblock: return true directly on finding overlap region
memblock tests: add memblock_overlaps_region_checks
mm/memblock: fix comment for memblock_isolate_range()
memblock tests: add memblock_reserve_many_may_conflict_check()
memblock tests: add memblock_reserve_all_locations_check()
mm/memblock: remove empty dummy entry
Diffstat (limited to 'tools')
-rw-r--r--  tools/include/linux/mm.h                    1
-rw-r--r--  tools/include/linux/numa.h                  5
-rw-r--r--  tools/testing/memblock/tests/basic_api.c  314
-rw-r--r--  tools/testing/memblock/tests/common.c       8
-rw-r--r--  tools/testing/memblock/tests/common.h       4
5 files changed, 324 insertions, 8 deletions
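[ The tools/ changes below are part of the userspace memblock test
  suite. Assuming the usual Makefile in tools/testing/memblock, the new
  checks can be exercised with something along the lines of:

      $ cd tools/testing/memblock
      $ make
      $ ./main

  See that directory's README for the supported build and run options. ]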
diff --git a/tools/include/linux/mm.h b/tools/include/linux/mm.h
index dc0fc7125bc3..cad4f2927983 100644
--- a/tools/include/linux/mm.h
+++ b/tools/include/linux/mm.h
@@ -12,6 +12,7 @@
 #define PHYS_ADDR_MAX (~(phys_addr_t)0)
 
 #define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)
+#define PAGE_ALIGN_DOWN(addr) ALIGN_DOWN(addr, PAGE_SIZE)
 
 #define __va(x) ((void *)((unsigned long)(x)))
 #define __pa(x) ((unsigned long)(x))
diff --git a/tools/include/linux/numa.h b/tools/include/linux/numa.h
index 110b0e5d0fb0..c8b9369335e0 100644
--- a/tools/include/linux/numa.h
+++ b/tools/include/linux/numa.h
@@ -13,4 +13,9 @@
 
 #define NUMA_NO_NODE (-1)
 
+static inline bool numa_valid_node(int nid)
+{
+        return nid >= 0 && nid < MAX_NUMNODES;
+}
+
 #endif /* _LINUX_NUMA_H */
diff --git a/tools/testing/memblock/tests/basic_api.c b/tools/testing/memblock/tests/basic_api.c
index 57bf2688edfd..67503089e6a0 100644
--- a/tools/testing/memblock/tests/basic_api.c
+++ b/tools/testing/memblock/tests/basic_api.c
@@ -15,12 +15,12 @@ static int memblock_initialization_check(void)
         PREFIX_PUSH();
 
         ASSERT_NE(memblock.memory.regions, NULL);
-        ASSERT_EQ(memblock.memory.cnt, 1);
+        ASSERT_EQ(memblock.memory.cnt, 0);
         ASSERT_EQ(memblock.memory.max, EXPECTED_MEMBLOCK_REGIONS);
         ASSERT_EQ(strcmp(memblock.memory.name, "memory"), 0);
 
         ASSERT_NE(memblock.reserved.regions, NULL);
-        ASSERT_EQ(memblock.reserved.cnt, 1);
+        ASSERT_EQ(memblock.reserved.cnt, 0);
         ASSERT_EQ(memblock.memory.max, EXPECTED_MEMBLOCK_REGIONS);
         ASSERT_EQ(strcmp(memblock.reserved.name, "reserved"), 0);
 
@@ -982,6 +982,262 @@ static int memblock_reserve_many_check(void)
         return 0;
 }
 
+
+/*
+ * A test that trying to reserve the 129th memory block at all locations.
+ * Expect to trigger memblock_double_array() to double the
+ * memblock.memory.max, find a new valid memory as reserved.regions.
+ *
+ *  0        1          2                 128
+ *  +-------+  +-------+  +-------+         +-------+
+ *  |  32K  |  |  32K  |  |  32K  |   ...   |  32K  |
+ *  +-------+-------+-------+-------+-------+       +-------+
+ *          |<-32K->|       |<-32K->|
+ *
+ */
+/* Keep the gap so these memory region will not be merged. */
+#define MEMORY_BASE(idx) (SZ_128K + (MEM_SIZE * 2) * (idx))
+static int memblock_reserve_all_locations_check(void)
+{
+        int i, skip;
+        void *orig_region;
+        struct region r = {
+                .base = SZ_16K,
+                .size = SZ_16K,
+        };
+        phys_addr_t new_reserved_regions_size;
+
+        PREFIX_PUSH();
+
+        /* Reserve the 129th memory block for all possible positions*/
+        for (skip = 0; skip < INIT_MEMBLOCK_REGIONS + 1; skip++) {
+                reset_memblock_regions();
+                memblock_allow_resize();
+
+                /* Add a valid memory region used by double_array(). */
+                dummy_physical_memory_init();
+                memblock_add(dummy_physical_memory_base(), MEM_SIZE);
+
+                for (i = 0; i < INIT_MEMBLOCK_REGIONS + 1; i++) {
+                        if (i == skip)
+                                continue;
+
+                        /* Reserve some fakes memory region to fulfill the memblock. */
+                        memblock_reserve(MEMORY_BASE(i), MEM_SIZE);
+
+                        if (i < skip) {
+                                ASSERT_EQ(memblock.reserved.cnt, i + 1);
+                                ASSERT_EQ(memblock.reserved.total_size, (i + 1) * MEM_SIZE);
+                        } else {
+                                ASSERT_EQ(memblock.reserved.cnt, i);
+                                ASSERT_EQ(memblock.reserved.total_size, i * MEM_SIZE);
+                        }
+                }
+
+                orig_region = memblock.reserved.regions;
+
+                /* This reserve the 129 memory_region, and makes it double array. */
+                memblock_reserve(MEMORY_BASE(skip), MEM_SIZE);
+
+                /*
+                 * This is the memory region size used by the doubled reserved.regions,
+                 * and it has been reserved due to it has been used. The size is used to
+                 * calculate the total_size that the memblock.reserved have now.
+                 */
+                new_reserved_regions_size = PAGE_ALIGN((INIT_MEMBLOCK_REGIONS * 2) *
+                                                sizeof(struct memblock_region));
+                /*
+                 * The double_array() will find a free memory region as the new
+                 * reserved.regions, and the used memory region will be reserved, so
+                 * there will be one more region exist in the reserved memblock. And the
+                 * one more reserved region's size is new_reserved_regions_size.
+                 */
+                ASSERT_EQ(memblock.reserved.cnt, INIT_MEMBLOCK_REGIONS + 2);
+                ASSERT_EQ(memblock.reserved.total_size, (INIT_MEMBLOCK_REGIONS + 1) * MEM_SIZE +
+                                                        new_reserved_regions_size);
+                ASSERT_EQ(memblock.reserved.max, INIT_MEMBLOCK_REGIONS * 2);
+
+                /*
+                 * Now memblock_double_array() works fine. Let's check after the
+                 * double_array(), the memblock_reserve() still works as normal.
+                 */
+                memblock_reserve(r.base, r.size);
+                ASSERT_EQ(memblock.reserved.regions[0].base, r.base);
+                ASSERT_EQ(memblock.reserved.regions[0].size, r.size);
+
+                ASSERT_EQ(memblock.reserved.cnt, INIT_MEMBLOCK_REGIONS + 3);
+                ASSERT_EQ(memblock.reserved.total_size, (INIT_MEMBLOCK_REGIONS + 1) * MEM_SIZE +
+                                                        new_reserved_regions_size +
+                                                        r.size);
+                ASSERT_EQ(memblock.reserved.max, INIT_MEMBLOCK_REGIONS * 2);
+
+                dummy_physical_memory_cleanup();
+
+                /*
+                 * The current reserved.regions is occupying a range of memory that
+                 * allocated from dummy_physical_memory_init(). After free the memory,
+                 * we must not use it. So restore the origin memory region to make sure
+                 * the tests can run as normal and not affected by the double array.
+                 */
+                memblock.reserved.regions = orig_region;
+                memblock.reserved.cnt = INIT_MEMBLOCK_RESERVED_REGIONS;
+        }
+
+        test_pass_pop();
+
+        return 0;
+}
+
+/*
+ * A test that trying to reserve the 129th memory block at all locations.
+ * Expect to trigger memblock_double_array() to double the
+ * memblock.memory.max, find a new valid memory as reserved.regions. And make
+ * sure it doesn't conflict with the range we want to reserve.
+ *
+ * For example, we have 128 regions in reserved and now want to reserve
+ * the skipped one. Since reserved is full, memblock_double_array() would find
+ * an available range in memory for the new array. We intended to put two
+ * ranges in memory with one is the exact range of the skipped one. Before
+ * commit 48c3b583bbdd ("mm/memblock: fix overlapping allocation when doubling
+ * reserved array"), the new array would sits in the skipped range which is a
+ * conflict. The expected new array should be allocated from memory.regions[0].
+ *
+ *           0                               1
+ * memory    +-------+                       +-------+
+ *           |  32K  |                       |  32K  |
+ *           +-------+ ------+-------+-------+-------+
+ *                   |<-32K->|<-32K->|<-32K->|
+ *
+ *                           0               skipped           127
+ * reserved                  +-------+       .........         +-------+
+ *                           |  32K  |       .  32K  .   ...   |  32K  |
+ *                           +-------+-------+-------+         +-------+
+ *                                   |<-32K->|
+ *                                           ^
+ *                                           |
+ *                                           |
+ *                                           skipped one
+ */
+/* Keep the gap so these memory region will not be merged. */
+#define MEMORY_BASE_OFFSET(idx, offset) ((offset) + (MEM_SIZE * 2) * (idx))
+static int memblock_reserve_many_may_conflict_check(void)
+{
+        int i, skip;
+        void *orig_region;
+        struct region r = {
+                .base = SZ_16K,
+                .size = SZ_16K,
+        };
+        phys_addr_t new_reserved_regions_size;
+
+        /*
+         *  0        1                       129
+         *  +---+    +---+                   +---+
+         *  |32K|    |32K|  ..               |32K|
+         *  +---+    +---+                   +---+
+         *
+         * Pre-allocate the range for 129 memory block + one range for double
+         * memblock.reserved.regions at idx 0.
+         */
+        dummy_physical_memory_init();
+        phys_addr_t memory_base = dummy_physical_memory_base();
+        phys_addr_t offset = PAGE_ALIGN(memory_base);
+
+        PREFIX_PUSH();
+
+        /* Reserve the 129th memory block for all possible positions*/
+        for (skip = 1; skip <= INIT_MEMBLOCK_REGIONS + 1; skip++) {
+                reset_memblock_regions();
+                memblock_allow_resize();
+
+                reset_memblock_attributes();
+                /* Add a valid memory region used by double_array(). */
+                memblock_add(MEMORY_BASE_OFFSET(0, offset), MEM_SIZE);
+                /*
+                 * Add a memory region which will be reserved as 129th memory
+                 * region. This is not expected to be used by double_array().
+                 */
+                memblock_add(MEMORY_BASE_OFFSET(skip, offset), MEM_SIZE);
+
+                for (i = 1; i <= INIT_MEMBLOCK_REGIONS + 1; i++) {
+                        if (i == skip)
+                                continue;
+
+                        /* Reserve some fakes memory region to fulfill the memblock. */
+                        memblock_reserve(MEMORY_BASE_OFFSET(i, offset), MEM_SIZE);
+
+                        if (i < skip) {
+                                ASSERT_EQ(memblock.reserved.cnt, i);
+                                ASSERT_EQ(memblock.reserved.total_size, i * MEM_SIZE);
+                        } else {
+                                ASSERT_EQ(memblock.reserved.cnt, i - 1);
+                                ASSERT_EQ(memblock.reserved.total_size, (i - 1) * MEM_SIZE);
+                        }
+                }
+
+                orig_region = memblock.reserved.regions;
+
+                /* This reserve the 129 memory_region, and makes it double array. */
+                memblock_reserve(MEMORY_BASE_OFFSET(skip, offset), MEM_SIZE);
+
+                /*
+                 * This is the memory region size used by the doubled reserved.regions,
+                 * and it has been reserved due to it has been used. The size is used to
+                 * calculate the total_size that the memblock.reserved have now.
+                 */
+                new_reserved_regions_size = PAGE_ALIGN((INIT_MEMBLOCK_REGIONS * 2) *
+                                                sizeof(struct memblock_region));
+                /*
+                 * The double_array() will find a free memory region as the new
+                 * reserved.regions, and the used memory region will be reserved, so
+                 * there will be one more region exist in the reserved memblock. And the
+                 * one more reserved region's size is new_reserved_regions_size.
+                 */
+                ASSERT_EQ(memblock.reserved.cnt, INIT_MEMBLOCK_REGIONS + 2);
+                ASSERT_EQ(memblock.reserved.total_size, (INIT_MEMBLOCK_REGIONS + 1) * MEM_SIZE +
+                                                        new_reserved_regions_size);
+                ASSERT_EQ(memblock.reserved.max, INIT_MEMBLOCK_REGIONS * 2);
+
+                /*
+                 * The first reserved region is allocated for double array
+                 * with the size of new_reserved_regions_size and the base to be
+                 * MEMORY_BASE_OFFSET(0, offset) + SZ_32K - new_reserved_regions_size
+                 */
+                ASSERT_EQ(memblock.reserved.regions[0].base + memblock.reserved.regions[0].size,
+                          MEMORY_BASE_OFFSET(0, offset) + SZ_32K);
+                ASSERT_EQ(memblock.reserved.regions[0].size, new_reserved_regions_size);
+
+                /*
+                 * Now memblock_double_array() works fine. Let's check after the
+                 * double_array(), the memblock_reserve() still works as normal.
+                 */
+                memblock_reserve(r.base, r.size);
+                ASSERT_EQ(memblock.reserved.regions[0].base, r.base);
+                ASSERT_EQ(memblock.reserved.regions[0].size, r.size);
+
+                ASSERT_EQ(memblock.reserved.cnt, INIT_MEMBLOCK_REGIONS + 3);
+                ASSERT_EQ(memblock.reserved.total_size, (INIT_MEMBLOCK_REGIONS + 1) * MEM_SIZE +
+                                                        new_reserved_regions_size +
+                                                        r.size);
+                ASSERT_EQ(memblock.reserved.max, INIT_MEMBLOCK_REGIONS * 2);
+
+                /*
+                 * The current reserved.regions is occupying a range of memory that
+                 * allocated from dummy_physical_memory_init(). After free the memory,
+                 * we must not use it. So restore the origin memory region to make sure
+                 * the tests can run as normal and not affected by the double array.
+                 */
+                memblock.reserved.regions = orig_region;
+                memblock.reserved.cnt = INIT_MEMBLOCK_RESERVED_REGIONS;
+        }
+
+        dummy_physical_memory_cleanup();
+
+        test_pass_pop();
+
+        return 0;
+}
+
 static int memblock_reserve_checks(void)
 {
         prefix_reset();
@@ -997,6 +1253,8 @@ static int memblock_reserve_checks(void)
         memblock_reserve_between_check();
         memblock_reserve_near_max_check();
         memblock_reserve_many_check();
+        memblock_reserve_all_locations_check();
+        memblock_reserve_many_may_conflict_check();
 
         prefix_pop();
 
@@ -1295,7 +1553,7 @@ static int memblock_remove_only_region_check(void)
         ASSERT_EQ(rgn->base, 0);
         ASSERT_EQ(rgn->size, 0);
 
-        ASSERT_EQ(memblock.memory.cnt, 1);
+        ASSERT_EQ(memblock.memory.cnt, 0);
         ASSERT_EQ(memblock.memory.total_size, 0);
 
         test_pass_pop();
@@ -1723,7 +1981,7 @@ static int memblock_free_only_region_check(void)
         ASSERT_EQ(rgn->base, 0);
         ASSERT_EQ(rgn->size, 0);
 
-        ASSERT_EQ(memblock.reserved.cnt, 1);
+        ASSERT_EQ(memblock.reserved.cnt, 0);
         ASSERT_EQ(memblock.reserved.total_size, 0);
 
         test_pass_pop();
@@ -2129,6 +2387,53 @@ static int memblock_trim_memory_checks(void)
         return 0;
 }
 
+static int memblock_overlaps_region_check(void)
+{
+        struct region r = {
+                .base = SZ_1G,
+                .size = SZ_4M
+        };
+
+        PREFIX_PUSH();
+
+        reset_memblock_regions();
+        memblock_add(r.base, r.size);
+
+        /* Far Away */
+        ASSERT_FALSE(memblock_overlaps_region(&memblock.memory, SZ_1M, SZ_1M));
+        ASSERT_FALSE(memblock_overlaps_region(&memblock.memory, SZ_2G, SZ_1M));
+
+        /* Neighbor */
+        ASSERT_FALSE(memblock_overlaps_region(&memblock.memory, SZ_1G - SZ_1M, SZ_1M));
+        ASSERT_FALSE(memblock_overlaps_region(&memblock.memory, SZ_1G + SZ_4M, SZ_1M));
+
+        /* Partial Overlap */
+        ASSERT_TRUE(memblock_overlaps_region(&memblock.memory, SZ_1G - SZ_1M, SZ_2M));
+        ASSERT_TRUE(memblock_overlaps_region(&memblock.memory, SZ_1G + SZ_2M, SZ_2M));
+
+        /* Totally Overlap */
+        ASSERT_TRUE(memblock_overlaps_region(&memblock.memory, SZ_1G, SZ_4M));
+        ASSERT_TRUE(memblock_overlaps_region(&memblock.memory, SZ_1G - SZ_2M, SZ_8M));
+        ASSERT_TRUE(memblock_overlaps_region(&memblock.memory, SZ_1G + SZ_1M, SZ_1M));
+
+        test_pass_pop();
+
+        return 0;
+}
+
+static int memblock_overlaps_region_checks(void)
+{
+        prefix_reset();
+        prefix_push("memblock_overlaps_region");
+        test_print("Running memblock_overlaps_region tests...\n");
+
+        memblock_overlaps_region_check();
+
+        prefix_pop();
+
+        return 0;
+}
+
 int memblock_basic_checks(void)
 {
         memblock_initialization_check();
@@ -2138,6 +2443,7 @@ int memblock_basic_checks(void)
         memblock_free_checks();
         memblock_bottom_up_checks();
         memblock_trim_memory_checks();
+        memblock_overlaps_region_checks();
 
         return 0;
 }
diff --git a/tools/testing/memblock/tests/common.c b/tools/testing/memblock/tests/common.c
index f43b6f414983..3250c8e5124b 100644
--- a/tools/testing/memblock/tests/common.c
+++ b/tools/testing/memblock/tests/common.c
@@ -40,13 +40,13 @@ void reset_memblock_regions(void)
 {
         memset(memblock.memory.regions, 0,
                memblock.memory.cnt * sizeof(struct memblock_region));
-        memblock.memory.cnt = 1;
+        memblock.memory.cnt = 0;
         memblock.memory.max = INIT_MEMBLOCK_REGIONS;
         memblock.memory.total_size = 0;
 
         memset(memblock.reserved.regions, 0,
                memblock.reserved.cnt * sizeof(struct memblock_region));
-        memblock.reserved.cnt = 1;
+        memblock.reserved.cnt = 0;
         memblock.reserved.max = INIT_MEMBLOCK_RESERVED_REGIONS;
         memblock.reserved.total_size = 0;
 }
@@ -61,7 +61,7 @@ void reset_memblock_attributes(void)
 
 static inline void fill_memblock(void)
 {
-        memset(memory_block.base, 1, MEM_SIZE);
+        memset(memory_block.base, 1, PHYS_MEM_SIZE);
 }
 
 void setup_memblock(void)
@@ -103,7 +103,7 @@ void setup_numa_memblock(const unsigned int node_fracs[])
 
 void dummy_physical_memory_init(void)
 {
-        memory_block.base = malloc(MEM_SIZE);
+        memory_block.base = malloc(PHYS_MEM_SIZE);
         assert(memory_block.base);
         fill_memblock();
 }
diff --git a/tools/testing/memblock/tests/common.h b/tools/testing/memblock/tests/common.h
index b5ec59aa62d7..e1138e06c903 100644
--- a/tools/testing/memblock/tests/common.h
+++ b/tools/testing/memblock/tests/common.h
@@ -12,6 +12,7 @@
 #include <../selftests/kselftest.h>
 
 #define MEM_SIZE                SZ_32K
+#define PHYS_MEM_SIZE           SZ_16M
 #define NUMA_NODES              8
 
 #define INIT_MEMBLOCK_REGIONS   128
@@ -39,6 +40,9 @@ enum test_flags {
         assert((_expected) == (_seen)); \
 } while (0)
 
+#define ASSERT_TRUE(_seen) ASSERT_EQ(true, _seen)
+#define ASSERT_FALSE(_seen) ASSERT_EQ(false, _seen)
+
 /**
  * ASSERT_NE():
  * Check the condition