author		Mike Rapoport (IBM) <rppt@kernel.org>	2023-01-04 21:18:05 +0200
committer	Andrew Morton <akpm@linux-foundation.org>	2023-01-18 17:12:54 -0800
commit		fc5744881eabcc73ed24a4229034f0fbdeb3f46f
tree		92c72b2179322cc631ec4a0dae055cf18931e570
parent		f78dfc7b77d5c3527d0f895bef693f711802de5a
mm/page_alloc: invert logic for early page initialisation checks
Rename early_page_uninitialised() to early_page_initialised() and invert
its logic to make the code more readable.
Link: https://lkml.kernel.org/r/20230104191805.2535864-1-rppt@kernel.org
Signed-off-by: Mike Rapoport (IBM) <rppt@kernel.org>
Reviewed-by: David Hildenbrand <david@redhat.com>
Reviewed-by: Anshuman Khandual <anshuman.khandual@arm.com>
Acked-by: Mel Gorman <mgorman@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
-rw-r--r--	mm/page_alloc.c	16
1 file changed, 8 insertions, 8 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 5668c1a2de49..5514d84cc712 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -443,15 +443,15 @@ static inline bool deferred_pages_enabled(void)
 	return static_branch_unlikely(&deferred_pages);
 }
 
-/* Returns true if the struct page for the pfn is uninitialised */
-static inline bool __meminit early_page_uninitialised(unsigned long pfn)
+/* Returns true if the struct page for the pfn is initialised */
+static inline bool __meminit early_page_initialised(unsigned long pfn)
 {
 	int nid = early_pfn_to_nid(pfn);
 
 	if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn)
-		return true;
+		return false;
 
-	return false;
+	return true;
 }
 
 /*
@@ -498,9 +498,9 @@ static inline bool deferred_pages_enabled(void)
 	return false;
 }
 
-static inline bool early_page_uninitialised(unsigned long pfn)
+static inline bool early_page_initialised(unsigned long pfn)
 {
-	return false;
+	return true;
 }
 
 static inline bool defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
@@ -1643,7 +1643,7 @@ static void __meminit init_reserved_page(unsigned long pfn)
 	pg_data_t *pgdat;
 	int nid, zid;
 
-	if (!early_page_uninitialised(pfn))
+	if (early_page_initialised(pfn))
 		return;
 
 	nid = early_pfn_to_nid(pfn);
@@ -1806,7 +1806,7 @@ int __meminit early_pfn_to_nid(unsigned long pfn)
 void __init memblock_free_pages(struct page *page, unsigned long pfn,
 							unsigned int order)
 {
-	if (early_page_uninitialised(pfn))
+	if (!early_page_initialised(pfn))
 		return;
 	if (!kmsan_memblock_free_pages(page, order)) {
 		/* KMSAN will take care of these pages. */
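For readers outside the kernel tree, the following is a minimal user-space sketch of the inverted predicate, not kernel code: the fake node data, the first_deferred_pfn value, and the sample pfns are made up purely for illustration, and only the shape of the check and the positive-sense call sites mirror the patch.

/*
 * Stand-alone sketch of the renamed helper. In the kernel, NODE_DATA(nid)
 * and node_online(nid) come from the NUMA topology; here a single fake
 * node stands in for them.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_node_data {
	bool online;
	unsigned long first_deferred_pfn;	/* pages at or above this pfn are deferred */
};

static struct fake_node_data fake_node = {
	.online = true,
	.first_deferred_pfn = 0x1000,		/* illustrative value only */
};

/* Positive-sense helper, as after the rename: true means "already initialised". */
static bool early_page_initialised(unsigned long pfn)
{
	if (fake_node.online && pfn >= fake_node.first_deferred_pfn)
		return false;	/* pfn falls in the deferred range */

	return true;
}

int main(void)
{
	unsigned long pfns[] = { 0x0800, 0x1000, 0x2000 };

	for (size_t i = 0; i < sizeof(pfns) / sizeof(pfns[0]); i++) {
		/*
		 * Callers now read positively, e.g. init_reserved_page():
		 *	if (early_page_initialised(pfn))
		 *		return;
		 * instead of the old double negative !early_page_uninitialised(pfn).
		 */
		printf("pfn 0x%lx: %s\n", pfns[i],
		       early_page_initialised(pfns[i]) ? "initialised" : "deferred");
	}
	return 0;
}

The behaviour is unchanged; the benefit is purely that call sites such as init_reserved_page() and memblock_free_pages() now state their condition directly rather than through a negated "uninitialised" check.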