author | Vlastimil Babka <vbabka@suse.cz> | 2016-05-19 17:14:41 -0700
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-05-19 19:12:14 -0700
commit | 4e6118016eb7986109ad61b00186579f384f956a (patch)
tree | f3c06e8c7d5c726bacb8dafb5fd959484850bc99 /mm/page_alloc.c
parent | e2769dbdc51f1baa1908ecf6c84d50f19577e1db (diff)
mm, page_alloc: uninline the bad page part of check_new_page()
Bad pages should be rare, so the code that handles them doesn't need to be
inlined for performance. Move it to a separate function that returns void.
This also assumes that the initial page_expected_state() result will match
the result of the thorough check, i.e. that the page doesn't become "good"
in the meantime. The same expectation is already in place in
free_pages_check().
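
To make the pattern concrete, here is a minimal userspace sketch of the same
fast-path/slow-path split; everything in it (struct object, check_object(),
report_bad_object()) is hypothetical, and the likely()/unlikely() shims stand
in for the kernel's <linux/compiler.h> hints:

#include <stdio.h>

/* Stand-ins for the kernel's branch-prediction hints. */
#define likely(x)	__builtin_expect(!!(x), 1)
#define unlikely(x)	__builtin_expect(!!(x), 0)

struct object {
	unsigned long flags;
	int refcount;
};

/* Out of line on purpose: the rare diagnosis path stays off the hot path. */
static void __attribute__((noinline)) report_bad_object(struct object *obj)
{
	const char *reason = "unknown";

	if (obj->refcount != 0)
		reason = "nonzero refcount";
	if (obj->flags != 0)
		reason = "unexpected flags set";

	fprintf(stderr, "bad object: %s\n", reason);
}

/* Inlined into callers, which pay only for one cheap combined test. */
static inline int check_object(struct object *obj)
{
	if (likely(obj->flags == 0 && obj->refcount == 0))
		return 0;

	report_bad_object(obj);
	return 1;
}

int main(void)
{
	struct object good = { 0, 0 }, bad = { 0x4, 1 };

	printf("good: %d, bad: %d\n", check_object(&good), check_object(&bad));
	return 0;
}

The effect on code size can be measured with scripts/bloat-o-meter, which
compares symbol sizes between two builds; that is where the figures below
come from.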
!DEBUG_VM bloat-o-meter:
add/remove: 1/0 grow/shrink: 0/1 up/down: 134/-274 (-140)
function                                     old     new   delta
check_new_page_bad                             -     134    +134
get_page_from_freelist                      3468    3194    -274
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Mel Gorman <mgorman@techsingularity.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r-- | mm/page_alloc.c | 33
1 file changed, 17 insertions(+), 16 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 7d8f642c498d..ecf663358b0d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1647,19 +1647,11 @@ static inline void expand(struct zone *zone, struct page *page,
 	}
 }
 
-/*
- * This page is about to be returned from the page allocator
- */
-static inline int check_new_page(struct page *page)
+static void check_new_page_bad(struct page *page)
 {
-	const char *bad_reason;
-	unsigned long bad_flags;
+	const char *bad_reason = NULL;
+	unsigned long bad_flags = 0;
 
-	if (page_expected_state(page, PAGE_FLAGS_CHECK_AT_PREP|__PG_HWPOISON))
-		return 0;
-
-	bad_reason = NULL;
-	bad_flags = 0;
 	if (unlikely(atomic_read(&page->_mapcount) != -1))
 		bad_reason = "nonzero mapcount";
 	if (unlikely(page->mapping != NULL))
@@ -1678,11 +1670,20 @@ static inline int check_new_page(struct page *page)
 	if (unlikely(page->mem_cgroup))
 		bad_reason = "page still charged to cgroup";
 #endif
-	if (unlikely(bad_reason)) {
-		bad_page(page, bad_reason, bad_flags);
-		return 1;
-	}
-	return 0;
+	bad_page(page, bad_reason, bad_flags);
+}
+
+/*
+ * This page is about to be returned from the page allocator
+ */
+static inline int check_new_page(struct page *page)
+{
+	if (likely(page_expected_state(page,
+				PAGE_FLAGS_CHECK_AT_PREP|__PG_HWPOISON)))
+		return 0;
+
+	check_new_page_bad(page);
+	return 1;
 }
 
 static inline bool free_pages_prezeroed(bool poisoned)
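
As a usage note, returning int keeps callers simple: any nonzero result means
"discard this page and retry". The loop below is an illustrative sketch of
such a caller, not code from this commit; the helper name is hypothetical:

/*
 * Illustrative sketch (not from this commit): reject a whole 2^order
 * block if any constituent page fails check_new_page().
 */
static int check_new_pages_sketch(struct page *page, unsigned int order)
{
	unsigned int i;

	for (i = 0; i < (1U << order); i++) {
		if (unlikely(check_new_page(page + i)))
			return 1;	/* bad page: caller retries elsewhere */
	}

	return 0;
}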