author	Johannes Weiner <hannes@cmpxchg.org>	2009-06-19 19:30:56 +0200
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-06-20 16:08:22 -0700
commit	c277331d5fbaae5772ed19862feefa91f4e477d3 (patch)
tree	fcd980b58d9487421e9b0c45b7c082fa1302debb
parent	9063c61fd5cbd6f42e95929aa0e02380c9e15656 (diff)
mm: page_alloc: clear PG_locked before checking flags on free
da456f1 "page allocator: do not disable interrupts in free_page_mlock()" moved the PG_mlocked clearing after the flag sanity checking which makes mlocked pages always trigger 'bad page'. Fix this by clearing the bit up front. Reported--and-debugged-by: Peter Chubb <peter.chubb@nicta.com.au> Signed-off-by: Johannes Weiner <hannes@cmpxchg.org> Acked-by: Mel Gorman <mel@csn.ul.ie> Tested-by: Maxim Levitsky <maximlevitsky@gmail.com> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--	mm/page_alloc.c	9
1 file changed, 4 insertions, 5 deletions
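
To illustrate the ordering issue the patch addresses, below is a minimal, self-contained user-space C sketch; it is not kernel code, and all names in it (fake_page, FAKE_PG_*, bad_page, test_clear_mlocked, free_fake_page) are hypothetical stand-ins. The point it demonstrates is the one from the commit message: clear the mlocked bit before the bad-page sanity check, so an mlocked page frees cleanly instead of tripping the check on every free.

/*
 * Minimal user-space sketch of the ordering issue -- not kernel code.
 * All names here (fake_page, FAKE_PG_*, bad_page, free_fake_page)
 * are hypothetical stand-ins for illustration only.
 */
#include <stdbool.h>
#include <stdio.h>

#define FAKE_PG_LOCKED	(1u << 0)
#define FAKE_PG_MLOCKED	(1u << 1)

/* Flags that must not be set on a page reaching the free path. */
#define FAKE_FLAGS_CHECK_AT_FREE	(FAKE_PG_LOCKED | FAKE_PG_MLOCKED)

struct fake_page {
	unsigned int flags;
};

/* Analogue of a TestClear operation: clear the bit, return its old value. */
static bool test_clear_mlocked(struct fake_page *page)
{
	bool was_set = page->flags & FAKE_PG_MLOCKED;

	page->flags &= ~FAKE_PG_MLOCKED;
	return was_set;
}

/* Analogue of the free-path sanity check that reports a 'bad page'. */
static bool bad_page(const struct fake_page *page)
{
	return page->flags & FAKE_FLAGS_CHECK_AT_FREE;
}

static void free_fake_page(struct fake_page *page)
{
	/*
	 * Clear the mlocked bit *before* the sanity check, as the patch
	 * does.  If bad_page() ran first, an mlocked page would still
	 * carry the bit and every free of such a page would be reported
	 * as bad.
	 */
	bool was_mlocked = test_clear_mlocked(page);

	if (bad_page(page)) {
		printf("bad page\n");
		return;
	}

	if (was_mlocked)
		printf("mlock accounting: NR_MLOCK--, UNEVICTABLE_MLOCKFREED++\n");

	printf("page freed\n");
}

int main(void)
{
	struct fake_page page = { .flags = FAKE_PG_MLOCKED };

	free_fake_page(&page);	/* prints the accounting line, then "page freed" */
	return 0;
}

The test-and-clear helper mirrors what the patch switches the kernel code to: the flag is read and cleared in a single step before the check, and the saved return value still lets the later accounting know the page was mlocked.
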
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 6f0753fe694..30d5093a099 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -488,7 +488,6 @@ static inline void __free_one_page(struct page *page,
  */
 static inline void free_page_mlock(struct page *page)
 {
-	__ClearPageMlocked(page);
 	__dec_zone_page_state(page, NR_MLOCK);
 	__count_vm_event(UNEVICTABLE_MLOCKFREED);
 }
@@ -558,7 +557,7 @@ static void __free_pages_ok(struct page *page, unsigned int order)
 	unsigned long flags;
 	int i;
 	int bad = 0;
-	int clearMlocked = PageMlocked(page);
+	int wasMlocked = TestClearPageMlocked(page);
 
 	kmemcheck_free_shadow(page, order);
 
@@ -576,7 +575,7 @@ static void __free_pages_ok(struct page *page, unsigned int order)
 	kernel_map_pages(page, 1 << order, 0);
 
 	local_irq_save(flags);
-	if (unlikely(clearMlocked))
+	if (unlikely(wasMlocked))
 		free_page_mlock(page);
 	__count_vm_events(PGFREE, 1 << order);
 	free_one_page(page_zone(page), page, order,
@@ -1022,7 +1021,7 @@ static void free_hot_cold_page(struct page *page, int cold)
 	struct zone *zone = page_zone(page);
 	struct per_cpu_pages *pcp;
 	unsigned long flags;
-	int clearMlocked = PageMlocked(page);
+	int wasMlocked = TestClearPageMlocked(page);
 
 	kmemcheck_free_shadow(page, 0);
 
@@ -1041,7 +1040,7 @@ static void free_hot_cold_page(struct page *page, int cold)
 	pcp = &zone_pcp(zone, get_cpu())->pcp;
 	set_page_private(page, get_pageblock_migratetype(page));
 	local_irq_save(flags);
-	if (unlikely(clearMlocked))
+	if (unlikely(wasMlocked))
 		free_page_mlock(page);
 	__count_vm_event(PGFREE);