From 9cb937e2195bc46aa3f21c50f78ee994bbf6e04a Mon Sep 17 00:00:00 2001
From: Minchan Kim
Date: Thu, 28 Jul 2016 15:47:08 -0700
Subject: mm, page_alloc: fix dirtyable highmem calculation

When I tested vmscale from mmtests on 32bit, I found the benchmark ran
about 0.5 times slower.

                     base        node
                               1 global-1
User                12.98       16.04
System             147.61      166.42
Elapsed             26.48       38.08

With vmstat, I found the average IO wait was much higher than the base.

The reason is that highmem_dirtyable_memory() accumulates free pages
and highmem_file_pages over the zones from HIGHMEM to MOVABLE in a
single running total, which is wrong.  As a result, dirty_thresh in
throttle_vm_writeout() is always 0, so it calls congestion_wait()
frequently once writeback starts.

With this patch, it is mostly recovered.

                     base        node         fix
                               1 global-1
User                12.98       16.04       13.78
System             147.61      166.42      143.92
Elapsed             26.48       38.08       29.64

Link: http://lkml.kernel.org/r/1468404004-5085-4-git-send-email-mgorman@techsingularity.net
Signed-off-by: Minchan Kim
Signed-off-by: Mel Gorman
Acked-by: Johannes Weiner
Acked-by: Vlastimil Babka
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/page-writeback.c | 16 ++++++++++------
 1 file changed, 10 insertions(+), 6 deletions(-)

diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 0bca2376bd42..573d138fa7a5 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -307,27 +307,31 @@ static unsigned long highmem_dirtyable_memory(unsigned long total)
 {
 #ifdef CONFIG_HIGHMEM
         int node;
-        unsigned long x = 0;
+        unsigned long x;
         int i;
-        unsigned long dirtyable = atomic_read(&highmem_file_pages);
+        unsigned long dirtyable = 0;
 
         for_each_node_state(node, N_HIGH_MEMORY) {
                 for (i = ZONE_NORMAL + 1; i < MAX_NR_ZONES; i++) {
                         struct zone *z;
+                        unsigned long nr_pages;
 
                         if (!is_highmem_idx(i))
                                 continue;
 
                         z = &NODE_DATA(node)->node_zones[i];
-                        dirtyable += zone_page_state(z, NR_FREE_PAGES);
+                        if (!populated_zone(z))
+                                continue;
 
+                        nr_pages = zone_page_state(z, NR_FREE_PAGES);
                         /* watch for underflows */
-                        dirtyable -= min(dirtyable, high_wmark_pages(z));
-
-                        x += dirtyable;
+                        nr_pages -= min(nr_pages, high_wmark_pages(z));
+                        dirtyable += nr_pages;
                 }
         }
 
+        x = dirtyable + atomic_read(&highmem_file_pages);
+
         /*
          * Unreclaimable memory (kernel memory or anonymous memory
          * without swap) can bring down the dirtyable pages below
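
Below is a minimal userspace sketch of the arithmetic involved, using
made-up zone numbers and stand-in helpers (fake_zone, min_ul); it is
not kernel code, only an illustration of why the old running-total
accumulation overcounts highmem dirtyable memory while the per-zone
clamp used in this patch does not.

#include <stdio.h>

/* Hypothetical per-zone numbers, chosen only for illustration. */
struct fake_zone {
        unsigned long free_pages;
        unsigned long high_wmark;
};

static unsigned long min_ul(unsigned long a, unsigned long b)
{
        return a < b ? a : b;
}

int main(void)
{
        struct fake_zone zones[] = {
                { .free_pages = 1000, .high_wmark = 300 },
                { .free_pages =    0, .high_wmark = 200 },  /* empty zone */
        };
        unsigned long highmem_file_pages = 5000;
        unsigned long i, n = sizeof(zones) / sizeof(zones[0]);

        /*
         * Old scheme: one running total seeded with the file pages; the
         * per-zone watermark is clamped against that whole total, and the
         * total is re-added to x once per zone, so earlier zones and the
         * file pages are counted repeatedly.
         */
        unsigned long old_total = highmem_file_pages, old_x = 0;
        for (i = 0; i < n; i++) {
                old_total += zones[i].free_pages;
                old_total -= min_ul(old_total, zones[i].high_wmark);
                old_x += old_total;
        }

        /*
         * New scheme: clamp the watermark against each zone's own free
         * pages, skip empty zones, and add the file pages exactly once
         * at the end.
         */
        unsigned long new_dirtyable = 0;
        for (i = 0; i < n; i++) {
                unsigned long nr = zones[i].free_pages;

                if (!nr)        /* stands in for !populated_zone(z) */
                        continue;
                nr -= min_ul(nr, zones[i].high_wmark);
                new_dirtyable += nr;
        }
        unsigned long new_x = new_dirtyable + highmem_file_pages;

        printf("old accumulation: %lu\n", old_x);  /* 11200 with these numbers */
        printf("new accumulation: %lu\n", new_x);  /*  5700 with these numbers */
        return 0;
}

With these hypothetical numbers the old scheme reports 11200 dirtyable
pages against 5700 for the new one; since global_dirtyable_memory()
subtracts this highmem estimate from the global total, overcounting
here is what drives dirty_thresh toward zero.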