Diffstat (limited to 'mm')
-rw-r--r--  mm/page_alloc.c  6
-rw-r--r--  mm/vmscan.c      5
2 files changed, 5 insertions, 6 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 5af33186a25f..71a0b2a23f5b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1725,9 +1725,9 @@ void __meminit build_all_zonelists(void)
 		stop_machine_run(__build_all_zonelists, NULL, NR_CPUS);
 		/* cpuset refresh routine should be here */
 	}
-
-	printk("Built %i zonelists\n", num_online_nodes());
-
+	vm_total_pages = nr_free_pagecache_pages();
+	printk("Built %i zonelists. Total pages: %ld\n",
+			num_online_nodes(), vm_total_pages);
 }
 
 /*
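
For context, a minimal userspace sketch (not part of the patch) of the ordering this hunk sets up in build_all_zonelists(): rebuild the zonelists first, then capture the page total and report it. The stub routines and counts below are hypothetical stand-ins for the kernel's nr_free_pagecache_pages() and num_online_nodes(); only the ordering and the printk format are taken from the diff.

/*
 * Hypothetical userspace model of the new initialisation order;
 * the page count and node count are invented for illustration.
 */
#include <stdio.h>

static long vm_total_pages;	/* mirrors the new global in mm/vmscan.c */

static long count_free_pagecache_pages(void)	/* stand-in, not a kernel API */
{
	return 262144;	/* pretend 1 GiB of 4 KiB pages */
}

static int count_online_nodes(void)		/* stand-in for num_online_nodes() */
{
	return 1;
}

static void build_all_zonelists_model(void)
{
	/* ... zonelist construction would happen here ... */
	vm_total_pages = count_free_pagecache_pages();
	printf("Built %i zonelists. Total pages: %ld\n",
			count_online_nodes(), vm_total_pages);
}

int main(void)
{
	build_all_zonelists_model();
	return 0;
}
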
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 71a02e295037..72babac71dea 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -110,7 +110,7 @@ struct shrinker {
  * From 0 .. 100. Higher means more swappy.
  */
 int vm_swappiness = 60;
-static long total_memory;
+long vm_total_pages;	/* The total number of pages which the VM controls */
 
 static LIST_HEAD(shrinker_list);
 static DECLARE_RWSEM(shrinker_rwsem);
@@ -743,7 +743,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 		 * how much memory
 		 * is mapped.
 		 */
-		mapped_ratio = (sc->nr_mapped * 100) / total_memory;
+		mapped_ratio = (sc->nr_mapped * 100) / vm_total_pages;
 
 		/*
 		 * Now decide how much we really want to unmap some pages. The
@@ -1482,7 +1482,6 @@ static int __init kswapd_init(void)
 		pgdat->kswapd = find_task_by_pid(pid);
 		read_unlock(&tasklist_lock);
 	}
-	total_memory = nr_free_pagecache_pages();
 	hotcpu_notifier(cpu_callback, 0);
 	return 0;
 }
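
For the vmscan.c side, a minimal stand-alone sketch of the arithmetic that now divides by vm_total_pages. The page counts are invented; the point is that mapped_ratio is an integer 0..100 percentage, so the divisor has to be initialised (non-zero) before the first reclaim pass, presumably the reason the assignment moves out of kswapd_init() and into build_all_zonelists(), which runs earlier during boot.

/*
 * Hypothetical model of the mapped_ratio calculation from
 * shrink_active_list(); the numbers are made up for illustration.
 */
#include <stdio.h>

int main(void)
{
	long vm_total_pages = 262144;	/* e.g. 1 GiB of 4 KiB pages */
	long nr_mapped = 98304;		/* pages mapped into pagetables */
	long mapped_ratio;

	/* Same integer arithmetic as the hunk above: yields 37 here. */
	mapped_ratio = (nr_mapped * 100) / vm_total_pages;
	printf("mapped_ratio = %ld%%\n", mapped_ratio);

	return 0;
}
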