author    Johannes Weiner <hannes@cmpxchg.org>  2011-03-23 16:42:33 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2011-03-23 19:46:29 -0700
commit    e7018b8d27e0c9aa2200e5b393e0fe9093c6565c (patch)
tree      13a4d92cd78bc6f97d9a2b6d77880c9ea5d67aa1
parent    bf1ff2635a5fda207fc870df348bfc766e8dcd4d (diff)
memcg: keep only one charge cancelling function
We have two charge cancelling functions: one takes a page count, the other a
page size.  The second one just divides the parameter by PAGE_SIZE and then
calls the first one.  This is trivial, no need for an extra function.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: Balbir Singh <balbir@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--  mm/memcontrol.c  22
1 file changed, 9 insertions, 13 deletions
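For readers skimming the log without the patch applied, here is a minimal userspace sketch of the pattern being removed. It is not the kernel code itself: the PAGE_SHIFT/PAGE_SIZE values and the cancel_charge_* names are illustrative assumptions for a 4K-page system. The point it shows is that the byte-based wrapper only shifted its argument into a page count and forwarded it, so callers can pass a page count directly.

#include <stdio.h>

/* Illustrative values for a 4K-page system (assumption, not from the patch). */
#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Stand-in for the page-count based __mem_cgroup_cancel_charge(). */
static void cancel_charge_pages(unsigned int nr_pages)
{
	unsigned long bytes = nr_pages * PAGE_SIZE;

	printf("uncharge %lu bytes (%u pages)\n", bytes, nr_pages);
}

/* Stand-in for the removed wrapper: it only converted bytes to pages. */
static void cancel_charge_bytes(int page_size)
{
	cancel_charge_pages(page_size >> PAGE_SHIFT);
}

int main(void)
{
	cancel_charge_bytes(PAGE_SIZE);	/* old call style: pass bytes  */
	cancel_charge_pages(1);		/* new call style: pass pages  */
	return 0;
}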
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 8f9381976c59..09a450684f70 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2031,21 +2031,17 @@ bypass:
  * gotten by try_charge().
  */
 static void __mem_cgroup_cancel_charge(struct mem_cgroup *mem,
-				       unsigned long count)
+				       unsigned int nr_pages)
 {
 	if (!mem_cgroup_is_root(mem)) {
-		res_counter_uncharge(&mem->res, PAGE_SIZE * count);
+		unsigned long bytes = nr_pages * PAGE_SIZE;
+
+		res_counter_uncharge(&mem->res, bytes);
 		if (do_swap_account)
-			res_counter_uncharge(&mem->memsw, PAGE_SIZE * count);
+			res_counter_uncharge(&mem->memsw, bytes);
 	}
 }
 
-static void mem_cgroup_cancel_charge(struct mem_cgroup *mem,
-				     int page_size)
-{
-	__mem_cgroup_cancel_charge(mem, page_size >> PAGE_SHIFT);
-}
-
 /*
  * A helper function to get mem_cgroup from ID. must be called under
  * rcu_read_lock(). The caller must check css_is_removed() or some if
@@ -2104,7 +2100,7 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
 	lock_page_cgroup(pc);
 	if (unlikely(PageCgroupUsed(pc))) {
 		unlock_page_cgroup(pc);
-		mem_cgroup_cancel_charge(mem, page_size);
+		__mem_cgroup_cancel_charge(mem, nr_pages);
 		return;
 	}
 	/*
@@ -2242,7 +2238,7 @@ static int mem_cgroup_move_account(struct page *page, struct page_cgroup *pc,
 	mem_cgroup_charge_statistics(from, PageCgroupCache(pc), -nr_pages);
 	if (uncharge)
 		/* This is not "cancel", but cancel_charge does all we need. */
-		mem_cgroup_cancel_charge(from, charge_size);
+		__mem_cgroup_cancel_charge(from, nr_pages);
 
 	/* caller should have done css_get */
 	pc->mem_cgroup = to;
@@ -2307,7 +2303,7 @@ static int mem_cgroup_move_parent(struct page *page,
 	ret = mem_cgroup_move_account(page, pc, child, parent, true, page_size);
 	if (ret)
-		mem_cgroup_cancel_charge(parent, page_size);
+		__mem_cgroup_cancel_charge(parent, page_size >> PAGE_SHIFT);
 
 	if (page_size > PAGE_SIZE)
 		compound_unlock_irqrestore(page, flags);
@@ -2538,7 +2534,7 @@ void mem_cgroup_cancel_charge_swapin(struct mem_cgroup *mem)
 		return;
 	if (!mem)
 		return;
-	mem_cgroup_cancel_charge(mem, PAGE_SIZE);
+	__mem_cgroup_cancel_charge(mem, 1);
 }
 
 static void