author | Johannes Weiner <hannes@cmpxchg.org> | 2016-03-15 14:57:22 -0700 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-03-15 16:55:16 -0700 |
commit | 62cccb8c8e7a3ca233f49d5e7dcb1557d25465cd | (patch) |
tree | 43a902faf461c65393a4efebf9ff9622017b92b1 | /mm/memcontrol.c |
parent | 6a93ca8fde3cfce0f00f02281139a377c83e8d8c | (diff) |
mm: simplify lock_page_memcg()
Now that migration doesn't clear page->mem_cgroup of live pages anymore,
it's safe to make lock_page_memcg() and the memcg stat functions take
pages, and spare the callers from memcg objects.
[akpm@linux-foundation.org: fix warnings]
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Suggested-by: Vladimir Davydov <vdavydov@virtuozzo.com>
Acked-by: Vladimir Davydov <vdavydov@virtuozzo.com>
Cc: Michal Hocko <mhocko@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r-- | mm/memcontrol.c | 23 |
1 file changed, 9 insertions, 14 deletions
```diff
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 64506b2eef34..3e4199830456 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1690,7 +1690,7 @@ cleanup:
  * This function protects unlocked LRU pages from being moved to
  * another cgroup and stabilizes their page->mem_cgroup binding.
  */
-struct mem_cgroup *lock_page_memcg(struct page *page)
+void lock_page_memcg(struct page *page)
 {
        struct mem_cgroup *memcg;
        unsigned long flags;
@@ -1699,25 +1699,18 @@ struct mem_cgroup *lock_page_memcg(struct page *page)
         * The RCU lock is held throughout the transaction. The fast
         * path can get away without acquiring the memcg->move_lock
         * because page moving starts with an RCU grace period.
-        *
-        * The RCU lock also protects the memcg from being freed when
-        * the page state that is going to change is the only thing
-        * preventing the page from being uncharged.
-        * E.g. end-writeback clearing PageWriteback(), which allows
-        * migration to go ahead and uncharge the page before the
-        * account transaction might be complete.
         */
        rcu_read_lock();

        if (mem_cgroup_disabled())
-               return NULL;
+               return;
again:
        memcg = page->mem_cgroup;
        if (unlikely(!memcg))
-               return NULL;
+               return;

        if (atomic_read(&memcg->moving_account) <= 0)
-               return memcg;
+               return;

        spin_lock_irqsave(&memcg->move_lock, flags);
        if (memcg != page->mem_cgroup) {
@@ -1733,16 +1726,18 @@ again:
        memcg->move_lock_task = current;
        memcg->move_lock_flags = flags;

-       return memcg;
+       return;
 }
 EXPORT_SYMBOL(lock_page_memcg);

 /**
  * unlock_page_memcg - unlock a page->mem_cgroup binding
- * @memcg: the memcg returned by lock_page_memcg()
+ * @page: the page
  */
-void unlock_page_memcg(struct mem_cgroup *memcg)
+void unlock_page_memcg(struct page *page)
 {
+       struct mem_cgroup *memcg = page->mem_cgroup;
+
        if (memcg && memcg->move_lock_task == current) {
                unsigned long flags = memcg->move_lock_flags;
```
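To make the caller-side effect of the new signatures concrete, here is a minimal sketch of a critical section before and after this change. The functions clear_dirty_old() and clear_dirty_new() are hypothetical, and the mem_cgroup_dec_page_stat()/MEM_CGROUP_STAT_DIRTY names are shown only to illustrate the "memcg stat functions take pages" part of the changelog; none of this is part of the diff above.

```c
/* Before: the caller had to carry the returned memcg through the section. */
static void clear_dirty_old(struct page *page)
{
	struct mem_cgroup *memcg;

	memcg = lock_page_memcg(page);		/* returned the memcg */
	if (TestClearPageDirty(page))
		mem_cgroup_dec_page_stat(memcg, MEM_CGROUP_STAT_DIRTY);
	unlock_page_memcg(memcg);		/* took the memcg back */
}

/* After: lock, stat update, and unlock are all keyed by the page itself. */
static void clear_dirty_new(struct page *page)
{
	lock_page_memcg(page);
	if (TestClearPageDirty(page))
		mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_DIRTY);
	unlock_page_memcg(page);
}
```

The simplification works because, per the changelog, migration no longer clears page->mem_cgroup of live pages, so unlock_page_memcg() can re-read the binding from the page itself instead of relying on a handle returned by lock_page_memcg().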