From 6b101e2a3ce4d2a0312087598bd1ab4a1db2ac40 Mon Sep 17 00:00:00 2001
From: Joonsoo Kim
Date: Wed, 10 Dec 2014 15:41:12 -0800
Subject: mm/CMA: fix boot regression due to physical address of high_memory

high_memory isn't direct-mapped memory, so retrieving its physical
address isn't appropriate. But it would be useful to check the physical
address of the highmem boundary, so it is justifiable to derive the
physical address from it. On x86 there is a validation check for this
when CONFIG_DEBUG_VIRTUAL is enabled, and it triggers the following
boot failure, reported by Ingo:

  ...
  BUG: Int 6: CR2 00f06f53
  ...
  Call Trace:
    dump_stack+0x41/0x52
    early_idt_handler+0x6b/0x6b
    cma_declare_contiguous+0x33/0x212
    dma_contiguous_reserve_area+0x31/0x4e
    dma_contiguous_reserve+0x11d/0x125
    setup_arch+0x7b5/0xb63
    start_kernel+0xb8/0x3e6
    i386_start_kernel+0x79/0x7d

To fix the boot regression, this patch implements a workaround that
avoids the validation check on x86 when retrieving the physical address
of high_memory. __pa_nodebug(), which this patch uses, is implemented
only on x86, so there is no choice but to use a dirty #ifdef.

[akpm@linux-foundation.org: tweak comment]
Signed-off-by: Joonsoo Kim
Reported-by: Ingo Molnar
Tested-by: Ingo Molnar
Cc: Marek Szyprowski
Cc: Russell King
Cc:
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/cma.c | 14 +++++++++++++-
 1 file changed, 13 insertions(+), 1 deletion(-)

(limited to 'mm')

diff --git a/mm/cma.c b/mm/cma.c
index fde706e1284f..8e9ec13d31db 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -215,9 +215,21 @@ int __init cma_declare_contiguous(phys_addr_t base,
 			bool fixed, struct cma **res_cma)
 {
 	phys_addr_t memblock_end = memblock_end_of_DRAM();
-	phys_addr_t highmem_start = __pa(high_memory);
+	phys_addr_t highmem_start;
 	int ret = 0;
 
+#ifdef CONFIG_X86
+	/*
+	 * high_memory isn't direct mapped memory so retrieving its physical
+	 * address isn't appropriate. But it would be useful to check the
+	 * physical address of the highmem boundary so it's justifiable to get
+	 * the physical address from it. On x86 there is a validation check for
+	 * this case, so the following workaround is needed to avoid it.
+	 */
+	highmem_start = __pa_nodebug(high_memory);
+#else
+	highmem_start = __pa(high_memory);
+#endif
 	pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
 		__func__, &size, &base, &limit, &alignment);
--
cgit v1.2.3


From b455def28d8a22aee4a13d065b3fd1d296833606 Mon Sep 17 00:00:00 2001
From: LQYMGT
Date: Wed, 10 Dec 2014 15:42:13 -0800
Subject: mm: slab/slub: coding style: whitespaces and tabs mixture

Some code in mm/slab.c and mm/slub.c uses whitespace for indentation.
Clean it up.
Signed-off-by: LQYMGT
Acked-by: Christoph Lameter
Cc: Pekka Enberg
Cc: David Rientjes
Cc: Joonsoo Kim
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/slab.c | 10 +++++-----
 mm/slub.c | 10 +++++-----
 2 files changed, 10 insertions(+), 10 deletions(-)

(limited to 'mm')

diff --git a/mm/slab.c b/mm/slab.c
index f34e053ec46e..eae2d21cc14f 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3580,11 +3580,11 @@ static int alloc_kmem_cache_node(struct kmem_cache *cachep, gfp_t gfp)
 
 	for_each_online_node(node) {
-                if (use_alien_caches) {
-                        new_alien = alloc_alien_cache(node, cachep->limit, gfp);
-                        if (!new_alien)
-                                goto fail;
-                }
+		if (use_alien_caches) {
+			new_alien = alloc_alien_cache(node, cachep->limit, gfp);
+			if (!new_alien)
+				goto fail;
+		}
 
 		new_shared = NULL;
 		if (cachep->shared) {
diff --git a/mm/slub.c b/mm/slub.c
index ae7b9f1ad394..761789ea1d09 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2554,7 +2554,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 
 	} else { /* Needs to be taken off a list */
 
-                n = get_node(s, page_to_nid(page));
+		n = get_node(s, page_to_nid(page));
 		/*
 		 * Speculatively acquire the list_lock.
 		 * If the cmpxchg does not succeed then we may
@@ -2587,10 +2587,10 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
 		 * The list lock was not taken therefore no list
 		 * activity can be necessary.
 		 */
-                if (was_frozen)
-                        stat(s, FREE_FROZEN);
-                return;
-        }
+		if (was_frozen)
+			stat(s, FREE_FROZEN);
+		return;
+	}
 
 	if (unlikely(!new.inuse && n->nr_partial >= s->min_partial))
 		goto slab_empty;
--
cgit v1.2.3


From 1df3b26f201f7f08852c14596bc3ee6ba1826f11 Mon Sep 17 00:00:00 2001
From: Vladimir Davydov
Date: Wed, 10 Dec 2014 15:42:16 -0800
Subject: slab: print slabinfo header in seq show

Currently we print the slabinfo header in the seq start method, which
makes it unusable for showing leaks, so we have leaks_show, which does
practically the same as s_show except it doesn't show the header.

However, we can print the header in the seq show method - we only need
to check if the current element is the first on the list. This will
allow us to use the same set of seq iterators for both leaks and
slabinfo reporting, which is nice.
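For illustration, a minimal sketch of the resulting pattern for a
hypothetical seq_file user (struct widget, widget_list and the output
format are made up for this example; the first-element check is the
part that mirrors this patch):

	static LIST_HEAD(widget_list);	/* struct widget entries, linked by 'list' */

	static int widget_show(struct seq_file *m, void *p)
	{
		struct widget *w = list_entry(p, struct widget, list);

		/* Emit the header only when handed the first element
		 * produced by seq_list_start(). */
		if (p == widget_list.next)
			seq_puts(m, "name\tcount\n");
		seq_printf(m, "%s\t%d\n", w->name, w->count);
		return 0;
	}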
Signed-off-by: Vladimir Davydov
Cc: Christoph Lameter
Cc: Pekka Enberg
Cc: David Rientjes
Cc: Joonsoo Kim
Cc: Johannes Weiner
Cc: Michal Hocko
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/slab.c        |  8 +-------
 mm/slab.h        |  1 +
 mm/slab_common.c | 15 ++++++---------
 3 files changed, 8 insertions(+), 16 deletions(-)

(limited to 'mm')

diff --git a/mm/slab.c b/mm/slab.c
index eae2d21cc14f..a2152a2573dd 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -4043,12 +4043,6 @@ ssize_t slabinfo_write(struct file *file, const char __user *buffer,
 
 #ifdef CONFIG_DEBUG_SLAB_LEAK
 
-static void *leaks_start(struct seq_file *m, loff_t *pos)
-{
-	mutex_lock(&slab_mutex);
-	return seq_list_start(&slab_caches, *pos);
-}
-
 static inline int add_caller(unsigned long *n, unsigned long v)
 {
 	unsigned long *p;
@@ -4170,7 +4164,7 @@ static int leaks_show(struct seq_file *m, void *p)
 }
 
 static const struct seq_operations slabstats_op = {
-	.start = leaks_start,
+	.start = slab_start,
 	.next = slab_next,
 	.stop = slab_stop,
 	.show = leaks_show,
diff --git a/mm/slab.h b/mm/slab.h
index ab019e63e3c2..53a55c70c409 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -357,6 +357,7 @@ static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
 
 #endif
 
+void *slab_start(struct seq_file *m, loff_t *pos);
 void *slab_next(struct seq_file *m, void *p, loff_t *pos);
 void slab_stop(struct seq_file *m, void *p);
 
diff --git a/mm/slab_common.c b/mm/slab_common.c
index dcdab81bd240..06aeaf091f21 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -834,14 +834,9 @@ void print_slabinfo_header(struct seq_file *m)
 	seq_putc(m, '\n');
 }
 
-static void *s_start(struct seq_file *m, loff_t *pos)
+void *slab_start(struct seq_file *m, loff_t *pos)
 {
-	loff_t n = *pos;
-
 	mutex_lock(&slab_mutex);
-	if (!n)
-		print_slabinfo_header(m);
-
 	return seq_list_start(&slab_caches, *pos);
 }
 
@@ -903,10 +898,12 @@ int cache_show(struct kmem_cache *s, struct seq_file *m)
 	return 0;
 }
 
-static int s_show(struct seq_file *m, void *p)
+static int slab_show(struct seq_file *m, void *p)
 {
 	struct kmem_cache *s = list_entry(p, struct kmem_cache, list);
 
+	if (p == slab_caches.next)
+		print_slabinfo_header(m);
 	if (!is_root_cache(s))
 		return 0;
 	return cache_show(s, m);
@@ -926,10 +923,10 @@ static int s_show(struct seq_file *m, void *p)
  * + further values on SMP and with statistics enabled
  */
 static const struct seq_operations slabinfo_op = {
-	.start = s_start,
+	.start = slab_start,
 	.next = slab_next,
 	.stop = slab_stop,
-	.show = s_show,
+	.show = slab_show,
 };
 
 static int slabinfo_open(struct inode *inode, struct file *file)
--
cgit v1.2.3


From 5436205738b6f96b685ffdf3488772a1c4b152db Mon Sep 17 00:00:00 2001
From: Joonsoo Kim
Date: Wed, 10 Dec 2014 15:42:18 -0800
Subject: mm/slab: reverse iteration on find_mergeable()

Unlike SLUB, in SLAB an object sometimes doesn't start at the beginning
of the slab. This caused a misalignment problem once slab merging was
supported by commit 12220dea07f1 ("mm/slab: support slab merge"), so an
alignment mismatch check was introduced ("mm/slab: fix unalignment
problem on Malta with EVA due to slab merge") to prevent merging in
that case.

This has the undesirable result that a new cache can be merged with an
infrequently used kmem_cache: for example, kmem_caches with an object
size of 256 bytes are merged into pool_workqueue rather than
kmalloc-256, because the kmem_caches for kmalloc sit at the tail of the
list.

To prevent this situation, this patch reverses the iteration order in
find_mergeable() so that frequently used kmem_caches are found first.
This change helps merge a new kmem_cache into the frequently used
kmem_caches, such as the kmalloc kmem_caches.

Signed-off-by: Joonsoo Kim
Acked-by: Christoph Lameter
Cc: Pekka Enberg
Cc: David Rientjes
Cc: Joonsoo Kim
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/slab_common.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'mm')

diff --git a/mm/slab_common.c b/mm/slab_common.c
index 06aeaf091f21..2a3f5ff410cf 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -240,7 +240,7 @@ struct kmem_cache *find_mergeable(size_t size, size_t align,
 	size = ALIGN(size, align);
 	flags = kmem_cache_flags(size, flags, name, NULL);
 
-	list_for_each_entry(s, &slab_caches, list) {
+	list_for_each_entry_reverse(s, &slab_caches, list) {
 		if (slab_unmergeable(s))
 			continue;
 
--
cgit v1.2.3


From f6edde9cbe0634e4391b6e421a609ca3f57f6c38 Mon Sep 17 00:00:00 2001
From: Andrey Ryabinin
Date: Wed, 10 Dec 2014 15:42:22 -0800
Subject: mm: slub: fix format mismatches in slab_err() callers

Adding __printf(3, 4) to slab_err() exposed the following:

  mm/slub.c: In function `check_slab':
  mm/slub.c:852:4: warning: format `%u' expects argument of type `unsigned int', but argument 4 has type `const char *' [-Wformat=]
     s->name, page->objects, maxobj);
     ^
  mm/slub.c:852:4: warning: too many arguments for format [-Wformat-extra-args]
  mm/slub.c:857:4: warning: format `%u' expects argument of type `unsigned int', but argument 4 has type `const char *' [-Wformat=]
     s->name, page->inuse, page->objects);
     ^
  mm/slub.c:857:4: warning: too many arguments for format [-Wformat-extra-args]
  mm/slub.c: In function `on_freelist':
  mm/slub.c:905:4: warning: format `%d' expects argument of type `int', but argument 5 has type `long unsigned int' [-Wformat=]
     "should be %d", page->objects, max_objects);

Fix the first two warnings by removing the redundant s->name argument.
Fix the last one by changing the type of max_objects from unsigned long
to int.

Signed-off-by: Andrey Ryabinin
Cc: Christoph Lameter
Cc: Pekka Enberg
Acked-by: David Rientjes
Cc: Joonsoo Kim
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/slub.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

(limited to 'mm')

diff --git a/mm/slub.c b/mm/slub.c
index 761789ea1d09..cf4f3c480b98 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -849,12 +849,12 @@ static int check_slab(struct kmem_cache *s, struct page *page)
 	maxobj = order_objects(compound_order(page), s->size, s->reserved);
 	if (page->objects > maxobj) {
 		slab_err(s, page, "objects %u > max %u",
-			s->name, page->objects, maxobj);
+			page->objects, maxobj);
 		return 0;
 	}
 	if (page->inuse > page->objects) {
 		slab_err(s, page, "inuse %u > max %u",
-			s->name, page->inuse, page->objects);
+			page->inuse, page->objects);
 		return 0;
 	}
 	/* Slab_pad_check fixes things up after itself */
@@ -871,7 +871,7 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
 	int nr = 0;
 	void *fp;
 	void *object = NULL;
-	unsigned long max_objects;
+	int max_objects;
 
 	fp = page->freelist;
 	while (fp && nr <= page->objects) {
--
cgit v1.2.3


From c871ac4e9666ad68ae861172ef8a7f73d6e61b26 Mon Sep 17 00:00:00 2001
From: Andrew Morton
Date: Wed, 10 Dec 2014 15:42:25 -0800
Subject: slab: improve checking for invalid gfp_flags

The code goes BUG, but doesn't tell us which bits were unexpectedly
set. Print them out.
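As a hedged sketch of the idea (the wrapper check_gfp_flags() is
hypothetical and only illustrates the pattern; GFP_SLAB_BUG_MASK,
pr_emerg() and BUG() are the existing kernel facilities the patch below
uses):

	static inline void check_gfp_flags(gfp_t flags)
	{
		/* Report which bits were unexpectedly set before going BUG. */
		if (unlikely(flags & GFP_SLAB_BUG_MASK)) {
			pr_emerg("gfp: %u\n", flags & GFP_SLAB_BUG_MASK);
			BUG();
		}
	}

This costs one predictable branch on the slab-grow slow path and turns
an anonymous BUG_ON() into a diagnosable report.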
Cc: Christoph Lameter
Cc: Pekka Enberg
Cc: David Rientjes
Cc: Joonsoo Kim
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/slab.c | 5 ++++-
 mm/slub.c | 5 ++++-
 2 files changed, 8 insertions(+), 2 deletions(-)

(limited to 'mm')

diff --git a/mm/slab.c b/mm/slab.c
index a2152a2573dd..79e15f0a2a6e 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2590,7 +2590,10 @@ static int cache_grow(struct kmem_cache *cachep,
 	 * Be lazy and only check for valid flags here, keeping it out of the
 	 * critical path in kmem_cache_alloc().
 	 */
-	BUG_ON(flags & GFP_SLAB_BUG_MASK);
+	if (unlikely(flags & GFP_SLAB_BUG_MASK)) {
+		pr_emerg("gfp: %u\n", flags & GFP_SLAB_BUG_MASK);
+		BUG();
+	}
 	local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
 
 	/* Take the node list lock to change the colour_next on this node */
diff --git a/mm/slub.c b/mm/slub.c
index cf4f3c480b98..386bbed76e94 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1377,7 +1377,10 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 	int order;
 	int idx;
 
-	BUG_ON(flags & GFP_SLAB_BUG_MASK);
+	if (unlikely(flags & GFP_SLAB_BUG_MASK)) {
+		pr_emerg("gfp: %u\n", flags & GFP_SLAB_BUG_MASK);
+		BUG();
+	}
 
 	page = allocate_slab(s,
 		flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
--
cgit v1.2.3


From 8df0c2dcf61781d2efa8e6e5b06870f6c6785735 Mon Sep 17 00:00:00 2001
From: Pranith Kumar
Date: Wed, 10 Dec 2014 15:42:28 -0800
Subject: slab: replace smp_read_barrier_depends() with lockless_dereference()

Recently lockless_dereference() was added, which can be used in place
of hard-coding smp_read_barrier_depends(). This patch makes that
change.

Signed-off-by: Pranith Kumar
Cc: "Paul E. McKenney"
Cc: Christoph Lameter
Cc: Pekka Enberg
Cc: David Rientjes
Cc: Joonsoo Kim
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/slab.h | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

(limited to 'mm')

diff --git a/mm/slab.h b/mm/slab.h
index 53a55c70c409..078acbcf64e8 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -209,15 +209,15 @@ cache_from_memcg_idx(struct kmem_cache *s, int idx)
 
 	rcu_read_lock();
 	params = rcu_dereference(s->memcg_params);
-	cachep = params->memcg_caches[idx];
-	rcu_read_unlock();
 
 	/*
 	 * Make sure we will access the up-to-date value. The code updating
 	 * memcg_caches issues a write barrier to match this (see
 	 * memcg_register_cache()).
 	 */
-	smp_read_barrier_depends();
+	cachep = lockless_dereference(params->memcg_caches[idx]);
+	rcu_read_unlock();
+
 	return cachep;
 }
 
--
cgit v1.2.3


From 3e32cb2e0a12b6915056ff04601cf1bb9b44f967 Mon Sep 17 00:00:00 2001
From: Johannes Weiner
Date: Wed, 10 Dec 2014 15:42:31 -0800
Subject: mm: memcontrol: lockless page counters

Memory is internally accounted in bytes, using spinlock-protected
64-bit counters, even though the smallest accounting delta is a page.
The counter interface is also convoluted and does too many things.

Introduce a new lockless word-sized page counter API, then change all
memory accounting over to it. The translation to and from bytes then
only happens when interfacing with userspace.
The removed locking overhead is noticeable when scaling beyond the
per-cpu charge caches - on a 4-socket machine with 144 threads, the
following test shows the performance differences of 288 memcgs
concurrently running a page fault benchmark:

vanilla:

    18631648.500498      task-clock (msec)     #  140.643 CPUs utilized      ( +-  0.33% )
          1,380,638      context-switches      #    0.074 K/sec              ( +-  0.75% )
             24,390      cpu-migrations        #    0.001 K/sec              ( +-  8.44% )
      1,843,305,768      page-faults           #    0.099 M/sec              ( +-  0.00% )
 50,134,994,088,218      cycles                #    2.691 GHz                ( +-  0.33% )
                         stalled-cycles-frontend
                         stalled-cycles-backend
  8,049,712,224,651      instructions          #    0.16  insns per cycle    ( +-  0.04% )
  1,586,970,584,979      branches              #   85.176 M/sec              ( +-  0.05% )
      1,724,989,949      branch-misses         #    0.11% of all branches    ( +-  0.48% )

      132.474343877 seconds time elapsed                                     ( +-  0.21% )

lockless:

    12195979.037525      task-clock (msec)     #  133.480 CPUs utilized      ( +-  0.18% )
            832,850      context-switches      #    0.068 K/sec              ( +-  0.54% )
             15,624      cpu-migrations        #    0.001 K/sec              ( +- 10.17% )
      1,843,304,774      page-faults           #    0.151 M/sec              ( +-  0.00% )
 32,811,216,801,141      cycles                #    2.690 GHz                ( +-  0.18% )
                         stalled-cycles-frontend
                         stalled-cycles-backend
  9,999,265,091,727      instructions          #    0.30  insns per cycle    ( +-  0.10% )
  2,076,759,325,203      branches              #  170.282 M/sec              ( +-  0.12% )
      1,656,917,214      branch-misses         #    0.08% of all branches    ( +-  0.55% )

       91.369330729 seconds time elapsed                                     ( +-  0.45% )

On top of improved scalability, this also gets rid of the icky long
long types in the very heart of memcg, which is great for 32 bit and
also makes the code a lot more readable.

Notable differences between the old and new API:

- res_counter_charge() and res_counter_charge_nofail() become
  page_counter_try_charge() and page_counter_charge() respectively, to
  match the more common kernel naming scheme of try_do()/do()

- res_counter_uncharge_until() is only ever used to cancel a local
  counter and never to uncharge bigger segments of a hierarchy, so
  it's replaced by the simpler page_counter_cancel()

- res_counter_set_limit() is replaced by page_counter_limit(), which
  expects its callers to serialize against themselves

- res_counter_memparse_write_strategy() is replaced by
  page_counter_memparse(), which rounds down to the nearest page size
  rather than up. This is more reasonable for explicitly requested
  hard upper limits.

- to keep charging light-weight, page_counter_try_charge() charges
  speculatively, only to roll back if the result exceeds the limit.
  Because of this, a failing bigger charge can temporarily lock out
  smaller charges that would otherwise succeed. The error is bounded
  by the difference between the smallest and the biggest possible
  charge size, so for memcg, this means that a failing THP charge can
  send base page charges into reclaim up to 2MB (4MB) before the limit
  would have been reached. This should be acceptable.
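As a rough usage sketch of the new API (hedged: the surrounding
fragment is hypothetical, while page_counter_init(),
page_counter_limit(), page_counter_try_charge() and
page_counter_uncharge() are the calls this patch introduces):

	struct page_counter parent, child;
	struct page_counter *fail;

	page_counter_init(&parent, NULL);
	page_counter_init(&child, &parent);	/* charges propagate upward */
	page_counter_limit(&parent, 512);	/* limits are in pages, not bytes */

	/*
	 * Speculatively charge 64 pages to child and each of its
	 * ancestors; on failure everything is rolled back and 'fail'
	 * points at the counter that hit its limit.
	 */
	if (page_counter_try_charge(&child, 64, &fail))
		return -ENOMEM;

	/* ... use the memory ... */

	page_counter_uncharge(&child, 64);	/* uncharges the whole chain */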
[akpm@linux-foundation.org: add includes for WARN_ON_ONCE and memparse] [akpm@linux-foundation.org: add includes for WARN_ON_ONCE, memparse, strncmp, and PAGE_SIZE] Signed-off-by: Johannes Weiner Acked-by: Michal Hocko Acked-by: Vladimir Davydov Cc: Tejun Heo Cc: David Rientjes Cc: Stephen Rothwell Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/cgroups/memory.txt | 4 +- include/linux/memcontrol.h | 5 +- include/linux/page_counter.h | 51 ++++ include/net/sock.h | 26 +- init/Kconfig | 5 +- mm/Makefile | 1 + mm/memcontrol.c | 633 ++++++++++++++++++--------------------- mm/page_counter.c | 207 +++++++++++++ net/ipv4/tcp_memcontrol.c | 87 +++--- 9 files changed, 615 insertions(+), 404 deletions(-) create mode 100644 include/linux/page_counter.h create mode 100644 mm/page_counter.c (limited to 'mm') diff --git a/Documentation/cgroups/memory.txt b/Documentation/cgroups/memory.txt index 02ab997a1ed2..f624727ab404 100644 --- a/Documentation/cgroups/memory.txt +++ b/Documentation/cgroups/memory.txt @@ -52,9 +52,9 @@ Brief summary of control files. tasks # attach a task(thread) and show list of threads cgroup.procs # show list of processes cgroup.event_control # an interface for event_fd() - memory.usage_in_bytes # show current res_counter usage for memory + memory.usage_in_bytes # show current usage for memory (See 5.5 for details) - memory.memsw.usage_in_bytes # show current res_counter usage for memory+Swap + memory.memsw.usage_in_bytes # show current usage for memory+Swap (See 5.5 for details) memory.limit_in_bytes # set/show limit of memory usage memory.memsw.limit_in_bytes # set/show limit of memory+Swap usage diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 6b75640ef5ab..ea007615e8f9 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -447,9 +447,8 @@ memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order) /* * __GFP_NOFAIL allocations will move on even if charging is not * possible. Therefore we don't even try, and have this allocation - * unaccounted. We could in theory charge it with - * res_counter_charge_nofail, but we hope those allocations are rare, - * and won't be worth the trouble. + * unaccounted. We could in theory charge it forcibly, but we hope + * those allocations are rare, and won't be worth the trouble. 
*/ if (gfp & __GFP_NOFAIL) return true; diff --git a/include/linux/page_counter.h b/include/linux/page_counter.h new file mode 100644 index 000000000000..7cce3be99ff3 --- /dev/null +++ b/include/linux/page_counter.h @@ -0,0 +1,51 @@ +#ifndef _LINUX_PAGE_COUNTER_H +#define _LINUX_PAGE_COUNTER_H + +#include +#include +#include + +struct page_counter { + atomic_long_t count; + unsigned long limit; + struct page_counter *parent; + + /* legacy */ + unsigned long watermark; + unsigned long failcnt; +}; + +#if BITS_PER_LONG == 32 +#define PAGE_COUNTER_MAX LONG_MAX +#else +#define PAGE_COUNTER_MAX (LONG_MAX / PAGE_SIZE) +#endif + +static inline void page_counter_init(struct page_counter *counter, + struct page_counter *parent) +{ + atomic_long_set(&counter->count, 0); + counter->limit = PAGE_COUNTER_MAX; + counter->parent = parent; +} + +static inline unsigned long page_counter_read(struct page_counter *counter) +{ + return atomic_long_read(&counter->count); +} + +int page_counter_cancel(struct page_counter *counter, unsigned long nr_pages); +void page_counter_charge(struct page_counter *counter, unsigned long nr_pages); +int page_counter_try_charge(struct page_counter *counter, + unsigned long nr_pages, + struct page_counter **fail); +int page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages); +int page_counter_limit(struct page_counter *counter, unsigned long limit); +int page_counter_memparse(const char *buf, unsigned long *nr_pages); + +static inline void page_counter_reset_watermark(struct page_counter *counter) +{ + counter->watermark = page_counter_read(counter); +} + +#endif /* _LINUX_PAGE_COUNTER_H */ diff --git a/include/net/sock.h b/include/net/sock.h index e6f235ebf6c9..7ff44e062a38 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -54,8 +54,8 @@ #include #include #include +#include #include -#include #include #include #include @@ -1062,7 +1062,7 @@ enum cg_proto_flags { }; struct cg_proto { - struct res_counter memory_allocated; /* Current allocated memory. */ + struct page_counter memory_allocated; /* Current allocated memory. */ struct percpu_counter sockets_allocated; /* Current number of sockets. 
*/ int memory_pressure; long sysctl_mem[3]; @@ -1214,34 +1214,26 @@ static inline void memcg_memory_allocated_add(struct cg_proto *prot, unsigned long amt, int *parent_status) { - struct res_counter *fail; - int ret; + page_counter_charge(&prot->memory_allocated, amt); - ret = res_counter_charge_nofail(&prot->memory_allocated, - amt << PAGE_SHIFT, &fail); - if (ret < 0) + if (page_counter_read(&prot->memory_allocated) > + prot->memory_allocated.limit) *parent_status = OVER_LIMIT; } static inline void memcg_memory_allocated_sub(struct cg_proto *prot, unsigned long amt) { - res_counter_uncharge(&prot->memory_allocated, amt << PAGE_SHIFT); -} - -static inline u64 memcg_memory_allocated_read(struct cg_proto *prot) -{ - u64 ret; - ret = res_counter_read_u64(&prot->memory_allocated, RES_USAGE); - return ret >> PAGE_SHIFT; + page_counter_uncharge(&prot->memory_allocated, amt); } static inline long sk_memory_allocated(const struct sock *sk) { struct proto *prot = sk->sk_prot; + if (mem_cgroup_sockets_enabled && sk->sk_cgrp) - return memcg_memory_allocated_read(sk->sk_cgrp); + return page_counter_read(&sk->sk_cgrp->memory_allocated); return atomic_long_read(prot->memory_allocated); } @@ -1255,7 +1247,7 @@ sk_memory_allocated_add(struct sock *sk, int amt, int *parent_status) memcg_memory_allocated_add(sk->sk_cgrp, amt, parent_status); /* update the root cgroup regardless */ atomic_long_add_return(amt, prot->memory_allocated); - return memcg_memory_allocated_read(sk->sk_cgrp); + return page_counter_read(&sk->sk_cgrp->memory_allocated); } return atomic_long_add_return(amt, prot->memory_allocated); diff --git a/init/Kconfig b/init/Kconfig index 903505e66d1d..fd9e88791ba4 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -978,9 +978,12 @@ config RESOURCE_COUNTERS This option enables controller independent resource accounting infrastructure that works with cgroups. +config PAGE_COUNTER + bool + config MEMCG bool "Memory Resource Controller for Control Groups" - depends on RESOURCE_COUNTERS + select PAGE_COUNTER select EVENTFD help Provides a memory resource controller that manages both anonymous diff --git a/mm/Makefile b/mm/Makefile index 8405eb0023a9..6d9f40e922f7 100644 --- a/mm/Makefile +++ b/mm/Makefile @@ -55,6 +55,7 @@ obj-$(CONFIG_FS_XIP) += filemap_xip.o obj-$(CONFIG_MIGRATION) += migrate.o obj-$(CONFIG_QUICKLIST) += quicklist.o obj-$(CONFIG_TRANSPARENT_HUGEPAGE) += huge_memory.o +obj-$(CONFIG_PAGE_COUNTER) += page_counter.o obj-$(CONFIG_MEMCG) += memcontrol.o page_cgroup.o vmpressure.o obj-$(CONFIG_CGROUP_HUGETLB) += hugetlb_cgroup.o obj-$(CONFIG_MEMORY_FAILURE) += memory-failure.o diff --git a/mm/memcontrol.c b/mm/memcontrol.c index d6ac0e33e150..4129ad74e93b 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -25,7 +25,7 @@ * GNU General Public License for more details. 
*/ -#include +#include #include #include #include @@ -165,7 +165,7 @@ struct mem_cgroup_per_zone { struct mem_cgroup_reclaim_iter reclaim_iter[DEF_PRIORITY + 1]; struct rb_node tree_node; /* RB tree node */ - unsigned long long usage_in_excess;/* Set to the value by which */ + unsigned long usage_in_excess;/* Set to the value by which */ /* the soft limit is exceeded*/ bool on_tree; struct mem_cgroup *memcg; /* Back pointer, we cannot */ @@ -198,7 +198,7 @@ static struct mem_cgroup_tree soft_limit_tree __read_mostly; struct mem_cgroup_threshold { struct eventfd_ctx *eventfd; - u64 threshold; + unsigned long threshold; }; /* For threshold */ @@ -284,10 +284,13 @@ static void mem_cgroup_oom_notify(struct mem_cgroup *memcg); */ struct mem_cgroup { struct cgroup_subsys_state css; - /* - * the counter to account for memory usage - */ - struct res_counter res; + + /* Accounted resources */ + struct page_counter memory; + struct page_counter memsw; + struct page_counter kmem; + + unsigned long soft_limit; /* vmpressure notifications */ struct vmpressure vmpressure; @@ -295,15 +298,6 @@ struct mem_cgroup { /* css_online() has been completed */ int initialized; - /* - * the counter to account for mem+swap usage. - */ - struct res_counter memsw; - - /* - * the counter to account for kernel memory usage. - */ - struct res_counter kmem; /* * Should the accounting and control be hierarchical, per subtree? */ @@ -650,7 +644,7 @@ static void disarm_kmem_keys(struct mem_cgroup *memcg) * This check can't live in kmem destruction function, * since the charges will outlive the cgroup */ - WARN_ON(res_counter_read_u64(&memcg->kmem, RES_USAGE) != 0); + WARN_ON(page_counter_read(&memcg->kmem)); } #else static void disarm_kmem_keys(struct mem_cgroup *memcg) @@ -706,7 +700,7 @@ soft_limit_tree_from_page(struct page *page) static void __mem_cgroup_insert_exceeded(struct mem_cgroup_per_zone *mz, struct mem_cgroup_tree_per_zone *mctz, - unsigned long long new_usage_in_excess) + unsigned long new_usage_in_excess) { struct rb_node **p = &mctz->rb_root.rb_node; struct rb_node *parent = NULL; @@ -755,10 +749,21 @@ static void mem_cgroup_remove_exceeded(struct mem_cgroup_per_zone *mz, spin_unlock_irqrestore(&mctz->lock, flags); } +static unsigned long soft_limit_excess(struct mem_cgroup *memcg) +{ + unsigned long nr_pages = page_counter_read(&memcg->memory); + unsigned long soft_limit = ACCESS_ONCE(memcg->soft_limit); + unsigned long excess = 0; + + if (nr_pages > soft_limit) + excess = nr_pages - soft_limit; + + return excess; +} static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page) { - unsigned long long excess; + unsigned long excess; struct mem_cgroup_per_zone *mz; struct mem_cgroup_tree_per_zone *mctz; @@ -769,7 +774,7 @@ static void mem_cgroup_update_tree(struct mem_cgroup *memcg, struct page *page) */ for (; memcg; memcg = parent_mem_cgroup(memcg)) { mz = mem_cgroup_page_zoneinfo(memcg, page); - excess = res_counter_soft_limit_excess(&memcg->res); + excess = soft_limit_excess(memcg); /* * We have to update the tree if mz is on RB-tree or * mem is over its softlimit. @@ -825,7 +830,7 @@ retry: * position in the tree. 
*/ __mem_cgroup_remove_exceeded(mz, mctz); - if (!res_counter_soft_limit_excess(&mz->memcg->res) || + if (!soft_limit_excess(mz->memcg) || !css_tryget_online(&mz->memcg->css)) goto retry; done: @@ -1492,7 +1497,7 @@ int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec) return inactive * inactive_ratio < active; } -#define mem_cgroup_from_res_counter(counter, member) \ +#define mem_cgroup_from_counter(counter, member) \ container_of(counter, struct mem_cgroup, member) /** @@ -1504,12 +1509,23 @@ int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec) */ static unsigned long mem_cgroup_margin(struct mem_cgroup *memcg) { - unsigned long long margin; + unsigned long margin = 0; + unsigned long count; + unsigned long limit; - margin = res_counter_margin(&memcg->res); - if (do_swap_account) - margin = min(margin, res_counter_margin(&memcg->memsw)); - return margin >> PAGE_SHIFT; + count = page_counter_read(&memcg->memory); + limit = ACCESS_ONCE(memcg->memory.limit); + if (count < limit) + margin = limit - count; + + if (do_swap_account) { + count = page_counter_read(&memcg->memsw); + limit = ACCESS_ONCE(memcg->memsw.limit); + if (count <= limit) + margin = min(margin, limit - count); + } + + return margin; } int mem_cgroup_swappiness(struct mem_cgroup *memcg) @@ -1644,18 +1660,15 @@ void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p) rcu_read_unlock(); - pr_info("memory: usage %llukB, limit %llukB, failcnt %llu\n", - res_counter_read_u64(&memcg->res, RES_USAGE) >> 10, - res_counter_read_u64(&memcg->res, RES_LIMIT) >> 10, - res_counter_read_u64(&memcg->res, RES_FAILCNT)); - pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %llu\n", - res_counter_read_u64(&memcg->memsw, RES_USAGE) >> 10, - res_counter_read_u64(&memcg->memsw, RES_LIMIT) >> 10, - res_counter_read_u64(&memcg->memsw, RES_FAILCNT)); - pr_info("kmem: usage %llukB, limit %llukB, failcnt %llu\n", - res_counter_read_u64(&memcg->kmem, RES_USAGE) >> 10, - res_counter_read_u64(&memcg->kmem, RES_LIMIT) >> 10, - res_counter_read_u64(&memcg->kmem, RES_FAILCNT)); + pr_info("memory: usage %llukB, limit %llukB, failcnt %lu\n", + K((u64)page_counter_read(&memcg->memory)), + K((u64)memcg->memory.limit), memcg->memory.failcnt); + pr_info("memory+swap: usage %llukB, limit %llukB, failcnt %lu\n", + K((u64)page_counter_read(&memcg->memsw)), + K((u64)memcg->memsw.limit), memcg->memsw.failcnt); + pr_info("kmem: usage %llukB, limit %llukB, failcnt %lu\n", + K((u64)page_counter_read(&memcg->kmem)), + K((u64)memcg->kmem.limit), memcg->kmem.failcnt); for_each_mem_cgroup_tree(iter, memcg) { pr_info("Memory cgroup stats for "); @@ -1695,28 +1708,17 @@ static int mem_cgroup_count_children(struct mem_cgroup *memcg) /* * Return the memory (and swap, if configured) limit for a memcg. */ -static u64 mem_cgroup_get_limit(struct mem_cgroup *memcg) +static unsigned long mem_cgroup_get_limit(struct mem_cgroup *memcg) { - u64 limit; + unsigned long limit; - limit = res_counter_read_u64(&memcg->res, RES_LIMIT); - - /* - * Do not consider swap space if we cannot swap due to swappiness - */ + limit = memcg->memory.limit; if (mem_cgroup_swappiness(memcg)) { - u64 memsw; + unsigned long memsw_limit; - limit += total_swap_pages << PAGE_SHIFT; - memsw = res_counter_read_u64(&memcg->memsw, RES_LIMIT); - - /* - * If memsw is finite and limits the amount of swap space - * available to this memcg, return that limit. 
- */ - limit = min(limit, memsw); + memsw_limit = memcg->memsw.limit; + limit = min(limit + total_swap_pages, memsw_limit); } - return limit; } @@ -1740,7 +1742,7 @@ static void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask, } check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, order, NULL); - totalpages = mem_cgroup_get_limit(memcg) >> PAGE_SHIFT ? : 1; + totalpages = mem_cgroup_get_limit(memcg) ? : 1; for_each_mem_cgroup_tree(iter, memcg) { struct css_task_iter it; struct task_struct *task; @@ -1943,7 +1945,7 @@ static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg, .priority = 0, }; - excess = res_counter_soft_limit_excess(&root_memcg->res) >> PAGE_SHIFT; + excess = soft_limit_excess(root_memcg); while (1) { victim = mem_cgroup_iter(root_memcg, victim, &reclaim); @@ -1974,7 +1976,7 @@ static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg, total += mem_cgroup_shrink_node_zone(victim, gfp_mask, false, zone, &nr_scanned); *total_scanned += nr_scanned; - if (!res_counter_soft_limit_excess(&root_memcg->res)) + if (!soft_limit_excess(root_memcg)) break; } mem_cgroup_iter_break(root_memcg, victim); @@ -2316,33 +2318,31 @@ static DEFINE_MUTEX(percpu_charge_mutex); static bool consume_stock(struct mem_cgroup *memcg, unsigned int nr_pages) { struct memcg_stock_pcp *stock; - bool ret = true; + bool ret = false; if (nr_pages > CHARGE_BATCH) - return false; + return ret; stock = &get_cpu_var(memcg_stock); - if (memcg == stock->cached && stock->nr_pages >= nr_pages) + if (memcg == stock->cached && stock->nr_pages >= nr_pages) { stock->nr_pages -= nr_pages; - else /* need to call res_counter_charge */ - ret = false; + ret = true; + } put_cpu_var(memcg_stock); return ret; } /* - * Returns stocks cached in percpu to res_counter and reset cached information. + * Returns stocks cached in percpu and reset cached information. */ static void drain_stock(struct memcg_stock_pcp *stock) { struct mem_cgroup *old = stock->cached; if (stock->nr_pages) { - unsigned long bytes = stock->nr_pages * PAGE_SIZE; - - res_counter_uncharge(&old->res, bytes); + page_counter_uncharge(&old->memory, stock->nr_pages); if (do_swap_account) - res_counter_uncharge(&old->memsw, bytes); + page_counter_uncharge(&old->memsw, stock->nr_pages); stock->nr_pages = 0; } stock->cached = NULL; @@ -2371,7 +2371,7 @@ static void __init memcg_stock_init(void) } /* - * Cache charges(val) which is from res_counter, to local per_cpu area. + * Cache charges(val) to local per_cpu area. * This will be consumed by consume_stock() function, later. */ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages) @@ -2431,8 +2431,7 @@ out: /* * Tries to drain stocked charges in other cpus. This function is asynchronous * and just put a work per cpu for draining localy on each cpu. Caller can - * expects some charges will be back to res_counter later but cannot wait for - * it. + * expects some charges will be back later but cannot wait for it. 
*/ static void drain_all_stock_async(struct mem_cgroup *root_memcg) { @@ -2506,9 +2505,8 @@ static int try_charge(struct mem_cgroup *memcg, gfp_t gfp_mask, unsigned int batch = max(CHARGE_BATCH, nr_pages); int nr_retries = MEM_CGROUP_RECLAIM_RETRIES; struct mem_cgroup *mem_over_limit; - struct res_counter *fail_res; + struct page_counter *counter; unsigned long nr_reclaimed; - unsigned long long size; bool may_swap = true; bool drained = false; int ret = 0; @@ -2519,16 +2517,15 @@ retry: if (consume_stock(memcg, nr_pages)) goto done; - size = batch * PAGE_SIZE; if (!do_swap_account || - !res_counter_charge(&memcg->memsw, size, &fail_res)) { - if (!res_counter_charge(&memcg->res, size, &fail_res)) + !page_counter_try_charge(&memcg->memsw, batch, &counter)) { + if (!page_counter_try_charge(&memcg->memory, batch, &counter)) goto done_restock; if (do_swap_account) - res_counter_uncharge(&memcg->memsw, size); - mem_over_limit = mem_cgroup_from_res_counter(fail_res, res); + page_counter_uncharge(&memcg->memsw, batch); + mem_over_limit = mem_cgroup_from_counter(counter, memory); } else { - mem_over_limit = mem_cgroup_from_res_counter(fail_res, memsw); + mem_over_limit = mem_cgroup_from_counter(counter, memsw); may_swap = false; } @@ -2611,32 +2608,12 @@ done: static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages) { - unsigned long bytes = nr_pages * PAGE_SIZE; - if (mem_cgroup_is_root(memcg)) return; - res_counter_uncharge(&memcg->res, bytes); + page_counter_uncharge(&memcg->memory, nr_pages); if (do_swap_account) - res_counter_uncharge(&memcg->memsw, bytes); -} - -/* - * Cancel chrages in this cgroup....doesn't propagate to parent cgroup. - * This is useful when moving usage to parent cgroup. - */ -static void __mem_cgroup_cancel_local_charge(struct mem_cgroup *memcg, - unsigned int nr_pages) -{ - unsigned long bytes = nr_pages * PAGE_SIZE; - - if (mem_cgroup_is_root(memcg)) - return; - - res_counter_uncharge_until(&memcg->res, memcg->res.parent, bytes); - if (do_swap_account) - res_counter_uncharge_until(&memcg->memsw, - memcg->memsw.parent, bytes); + page_counter_uncharge(&memcg->memsw, nr_pages); } /* @@ -2760,8 +2737,6 @@ static void commit_charge(struct page *page, struct mem_cgroup *memcg, unlock_page_lru(page, isolated); } -static DEFINE_MUTEX(set_limit_mutex); - #ifdef CONFIG_MEMCG_KMEM /* * The memcg_slab_mutex is held whenever a per memcg kmem cache is created or @@ -2804,16 +2779,17 @@ static int mem_cgroup_slabinfo_read(struct seq_file *m, void *v) } #endif -static int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp, u64 size) +static int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp, + unsigned long nr_pages) { - struct res_counter *fail_res; + struct page_counter *counter; int ret = 0; - ret = res_counter_charge(&memcg->kmem, size, &fail_res); - if (ret) + ret = page_counter_try_charge(&memcg->kmem, nr_pages, &counter); + if (ret < 0) return ret; - ret = try_charge(memcg, gfp, size >> PAGE_SHIFT); + ret = try_charge(memcg, gfp, nr_pages); if (ret == -EINTR) { /* * try_charge() chose to bypass to root due to OOM kill or @@ -2830,25 +2806,25 @@ static int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp, u64 size) * when the allocation triggers should have been already * directed to the root cgroup in memcontrol.h */ - res_counter_charge_nofail(&memcg->res, size, &fail_res); + page_counter_charge(&memcg->memory, nr_pages); if (do_swap_account) - res_counter_charge_nofail(&memcg->memsw, size, - &fail_res); + page_counter_charge(&memcg->memsw, 
nr_pages); ret = 0; } else if (ret) - res_counter_uncharge(&memcg->kmem, size); + page_counter_uncharge(&memcg->kmem, nr_pages); return ret; } -static void memcg_uncharge_kmem(struct mem_cgroup *memcg, u64 size) +static void memcg_uncharge_kmem(struct mem_cgroup *memcg, + unsigned long nr_pages) { - res_counter_uncharge(&memcg->res, size); + page_counter_uncharge(&memcg->memory, nr_pages); if (do_swap_account) - res_counter_uncharge(&memcg->memsw, size); + page_counter_uncharge(&memcg->memsw, nr_pages); /* Not down to 0 */ - if (res_counter_uncharge(&memcg->kmem, size)) + if (page_counter_uncharge(&memcg->kmem, nr_pages)) return; /* @@ -3124,19 +3100,21 @@ static void memcg_schedule_register_cache(struct mem_cgroup *memcg, int __memcg_charge_slab(struct kmem_cache *cachep, gfp_t gfp, int order) { + unsigned int nr_pages = 1 << order; int res; - res = memcg_charge_kmem(cachep->memcg_params->memcg, gfp, - PAGE_SIZE << order); + res = memcg_charge_kmem(cachep->memcg_params->memcg, gfp, nr_pages); if (!res) - atomic_add(1 << order, &cachep->memcg_params->nr_pages); + atomic_add(nr_pages, &cachep->memcg_params->nr_pages); return res; } void __memcg_uncharge_slab(struct kmem_cache *cachep, int order) { - memcg_uncharge_kmem(cachep->memcg_params->memcg, PAGE_SIZE << order); - atomic_sub(1 << order, &cachep->memcg_params->nr_pages); + unsigned int nr_pages = 1 << order; + + memcg_uncharge_kmem(cachep->memcg_params->memcg, nr_pages); + atomic_sub(nr_pages, &cachep->memcg_params->nr_pages); } /* @@ -3257,7 +3235,7 @@ __memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **_memcg, int order) return true; } - ret = memcg_charge_kmem(memcg, gfp, PAGE_SIZE << order); + ret = memcg_charge_kmem(memcg, gfp, 1 << order); if (!ret) *_memcg = memcg; @@ -3274,7 +3252,7 @@ void __memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, /* The page allocation failed. Revert */ if (!page) { - memcg_uncharge_kmem(memcg, PAGE_SIZE << order); + memcg_uncharge_kmem(memcg, 1 << order); return; } /* @@ -3307,7 +3285,7 @@ void __memcg_kmem_uncharge_pages(struct page *page, int order) return; VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page); - memcg_uncharge_kmem(memcg, PAGE_SIZE << order); + memcg_uncharge_kmem(memcg, 1 << order); } #else static inline void memcg_unregister_all_caches(struct mem_cgroup *memcg) @@ -3485,8 +3463,12 @@ static int mem_cgroup_move_parent(struct page *page, ret = mem_cgroup_move_account(page, nr_pages, pc, child, parent); - if (!ret) - __mem_cgroup_cancel_local_charge(child, nr_pages); + if (!ret) { + /* Take charge off the local counters */ + page_counter_cancel(&child->memory, nr_pages); + if (do_swap_account) + page_counter_cancel(&child->memsw, nr_pages); + } if (nr_pages > 1) compound_unlock_irqrestore(page, flags); @@ -3516,7 +3498,7 @@ static void mem_cgroup_swap_statistics(struct mem_cgroup *memcg, * * Returns 0 on success, -EINVAL on failure. * - * The caller must have charged to @to, IOW, called res_counter_charge() about + * The caller must have charged to @to, IOW, called page_counter_charge() about * both res and memsw, and called css_get(). */ static int mem_cgroup_move_swap_account(swp_entry_t entry, @@ -3532,7 +3514,7 @@ static int mem_cgroup_move_swap_account(swp_entry_t entry, mem_cgroup_swap_statistics(to, true); /* * This function is only called from task migration context now. 
- * It postpones res_counter and refcount handling till the end + * It postpones page_counter and refcount handling till the end * of task migration(mem_cgroup_clear_mc()) for performance * improvement. But we cannot postpone css_get(to) because if * the process that has been moved to @to does swap-in, the @@ -3590,60 +3572,57 @@ void mem_cgroup_print_bad_page(struct page *page) } #endif +static DEFINE_MUTEX(memcg_limit_mutex); + static int mem_cgroup_resize_limit(struct mem_cgroup *memcg, - unsigned long long val) + unsigned long limit) { + unsigned long curusage; + unsigned long oldusage; + bool enlarge = false; int retry_count; - int ret = 0; - int children = mem_cgroup_count_children(memcg); - u64 curusage, oldusage; - int enlarge; + int ret; /* * For keeping hierarchical_reclaim simple, how long we should retry * is depends on callers. We set our retry-count to be function * of # of children which we should visit in this loop. */ - retry_count = MEM_CGROUP_RECLAIM_RETRIES * children; + retry_count = MEM_CGROUP_RECLAIM_RETRIES * + mem_cgroup_count_children(memcg); - oldusage = res_counter_read_u64(&memcg->res, RES_USAGE); + oldusage = page_counter_read(&memcg->memory); - enlarge = 0; - while (retry_count) { + do { if (signal_pending(current)) { ret = -EINTR; break; } - /* - * Rather than hide all in some function, I do this in - * open coded manner. You see what this really does. - * We have to guarantee memcg->res.limit <= memcg->memsw.limit. - */ - mutex_lock(&set_limit_mutex); - if (res_counter_read_u64(&memcg->memsw, RES_LIMIT) < val) { + + mutex_lock(&memcg_limit_mutex); + if (limit > memcg->memsw.limit) { + mutex_unlock(&memcg_limit_mutex); ret = -EINVAL; - mutex_unlock(&set_limit_mutex); break; } - - if (res_counter_read_u64(&memcg->res, RES_LIMIT) < val) - enlarge = 1; - - ret = res_counter_set_limit(&memcg->res, val); - mutex_unlock(&set_limit_mutex); + if (limit > memcg->memory.limit) + enlarge = true; + ret = page_counter_limit(&memcg->memory, limit); + mutex_unlock(&memcg_limit_mutex); if (!ret) break; try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, true); - curusage = res_counter_read_u64(&memcg->res, RES_USAGE); + curusage = page_counter_read(&memcg->memory); /* Usage is reduced ? */ if (curusage >= oldusage) retry_count--; else oldusage = curusage; - } + } while (retry_count); + if (!ret && enlarge) memcg_oom_recover(memcg); @@ -3651,52 +3630,53 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg, } static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg, - unsigned long long val) + unsigned long limit) { + unsigned long curusage; + unsigned long oldusage; + bool enlarge = false; int retry_count; - u64 oldusage, curusage; - int children = mem_cgroup_count_children(memcg); - int ret = -EBUSY; - int enlarge = 0; + int ret; /* see mem_cgroup_resize_res_limit */ - retry_count = children * MEM_CGROUP_RECLAIM_RETRIES; - oldusage = res_counter_read_u64(&memcg->memsw, RES_USAGE); - while (retry_count) { + retry_count = MEM_CGROUP_RECLAIM_RETRIES * + mem_cgroup_count_children(memcg); + + oldusage = page_counter_read(&memcg->memsw); + + do { if (signal_pending(current)) { ret = -EINTR; break; } - /* - * Rather than hide all in some function, I do this in - * open coded manner. You see what this really does. - * We have to guarantee memcg->res.limit <= memcg->memsw.limit. 
- */ - mutex_lock(&set_limit_mutex); - if (res_counter_read_u64(&memcg->res, RES_LIMIT) > val) { + + mutex_lock(&memcg_limit_mutex); + if (limit < memcg->memory.limit) { + mutex_unlock(&memcg_limit_mutex); ret = -EINVAL; - mutex_unlock(&set_limit_mutex); break; } - if (res_counter_read_u64(&memcg->memsw, RES_LIMIT) < val) - enlarge = 1; - ret = res_counter_set_limit(&memcg->memsw, val); - mutex_unlock(&set_limit_mutex); + if (limit > memcg->memsw.limit) + enlarge = true; + ret = page_counter_limit(&memcg->memsw, limit); + mutex_unlock(&memcg_limit_mutex); if (!ret) break; try_to_free_mem_cgroup_pages(memcg, 1, GFP_KERNEL, false); - curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE); + curusage = page_counter_read(&memcg->memsw); /* Usage is reduced ? */ if (curusage >= oldusage) retry_count--; else oldusage = curusage; - } + } while (retry_count); + if (!ret && enlarge) memcg_oom_recover(memcg); + return ret; } @@ -3709,7 +3689,7 @@ unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order, unsigned long reclaimed; int loop = 0; struct mem_cgroup_tree_per_zone *mctz; - unsigned long long excess; + unsigned long excess; unsigned long nr_scanned; if (order > 0) @@ -3763,7 +3743,7 @@ unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order, } while (1); } __mem_cgroup_remove_exceeded(mz, mctz); - excess = res_counter_soft_limit_excess(&mz->memcg->res); + excess = soft_limit_excess(mz->memcg); /* * One school of thought says that we should not add * back the node to the tree if reclaim returns 0. @@ -3856,7 +3836,6 @@ static void mem_cgroup_force_empty_list(struct mem_cgroup *memcg, static void mem_cgroup_reparent_charges(struct mem_cgroup *memcg) { int node, zid; - u64 usage; do { /* This is for making all *used* pages to be on LRU. */ @@ -3888,9 +3867,8 @@ static void mem_cgroup_reparent_charges(struct mem_cgroup *memcg) * right after the check. RES_USAGE should be safe as we always * charge before adding to the LRU. 
*/ - usage = res_counter_read_u64(&memcg->res, RES_USAGE) - - res_counter_read_u64(&memcg->kmem, RES_USAGE); - } while (usage > 0); + } while (page_counter_read(&memcg->memory) - + page_counter_read(&memcg->kmem) > 0); } /* @@ -3930,7 +3908,7 @@ static int mem_cgroup_force_empty(struct mem_cgroup *memcg) /* we call try-to-free pages for make this cgroup empty */ lru_add_drain_all(); /* try to free all pages in this cgroup */ - while (nr_retries && res_counter_read_u64(&memcg->res, RES_USAGE) > 0) { + while (nr_retries && page_counter_read(&memcg->memory)) { int progress; if (signal_pending(current)) @@ -4001,8 +3979,8 @@ out: return retval; } -static unsigned long mem_cgroup_recursive_stat(struct mem_cgroup *memcg, - enum mem_cgroup_stat_index idx) +static unsigned long tree_stat(struct mem_cgroup *memcg, + enum mem_cgroup_stat_index idx) { struct mem_cgroup *iter; long val = 0; @@ -4020,55 +3998,72 @@ static inline u64 mem_cgroup_usage(struct mem_cgroup *memcg, bool swap) { u64 val; - if (!mem_cgroup_is_root(memcg)) { + if (mem_cgroup_is_root(memcg)) { + val = tree_stat(memcg, MEM_CGROUP_STAT_CACHE); + val += tree_stat(memcg, MEM_CGROUP_STAT_RSS); + if (swap) + val += tree_stat(memcg, MEM_CGROUP_STAT_SWAP); + } else { if (!swap) - return res_counter_read_u64(&memcg->res, RES_USAGE); + val = page_counter_read(&memcg->memory); else - return res_counter_read_u64(&memcg->memsw, RES_USAGE); + val = page_counter_read(&memcg->memsw); } - - /* - * Transparent hugepages are still accounted for in MEM_CGROUP_STAT_RSS - * as well as in MEM_CGROUP_STAT_RSS_HUGE. - */ - val = mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_CACHE); - val += mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_RSS); - - if (swap) - val += mem_cgroup_recursive_stat(memcg, MEM_CGROUP_STAT_SWAP); - return val << PAGE_SHIFT; } +enum { + RES_USAGE, + RES_LIMIT, + RES_MAX_USAGE, + RES_FAILCNT, + RES_SOFT_LIMIT, +}; static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css, struct cftype *cft) { struct mem_cgroup *memcg = mem_cgroup_from_css(css); - enum res_type type = MEMFILE_TYPE(cft->private); - int name = MEMFILE_ATTR(cft->private); + struct page_counter *counter; - switch (type) { + switch (MEMFILE_TYPE(cft->private)) { case _MEM: - if (name == RES_USAGE) - return mem_cgroup_usage(memcg, false); - return res_counter_read_u64(&memcg->res, name); + counter = &memcg->memory; + break; case _MEMSWAP: - if (name == RES_USAGE) - return mem_cgroup_usage(memcg, true); - return res_counter_read_u64(&memcg->memsw, name); + counter = &memcg->memsw; + break; case _KMEM: - return res_counter_read_u64(&memcg->kmem, name); + counter = &memcg->kmem; break; default: BUG(); } + + switch (MEMFILE_ATTR(cft->private)) { + case RES_USAGE: + if (counter == &memcg->memory) + return mem_cgroup_usage(memcg, false); + if (counter == &memcg->memsw) + return mem_cgroup_usage(memcg, true); + return (u64)page_counter_read(counter) * PAGE_SIZE; + case RES_LIMIT: + return (u64)counter->limit * PAGE_SIZE; + case RES_MAX_USAGE: + return (u64)counter->watermark * PAGE_SIZE; + case RES_FAILCNT: + return counter->failcnt; + case RES_SOFT_LIMIT: + return (u64)memcg->soft_limit * PAGE_SIZE; + default: + BUG(); + } } #ifdef CONFIG_MEMCG_KMEM /* should be called with activate_kmem_mutex held */ static int __memcg_activate_kmem(struct mem_cgroup *memcg, - unsigned long long limit) + unsigned long nr_pages) { int err = 0; int memcg_id; @@ -4115,7 +4110,7 @@ static int __memcg_activate_kmem(struct mem_cgroup *memcg, * We couldn't have accounted to this cgroup, 
because it hasn't got the * active bit set yet, so this should succeed. */ - err = res_counter_set_limit(&memcg->kmem, limit); + err = page_counter_limit(&memcg->kmem, nr_pages); VM_BUG_ON(err); static_key_slow_inc(&memcg_kmem_enabled_key); @@ -4131,25 +4126,27 @@ out: } static int memcg_activate_kmem(struct mem_cgroup *memcg, - unsigned long long limit) + unsigned long nr_pages) { int ret; mutex_lock(&activate_kmem_mutex); - ret = __memcg_activate_kmem(memcg, limit); + ret = __memcg_activate_kmem(memcg, nr_pages); mutex_unlock(&activate_kmem_mutex); return ret; } static int memcg_update_kmem_limit(struct mem_cgroup *memcg, - unsigned long long val) + unsigned long limit) { int ret; + mutex_lock(&memcg_limit_mutex); if (!memcg_kmem_is_active(memcg)) - ret = memcg_activate_kmem(memcg, val); + ret = memcg_activate_kmem(memcg, limit); else - ret = res_counter_set_limit(&memcg->kmem, val); + ret = page_counter_limit(&memcg->kmem, limit); + mutex_unlock(&memcg_limit_mutex); return ret; } @@ -4167,13 +4164,13 @@ static int memcg_propagate_kmem(struct mem_cgroup *memcg) * after this point, because it has at least one child already. */ if (memcg_kmem_is_active(parent)) - ret = __memcg_activate_kmem(memcg, RES_COUNTER_MAX); + ret = __memcg_activate_kmem(memcg, PAGE_COUNTER_MAX); mutex_unlock(&activate_kmem_mutex); return ret; } #else static int memcg_update_kmem_limit(struct mem_cgroup *memcg, - unsigned long long val) + unsigned long limit) { return -EINVAL; } @@ -4187,110 +4184,69 @@ static ssize_t mem_cgroup_write(struct kernfs_open_file *of, char *buf, size_t nbytes, loff_t off) { struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); - enum res_type type; - int name; - unsigned long long val; + unsigned long nr_pages; int ret; buf = strstrip(buf); - type = MEMFILE_TYPE(of_cft(of)->private); - name = MEMFILE_ATTR(of_cft(of)->private); + ret = page_counter_memparse(buf, &nr_pages); + if (ret) + return ret; - switch (name) { + switch (MEMFILE_ATTR(of_cft(of)->private)) { case RES_LIMIT: if (mem_cgroup_is_root(memcg)) { /* Can't set limit on root */ ret = -EINVAL; break; } - /* This function does all necessary parse...reuse it */ - ret = res_counter_memparse_write_strategy(buf, &val); - if (ret) + switch (MEMFILE_TYPE(of_cft(of)->private)) { + case _MEM: + ret = mem_cgroup_resize_limit(memcg, nr_pages); break; - if (type == _MEM) - ret = mem_cgroup_resize_limit(memcg, val); - else if (type == _MEMSWAP) - ret = mem_cgroup_resize_memsw_limit(memcg, val); - else if (type == _KMEM) - ret = memcg_update_kmem_limit(memcg, val); - else - return -EINVAL; - break; - case RES_SOFT_LIMIT: - ret = res_counter_memparse_write_strategy(buf, &val); - if (ret) + case _MEMSWAP: + ret = mem_cgroup_resize_memsw_limit(memcg, nr_pages); break; - /* - * For memsw, soft limits are hard to implement in terms - * of semantics, for now, we support soft limits for - * control without swap - */ - if (type == _MEM) - ret = res_counter_set_soft_limit(&memcg->res, val); - else - ret = -EINVAL; + case _KMEM: + ret = memcg_update_kmem_limit(memcg, nr_pages); + break; + } break; - default: - ret = -EINVAL; /* should be BUG() ? 
*/ + case RES_SOFT_LIMIT: + memcg->soft_limit = nr_pages; + ret = 0; break; } return ret ?: nbytes; } -static void memcg_get_hierarchical_limit(struct mem_cgroup *memcg, - unsigned long long *mem_limit, unsigned long long *memsw_limit) -{ - unsigned long long min_limit, min_memsw_limit, tmp; - - min_limit = res_counter_read_u64(&memcg->res, RES_LIMIT); - min_memsw_limit = res_counter_read_u64(&memcg->memsw, RES_LIMIT); - if (!memcg->use_hierarchy) - goto out; - - while (memcg->css.parent) { - memcg = mem_cgroup_from_css(memcg->css.parent); - if (!memcg->use_hierarchy) - break; - tmp = res_counter_read_u64(&memcg->res, RES_LIMIT); - min_limit = min(min_limit, tmp); - tmp = res_counter_read_u64(&memcg->memsw, RES_LIMIT); - min_memsw_limit = min(min_memsw_limit, tmp); - } -out: - *mem_limit = min_limit; - *memsw_limit = min_memsw_limit; -} - static ssize_t mem_cgroup_reset(struct kernfs_open_file *of, char *buf, size_t nbytes, loff_t off) { struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); - int name; - enum res_type type; + struct page_counter *counter; - type = MEMFILE_TYPE(of_cft(of)->private); - name = MEMFILE_ATTR(of_cft(of)->private); + switch (MEMFILE_TYPE(of_cft(of)->private)) { + case _MEM: + counter = &memcg->memory; + break; + case _MEMSWAP: + counter = &memcg->memsw; + break; + case _KMEM: + counter = &memcg->kmem; + break; + default: + BUG(); + } - switch (name) { + switch (MEMFILE_ATTR(of_cft(of)->private)) { case RES_MAX_USAGE: - if (type == _MEM) - res_counter_reset_max(&memcg->res); - else if (type == _MEMSWAP) - res_counter_reset_max(&memcg->memsw); - else if (type == _KMEM) - res_counter_reset_max(&memcg->kmem); - else - return -EINVAL; + page_counter_reset_watermark(counter); break; case RES_FAILCNT: - if (type == _MEM) - res_counter_reset_failcnt(&memcg->res); - else if (type == _MEMSWAP) - res_counter_reset_failcnt(&memcg->memsw); - else if (type == _KMEM) - res_counter_reset_failcnt(&memcg->kmem); - else - return -EINVAL; + counter->failcnt = 0; break; + default: + BUG(); } return nbytes; @@ -4387,6 +4343,7 @@ static inline void mem_cgroup_lru_names_not_uptodate(void) static int memcg_stat_show(struct seq_file *m, void *v) { struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); + unsigned long memory, memsw; struct mem_cgroup *mi; unsigned int i; @@ -4406,14 +4363,16 @@ static int memcg_stat_show(struct seq_file *m, void *v) mem_cgroup_nr_lru_pages(memcg, BIT(i)) * PAGE_SIZE); /* Hierarchical information */ - { - unsigned long long limit, memsw_limit; - memcg_get_hierarchical_limit(memcg, &limit, &memsw_limit); - seq_printf(m, "hierarchical_memory_limit %llu\n", limit); - if (do_swap_account) - seq_printf(m, "hierarchical_memsw_limit %llu\n", - memsw_limit); + memory = memsw = PAGE_COUNTER_MAX; + for (mi = memcg; mi; mi = parent_mem_cgroup(mi)) { + memory = min(memory, mi->memory.limit); + memsw = min(memsw, mi->memsw.limit); } + seq_printf(m, "hierarchical_memory_limit %llu\n", + (u64)memory * PAGE_SIZE); + if (do_swap_account) + seq_printf(m, "hierarchical_memsw_limit %llu\n", + (u64)memsw * PAGE_SIZE); for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) { long long val = 0; @@ -4497,7 +4456,7 @@ static int mem_cgroup_swappiness_write(struct cgroup_subsys_state *css, static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap) { struct mem_cgroup_threshold_ary *t; - u64 usage; + unsigned long usage; int i; rcu_read_lock(); @@ -4596,10 +4555,11 @@ static int __mem_cgroup_usage_register_event(struct mem_cgroup *memcg, { struct mem_cgroup_thresholds 
*thresholds; struct mem_cgroup_threshold_ary *new; - u64 threshold, usage; + unsigned long threshold; + unsigned long usage; int i, size, ret; - ret = res_counter_memparse_write_strategy(args, &threshold); + ret = page_counter_memparse(args, &threshold); if (ret) return ret; @@ -4689,7 +4649,7 @@ static void __mem_cgroup_usage_unregister_event(struct mem_cgroup *memcg, { struct mem_cgroup_thresholds *thresholds; struct mem_cgroup_threshold_ary *new; - u64 usage; + unsigned long usage; int i, j, size; mutex_lock(&memcg->thresholds_lock); @@ -4883,7 +4843,7 @@ static void kmem_cgroup_css_offline(struct mem_cgroup *memcg) memcg_kmem_mark_dead(memcg); - if (res_counter_read_u64(&memcg->kmem, RES_USAGE) != 0) + if (page_counter_read(&memcg->kmem)) return; if (memcg_kmem_test_and_clear_dead(memcg)) @@ -5363,9 +5323,9 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg) */ struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg) { - if (!memcg->res.parent) + if (!memcg->memory.parent) return NULL; - return mem_cgroup_from_res_counter(memcg->res.parent, res); + return mem_cgroup_from_counter(memcg->memory.parent, memory); } EXPORT_SYMBOL(parent_mem_cgroup); @@ -5410,9 +5370,9 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) /* root ? */ if (parent_css == NULL) { root_mem_cgroup = memcg; - res_counter_init(&memcg->res, NULL); - res_counter_init(&memcg->memsw, NULL); - res_counter_init(&memcg->kmem, NULL); + page_counter_init(&memcg->memory, NULL); + page_counter_init(&memcg->memsw, NULL); + page_counter_init(&memcg->kmem, NULL); } memcg->last_scanned_node = MAX_NUMNODES; @@ -5451,18 +5411,18 @@ mem_cgroup_css_online(struct cgroup_subsys_state *css) memcg->swappiness = mem_cgroup_swappiness(parent); if (parent->use_hierarchy) { - res_counter_init(&memcg->res, &parent->res); - res_counter_init(&memcg->memsw, &parent->memsw); - res_counter_init(&memcg->kmem, &parent->kmem); + page_counter_init(&memcg->memory, &parent->memory); + page_counter_init(&memcg->memsw, &parent->memsw); + page_counter_init(&memcg->kmem, &parent->kmem); /* * No need to take a reference to the parent because cgroup * core guarantees its existence. */ } else { - res_counter_init(&memcg->res, NULL); - res_counter_init(&memcg->memsw, NULL); - res_counter_init(&memcg->kmem, NULL); + page_counter_init(&memcg->memory, NULL); + page_counter_init(&memcg->memsw, NULL); + page_counter_init(&memcg->kmem, NULL); /* * Deeper hierachy with use_hierarchy == false doesn't make * much sense so let cgroup subsystem know about this @@ -5544,7 +5504,7 @@ static void mem_cgroup_css_free(struct cgroup_subsys_state *css) /* * XXX: css_offline() would be where we should reparent all * memory to prepare the cgroup for destruction. However, - * memcg does not do css_tryget_online() and res_counter charging + * memcg does not do css_tryget_online() and page_counter charging * under the same RCU lock region, which means that charging * could race with offlining. 
Offlining only happens to * cgroups with no tasks in them but charges can show up @@ -5564,7 +5524,7 @@ static void mem_cgroup_css_free(struct cgroup_subsys_state *css) * call_rcu() * offline_css() * reparent_charges() - * res_counter_charge() + * page_counter_try_charge() * css_put() * css_free() * pc->mem_cgroup = dead memcg @@ -5599,10 +5559,10 @@ static void mem_cgroup_css_reset(struct cgroup_subsys_state *css) { struct mem_cgroup *memcg = mem_cgroup_from_css(css); - mem_cgroup_resize_limit(memcg, ULLONG_MAX); - mem_cgroup_resize_memsw_limit(memcg, ULLONG_MAX); - memcg_update_kmem_limit(memcg, ULLONG_MAX); - res_counter_set_soft_limit(&memcg->res, ULLONG_MAX); + mem_cgroup_resize_limit(memcg, PAGE_COUNTER_MAX); + mem_cgroup_resize_memsw_limit(memcg, PAGE_COUNTER_MAX); + memcg_update_kmem_limit(memcg, PAGE_COUNTER_MAX); + memcg->soft_limit = 0; } #ifdef CONFIG_MMU @@ -5916,19 +5876,18 @@ static void __mem_cgroup_clear_mc(void) if (mc.moved_swap) { /* uncharge swap account from the old cgroup */ if (!mem_cgroup_is_root(mc.from)) - res_counter_uncharge(&mc.from->memsw, - PAGE_SIZE * mc.moved_swap); - - for (i = 0; i < mc.moved_swap; i++) - css_put(&mc.from->css); + page_counter_uncharge(&mc.from->memsw, mc.moved_swap); /* - * we charged both to->res and to->memsw, so we should - * uncharge to->res. + * we charged both to->memory and to->memsw, so we + * should uncharge to->memory. */ if (!mem_cgroup_is_root(mc.to)) - res_counter_uncharge(&mc.to->res, - PAGE_SIZE * mc.moved_swap); + page_counter_uncharge(&mc.to->memory, mc.moved_swap); + + for (i = 0; i < mc.moved_swap; i++) + css_put(&mc.from->css); + /* we've already done css_get(mc.to) */ mc.moved_swap = 0; } @@ -6294,7 +6253,7 @@ void mem_cgroup_uncharge_swap(swp_entry_t entry) memcg = mem_cgroup_lookup(id); if (memcg) { if (!mem_cgroup_is_root(memcg)) - res_counter_uncharge(&memcg->memsw, PAGE_SIZE); + page_counter_uncharge(&memcg->memsw, 1); mem_cgroup_swap_statistics(memcg, false); css_put(&memcg->css); } @@ -6460,11 +6419,9 @@ static void uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout, if (!mem_cgroup_is_root(memcg)) { if (nr_mem) - res_counter_uncharge(&memcg->res, - nr_mem * PAGE_SIZE); + page_counter_uncharge(&memcg->memory, nr_mem); if (nr_memsw) - res_counter_uncharge(&memcg->memsw, - nr_memsw * PAGE_SIZE); + page_counter_uncharge(&memcg->memsw, nr_memsw); memcg_oom_recover(memcg); } diff --git a/mm/page_counter.c b/mm/page_counter.c new file mode 100644 index 000000000000..f0cbc0825426 --- /dev/null +++ b/mm/page_counter.c @@ -0,0 +1,207 @@ +/* + * Lockless hierarchical page accounting & limiting + * + * Copyright (C) 2014 Red Hat, Inc., Johannes Weiner + */ + +#include +#include +#include +#include +#include +#include +#include + +/** + * page_counter_cancel - take pages out of the local counter + * @counter: counter + * @nr_pages: number of pages to cancel + * + * Returns whether there are remaining pages in the counter. + */ +int page_counter_cancel(struct page_counter *counter, unsigned long nr_pages) +{ + long new; + + new = atomic_long_sub_return(nr_pages, &counter->count); + + /* More uncharges than charges? */ + WARN_ON_ONCE(new < 0); + + return new > 0; +} + +/** + * page_counter_charge - hierarchically charge pages + * @counter: counter + * @nr_pages: number of pages to charge + * + * NOTE: This does not consider any configured counter limits. 
+ */ +void page_counter_charge(struct page_counter *counter, unsigned long nr_pages) +{ + struct page_counter *c; + + for (c = counter; c; c = c->parent) { + long new; + + new = atomic_long_add_return(nr_pages, &c->count); + /* + * This is indeed racy, but we can live with some + * inaccuracy in the watermark. + */ + if (new > c->watermark) + c->watermark = new; + } +} + +/** + * page_counter_try_charge - try to hierarchically charge pages + * @counter: counter + * @nr_pages: number of pages to charge + * @fail: points first counter to hit its limit, if any + * + * Returns 0 on success, or -ENOMEM and @fail if the counter or one of + * its ancestors has hit its configured limit. + */ +int page_counter_try_charge(struct page_counter *counter, + unsigned long nr_pages, + struct page_counter **fail) +{ + struct page_counter *c; + + for (c = counter; c; c = c->parent) { + long new; + /* + * Charge speculatively to avoid an expensive CAS. If + * a bigger charge fails, it might falsely lock out a + * racing smaller charge and send it into reclaim + * early, but the error is limited to the difference + * between the two sizes, which is less than 2M/4M in + * case of a THP locking out a regular page charge. + * + * The atomic_long_add_return() implies a full memory + * barrier between incrementing the count and reading + * the limit. When racing with page_counter_limit(), + * we either see the new limit or the setter sees the + * counter has changed and retries. + */ + new = atomic_long_add_return(nr_pages, &c->count); + if (new > c->limit) { + atomic_long_sub(nr_pages, &c->count); + /* + * This is racy, but we can live with some + * inaccuracy in the failcnt. + */ + c->failcnt++; + *fail = c; + goto failed; + } + /* + * Just like with failcnt, we can live with some + * inaccuracy in the watermark. + */ + if (new > c->watermark) + c->watermark = new; + } + return 0; + +failed: + for (c = counter; c != *fail; c = c->parent) + page_counter_cancel(c, nr_pages); + + return -ENOMEM; +} + +/** + * page_counter_uncharge - hierarchically uncharge pages + * @counter: counter + * @nr_pages: number of pages to uncharge + * + * Returns whether there are remaining charges in @counter. + */ +int page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages) +{ + struct page_counter *c; + int ret = 1; + + for (c = counter; c; c = c->parent) { + int remainder; + + remainder = page_counter_cancel(c, nr_pages); + if (c == counter && !remainder) + ret = 0; + } + + return ret; +} + +/** + * page_counter_limit - limit the number of pages allowed + * @counter: counter + * @limit: limit to set + * + * Returns 0 on success, -EBUSY if the current number of pages on the + * counter already exceeds the specified limit. + * + * The caller must serialize invocations on the same counter. + */ +int page_counter_limit(struct page_counter *counter, unsigned long limit) +{ + for (;;) { + unsigned long old; + long count; + + /* + * Update the limit while making sure that it's not + * below the concurrently-changing counter value. + * + * The xchg implies two full memory barriers before + * and after, so the read-swap-read is ordered and + * ensures coherency with page_counter_try_charge(): + * that function modifies the count before checking + * the limit, so if it sees the old limit, we see the + * modified counter and retry. 
+ */ + count = atomic_long_read(&counter->count); + + if (count > limit) + return -EBUSY; + + old = xchg(&counter->limit, limit); + + if (atomic_long_read(&counter->count) <= count) + return 0; + + counter->limit = old; + cond_resched(); + } +} + +/** + * page_counter_memparse - memparse() for page counter limits + * @buf: string to parse + * @nr_pages: returns the result in number of pages + * + * Returns -EINVAL, or 0 and @nr_pages on success. @nr_pages will be + * limited to %PAGE_COUNTER_MAX. + */ +int page_counter_memparse(const char *buf, unsigned long *nr_pages) +{ + char unlimited[] = "-1"; + char *end; + u64 bytes; + + if (!strncmp(buf, unlimited, sizeof(unlimited))) { + *nr_pages = PAGE_COUNTER_MAX; + return 0; + } + + bytes = memparse(buf, &end); + if (*end != '\0') + return -EINVAL; + + *nr_pages = min(bytes / PAGE_SIZE, (u64)PAGE_COUNTER_MAX); + + return 0; +} diff --git a/net/ipv4/tcp_memcontrol.c b/net/ipv4/tcp_memcontrol.c index 1d191357bf88..272327134a1b 100644 --- a/net/ipv4/tcp_memcontrol.c +++ b/net/ipv4/tcp_memcontrol.c @@ -9,13 +9,13 @@ int tcp_init_cgroup(struct mem_cgroup *memcg, struct cgroup_subsys *ss) { /* - * The root cgroup does not use res_counters, but rather, + * The root cgroup does not use page_counters, but rather, * rely on the data already collected by the network * subsystem */ - struct res_counter *res_parent = NULL; - struct cg_proto *cg_proto, *parent_cg; struct mem_cgroup *parent = parent_mem_cgroup(memcg); + struct page_counter *counter_parent = NULL; + struct cg_proto *cg_proto, *parent_cg; cg_proto = tcp_prot.proto_cgroup(memcg); if (!cg_proto) @@ -29,9 +29,9 @@ int tcp_init_cgroup(struct mem_cgroup *memcg, struct cgroup_subsys *ss) parent_cg = tcp_prot.proto_cgroup(parent); if (parent_cg) - res_parent = &parent_cg->memory_allocated; + counter_parent = &parent_cg->memory_allocated; - res_counter_init(&cg_proto->memory_allocated, res_parent); + page_counter_init(&cg_proto->memory_allocated, counter_parent); percpu_counter_init(&cg_proto->sockets_allocated, 0, GFP_KERNEL); return 0; @@ -50,7 +50,7 @@ void tcp_destroy_cgroup(struct mem_cgroup *memcg) } EXPORT_SYMBOL(tcp_destroy_cgroup); -static int tcp_update_limit(struct mem_cgroup *memcg, u64 val) +static int tcp_update_limit(struct mem_cgroup *memcg, unsigned long nr_pages) { struct cg_proto *cg_proto; int i; @@ -60,20 +60,17 @@ static int tcp_update_limit(struct mem_cgroup *memcg, u64 val) if (!cg_proto) return -EINVAL; - if (val > RES_COUNTER_MAX) - val = RES_COUNTER_MAX; - - ret = res_counter_set_limit(&cg_proto->memory_allocated, val); + ret = page_counter_limit(&cg_proto->memory_allocated, nr_pages); if (ret) return ret; for (i = 0; i < 3; i++) - cg_proto->sysctl_mem[i] = min_t(long, val >> PAGE_SHIFT, + cg_proto->sysctl_mem[i] = min_t(long, nr_pages, sysctl_tcp_mem[i]); - if (val == RES_COUNTER_MAX) + if (nr_pages == PAGE_COUNTER_MAX) clear_bit(MEMCG_SOCK_ACTIVE, &cg_proto->flags); - else if (val != RES_COUNTER_MAX) { + else { /* * The active bit needs to be written after the static_key * update. 
This is what guarantees that the socket activation @@ -102,11 +99,20 @@ static int tcp_update_limit(struct mem_cgroup *memcg, u64 val) return 0; } +enum { + RES_USAGE, + RES_LIMIT, + RES_MAX_USAGE, + RES_FAILCNT, +}; + +static DEFINE_MUTEX(tcp_limit_mutex); + static ssize_t tcp_cgroup_write(struct kernfs_open_file *of, char *buf, size_t nbytes, loff_t off) { struct mem_cgroup *memcg = mem_cgroup_from_css(of_css(of)); - unsigned long long val; + unsigned long nr_pages; int ret = 0; buf = strstrip(buf); @@ -114,10 +120,12 @@ static ssize_t tcp_cgroup_write(struct kernfs_open_file *of, switch (of_cft(of)->private) { case RES_LIMIT: /* see memcontrol.c */ - ret = res_counter_memparse_write_strategy(buf, &val); + ret = page_counter_memparse(buf, &nr_pages); if (ret) break; - ret = tcp_update_limit(memcg, val); + mutex_lock(&tcp_limit_mutex); + ret = tcp_update_limit(memcg, nr_pages); + mutex_unlock(&tcp_limit_mutex); break; default: ret = -EINVAL; @@ -126,43 +134,36 @@ static ssize_t tcp_cgroup_write(struct kernfs_open_file *of, return ret ?: nbytes; } -static u64 tcp_read_stat(struct mem_cgroup *memcg, int type, u64 default_val) -{ - struct cg_proto *cg_proto; - - cg_proto = tcp_prot.proto_cgroup(memcg); - if (!cg_proto) - return default_val; - - return res_counter_read_u64(&cg_proto->memory_allocated, type); -} - -static u64 tcp_read_usage(struct mem_cgroup *memcg) -{ - struct cg_proto *cg_proto; - - cg_proto = tcp_prot.proto_cgroup(memcg); - if (!cg_proto) - return atomic_long_read(&tcp_memory_allocated) << PAGE_SHIFT; - - return res_counter_read_u64(&cg_proto->memory_allocated, RES_USAGE); -} - static u64 tcp_cgroup_read(struct cgroup_subsys_state *css, struct cftype *cft) { struct mem_cgroup *memcg = mem_cgroup_from_css(css); + struct cg_proto *cg_proto = tcp_prot.proto_cgroup(memcg); u64 val; switch (cft->private) { case RES_LIMIT: - val = tcp_read_stat(memcg, RES_LIMIT, RES_COUNTER_MAX); + if (!cg_proto) + return PAGE_COUNTER_MAX; + val = cg_proto->memory_allocated.limit; + val *= PAGE_SIZE; break; case RES_USAGE: - val = tcp_read_usage(memcg); + if (!cg_proto) + val = atomic_long_read(&tcp_memory_allocated); + else + val = page_counter_read(&cg_proto->memory_allocated); + val *= PAGE_SIZE; break; case RES_FAILCNT: + if (!cg_proto) + return 0; + val = cg_proto->memory_allocated.failcnt; + break; case RES_MAX_USAGE: - val = tcp_read_stat(memcg, cft->private, 0); + if (!cg_proto) + return 0; + val = cg_proto->memory_allocated.watermark; + val *= PAGE_SIZE; break; default: BUG(); @@ -183,10 +184,10 @@ static ssize_t tcp_cgroup_reset(struct kernfs_open_file *of, switch (of_cft(of)->private) { case RES_MAX_USAGE: - res_counter_reset_max(&cg_proto->memory_allocated); + page_counter_reset_watermark(&cg_proto->memory_allocated); break; case RES_FAILCNT: - res_counter_reset_failcnt(&cg_proto->memory_allocated); + cg_proto->memory_allocated.failcnt = 0; break; } -- cgit v1.2.3 From 71f87bee38edddb21d97895fa938744cf3f477bb Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Wed, 10 Dec 2014 15:42:34 -0800 Subject: mm: hugetlb_cgroup: convert to lockless page counters Abandon the spinlock-protected byte counters in favor of the unlocked page counters in the hugetlb controller as well. 
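The conversion follows the same pattern as the memcg patches above: byte quantities become page counts, and the spinlocked res_counter calls become the lockless page_counter API. As a rough sketch of the resulting charge/uncharge pairing (function and field names are taken from the diff below; locking and error paths are abbreviated):

	struct page_counter *counter;	/* set to the limited ancestor on failure */

	/* charge nr_pages hugepages of hstate idx against the cgroup */
	if (page_counter_try_charge(&h_cg->hugepage[idx], nr_pages, &counter))
		return -ENOMEM;		/* some ancestor hit its limit */

	/* ... pages in use ... */

	page_counter_uncharge(&h_cg->hugepage[idx], nr_pages);

Note that limit updates are now serialized by a dedicated hugetlb_limit_mutex rather than by the counter's internal spinlock, since page_counter_limit() requires its callers to serialize invocations themselves.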
Signed-off-by: Johannes Weiner Reviewed-by: Vladimir Davydov Acked-by: Michal Hocko Cc: Tejun Heo Cc: David Rientjes Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/cgroups/hugetlb.txt | 2 +- include/linux/hugetlb_cgroup.h | 1 - init/Kconfig | 3 +- mm/hugetlb_cgroup.c | 103 +++++++++++++++++++++----------------- 4 files changed, 61 insertions(+), 48 deletions(-) (limited to 'mm') diff --git a/Documentation/cgroups/hugetlb.txt b/Documentation/cgroups/hugetlb.txt index a9faaca1f029..106245c3aecc 100644 --- a/Documentation/cgroups/hugetlb.txt +++ b/Documentation/cgroups/hugetlb.txt @@ -29,7 +29,7 @@ Brief summary of control files hugetlb..limit_in_bytes # set/show limit of "hugepagesize" hugetlb usage hugetlb..max_usage_in_bytes # show max "hugepagesize" hugetlb usage recorded - hugetlb..usage_in_bytes # show current res_counter usage for "hugepagesize" hugetlb + hugetlb..usage_in_bytes # show current usage for "hugepagesize" hugetlb hugetlb..failcnt # show the number of allocation failure due to HugeTLB limit For a system supporting two hugepage size (16M and 16G) the control diff --git a/include/linux/hugetlb_cgroup.h b/include/linux/hugetlb_cgroup.h index 0129f89cf98d..bcc853eccc85 100644 --- a/include/linux/hugetlb_cgroup.h +++ b/include/linux/hugetlb_cgroup.h @@ -16,7 +16,6 @@ #define _LINUX_HUGETLB_CGROUP_H #include -#include struct hugetlb_cgroup; /* diff --git a/init/Kconfig b/init/Kconfig index fd9e88791ba4..a60d1442d1df 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -1051,7 +1051,8 @@ config MEMCG_KMEM config CGROUP_HUGETLB bool "HugeTLB Resource Controller for Control Groups" - depends on RESOURCE_COUNTERS && HUGETLB_PAGE + depends on HUGETLB_PAGE + select PAGE_COUNTER default n help Provides a cgroup Resource Controller for HugeTLB pages. diff --git a/mm/hugetlb_cgroup.c b/mm/hugetlb_cgroup.c index a67c26e0f360..037e1c00a5b7 100644 --- a/mm/hugetlb_cgroup.c +++ b/mm/hugetlb_cgroup.c @@ -14,6 +14,7 @@ */ #include +#include #include #include #include @@ -23,7 +24,7 @@ struct hugetlb_cgroup { /* * the counter to account for hugepages from hugetlb. 
*/ - struct res_counter hugepage[HUGE_MAX_HSTATE]; + struct page_counter hugepage[HUGE_MAX_HSTATE]; }; #define MEMFILE_PRIVATE(x, val) (((x) << 16) | (val)) @@ -60,7 +61,7 @@ static inline bool hugetlb_cgroup_have_usage(struct hugetlb_cgroup *h_cg) int idx; for (idx = 0; idx < hugetlb_max_hstate; idx++) { - if ((res_counter_read_u64(&h_cg->hugepage[idx], RES_USAGE)) > 0) + if (page_counter_read(&h_cg->hugepage[idx])) return true; } return false; @@ -79,12 +80,12 @@ hugetlb_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) if (parent_h_cgroup) { for (idx = 0; idx < HUGE_MAX_HSTATE; idx++) - res_counter_init(&h_cgroup->hugepage[idx], - &parent_h_cgroup->hugepage[idx]); + page_counter_init(&h_cgroup->hugepage[idx], + &parent_h_cgroup->hugepage[idx]); } else { root_h_cgroup = h_cgroup; for (idx = 0; idx < HUGE_MAX_HSTATE; idx++) - res_counter_init(&h_cgroup->hugepage[idx], NULL); + page_counter_init(&h_cgroup->hugepage[idx], NULL); } return &h_cgroup->css; } @@ -108,9 +109,8 @@ static void hugetlb_cgroup_css_free(struct cgroup_subsys_state *css) static void hugetlb_cgroup_move_parent(int idx, struct hugetlb_cgroup *h_cg, struct page *page) { - int csize; - struct res_counter *counter; - struct res_counter *fail_res; + unsigned int nr_pages; + struct page_counter *counter; struct hugetlb_cgroup *page_hcg; struct hugetlb_cgroup *parent = parent_hugetlb_cgroup(h_cg); @@ -123,15 +123,15 @@ static void hugetlb_cgroup_move_parent(int idx, struct hugetlb_cgroup *h_cg, if (!page_hcg || page_hcg != h_cg) goto out; - csize = PAGE_SIZE << compound_order(page); + nr_pages = 1 << compound_order(page); if (!parent) { parent = root_h_cgroup; /* root has no limit */ - res_counter_charge_nofail(&parent->hugepage[idx], - csize, &fail_res); + page_counter_charge(&parent->hugepage[idx], nr_pages); } counter = &h_cg->hugepage[idx]; - res_counter_uncharge_until(counter, counter->parent, csize); + /* Take the pages off the local counter */ + page_counter_cancel(counter, nr_pages); set_hugetlb_cgroup(page, parent); out: @@ -166,9 +166,8 @@ int hugetlb_cgroup_charge_cgroup(int idx, unsigned long nr_pages, struct hugetlb_cgroup **ptr) { int ret = 0; - struct res_counter *fail_res; + struct page_counter *counter; struct hugetlb_cgroup *h_cg = NULL; - unsigned long csize = nr_pages * PAGE_SIZE; if (hugetlb_cgroup_disabled()) goto done; @@ -187,7 +186,7 @@ again: } rcu_read_unlock(); - ret = res_counter_charge(&h_cg->hugepage[idx], csize, &fail_res); + ret = page_counter_try_charge(&h_cg->hugepage[idx], nr_pages, &counter); css_put(&h_cg->css); done: *ptr = h_cg; @@ -213,7 +212,6 @@ void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages, struct page *page) { struct hugetlb_cgroup *h_cg; - unsigned long csize = nr_pages * PAGE_SIZE; if (hugetlb_cgroup_disabled()) return; @@ -222,61 +220,76 @@ void hugetlb_cgroup_uncharge_page(int idx, unsigned long nr_pages, if (unlikely(!h_cg)) return; set_hugetlb_cgroup(page, NULL); - res_counter_uncharge(&h_cg->hugepage[idx], csize); + page_counter_uncharge(&h_cg->hugepage[idx], nr_pages); return; } void hugetlb_cgroup_uncharge_cgroup(int idx, unsigned long nr_pages, struct hugetlb_cgroup *h_cg) { - unsigned long csize = nr_pages * PAGE_SIZE; - if (hugetlb_cgroup_disabled() || !h_cg) return; if (huge_page_order(&hstates[idx]) < HUGETLB_CGROUP_MIN_ORDER) return; - res_counter_uncharge(&h_cg->hugepage[idx], csize); + page_counter_uncharge(&h_cg->hugepage[idx], nr_pages); return; } +enum { + RES_USAGE, + RES_LIMIT, + RES_MAX_USAGE, + RES_FAILCNT, +}; + static u64 
hugetlb_cgroup_read_u64(struct cgroup_subsys_state *css, struct cftype *cft) { - int idx, name; + struct page_counter *counter; struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(css); - idx = MEMFILE_IDX(cft->private); - name = MEMFILE_ATTR(cft->private); + counter = &h_cg->hugepage[MEMFILE_IDX(cft->private)]; - return res_counter_read_u64(&h_cg->hugepage[idx], name); + switch (MEMFILE_ATTR(cft->private)) { + case RES_USAGE: + return (u64)page_counter_read(counter) * PAGE_SIZE; + case RES_LIMIT: + return (u64)counter->limit * PAGE_SIZE; + case RES_MAX_USAGE: + return (u64)counter->watermark * PAGE_SIZE; + case RES_FAILCNT: + return counter->failcnt; + default: + BUG(); + } } +static DEFINE_MUTEX(hugetlb_limit_mutex); + static ssize_t hugetlb_cgroup_write(struct kernfs_open_file *of, char *buf, size_t nbytes, loff_t off) { - int idx, name, ret; - unsigned long long val; + int ret, idx; + unsigned long nr_pages; struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(of_css(of)); + if (hugetlb_cgroup_is_root(h_cg)) /* Can't set limit on root */ + return -EINVAL; + buf = strstrip(buf); + ret = page_counter_memparse(buf, &nr_pages); + if (ret) + return ret; + idx = MEMFILE_IDX(of_cft(of)->private); - name = MEMFILE_ATTR(of_cft(of)->private); - switch (name) { + switch (MEMFILE_ATTR(of_cft(of)->private)) { case RES_LIMIT: - if (hugetlb_cgroup_is_root(h_cg)) { - /* Can't set limit on root */ - ret = -EINVAL; - break; - } - /* This function does all necessary parse...reuse it */ - ret = res_counter_memparse_write_strategy(buf, &val); - if (ret) - break; - val = ALIGN(val, 1ULL << huge_page_shift(&hstates[idx])); - ret = res_counter_set_limit(&h_cg->hugepage[idx], val); + mutex_lock(&hugetlb_limit_mutex); + ret = page_counter_limit(&h_cg->hugepage[idx], nr_pages); + mutex_unlock(&hugetlb_limit_mutex); break; default: ret = -EINVAL; @@ -288,18 +301,18 @@ static ssize_t hugetlb_cgroup_write(struct kernfs_open_file *of, static ssize_t hugetlb_cgroup_reset(struct kernfs_open_file *of, char *buf, size_t nbytes, loff_t off) { - int idx, name, ret = 0; + int ret = 0; + struct page_counter *counter; struct hugetlb_cgroup *h_cg = hugetlb_cgroup_from_css(of_css(of)); - idx = MEMFILE_IDX(of_cft(of)->private); - name = MEMFILE_ATTR(of_cft(of)->private); + counter = &h_cg->hugepage[MEMFILE_IDX(of_cft(of)->private)]; - switch (name) { + switch (MEMFILE_ATTR(of_cft(of)->private)) { case RES_MAX_USAGE: - res_counter_reset_max(&h_cg->hugepage[idx]); + page_counter_reset_watermark(counter); break; case RES_FAILCNT: - res_counter_reset_failcnt(&h_cg->hugepage[idx]); + counter->failcnt = 0; break; default: ret = -EINVAL; -- cgit v1.2.3 From 5ac8fb31ad2ebd6492d1c5e8f31846b532f03945 Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Wed, 10 Dec 2014 15:42:39 -0800 Subject: mm: memcontrol: convert reclaim iterator to simple css refcounting The memcg reclaim iterators use a complicated weak reference scheme to prevent pinning cgroups indefinitely in the absence of memory pressure. However, during the ongoing cgroup core rework, css lifetime has been decoupled such that a pinned css no longer interferes with removal of the user-visible cgroup, and all this complexity is now unnecessary. 
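With the weak references gone, the shared iterator state reduces to a single cached css pointer that readers pin with css_tryget() and writers update with cmpxchg(). Condensed from the diff below (the generation handling and the hierarchy walk itself are omitted), the protocol is roughly:

	/* load: retry until we pin the cached position, or it is NULL */
	do {
		pos = ACCESS_ONCE(iter->position);
	} while (pos && !css_tryget(&pos->css));

	/* ... advance to the next memcg in the pre-order walk ... */

	/* store: only the winning updater transfers the cache's reference */
	if (cmpxchg(&iter->position, pos, memcg) == pos) {
		if (memcg)
			css_get(&memcg->css);	/* reference now held by the cache */
		if (pos)
			css_put(&pos->css);	/* drop the cache's old reference */
	}
	if (pos)
		css_put(&pos->css);		/* pairs with the css_tryget() above */

Because the cache itself holds a reference, a stale position can pin a dead cgroup's css but never the user-visible cgroup, which is exactly what the decoupled css lifetime makes acceptable.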
[mhocko@suse.cz: ensure that the cached reference is always released] Signed-off-by: Johannes Weiner Cc: Vladimir Davydov Cc: David Rientjes Cc: Tejun Heo Signed-off-by: Michal Hocko Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 258 ++++++++++++++++++-------------------------------------- 1 file changed, 84 insertions(+), 174 deletions(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 4129ad74e93b..c3cd3bb77dd9 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -143,14 +143,8 @@ struct mem_cgroup_stat_cpu { unsigned long targets[MEM_CGROUP_NTARGETS]; }; -struct mem_cgroup_reclaim_iter { - /* - * last scanned hierarchy member. Valid only if last_dead_count - * matches memcg->dead_count of the hierarchy root group. - */ - struct mem_cgroup *last_visited; - int last_dead_count; - +struct reclaim_iter { + struct mem_cgroup *position; /* scan generation, increased every round-trip */ unsigned int generation; }; @@ -162,7 +156,7 @@ struct mem_cgroup_per_zone { struct lruvec lruvec; unsigned long lru_size[NR_LRU_LISTS]; - struct mem_cgroup_reclaim_iter reclaim_iter[DEF_PRIORITY + 1]; + struct reclaim_iter iter[DEF_PRIORITY + 1]; struct rb_node tree_node; /* RB tree node */ unsigned long usage_in_excess;/* Set to the value by which */ @@ -346,7 +340,6 @@ struct mem_cgroup { struct mem_cgroup_stat_cpu nocpu_base; spinlock_t pcp_counter_lock; - atomic_t dead_count; #if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_INET) struct cg_proto tcp_mem; #endif @@ -1067,122 +1060,6 @@ static struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm) return memcg; } -/* - * Returns a next (in a pre-order walk) alive memcg (with elevated css - * ref. count) or NULL if the whole root's subtree has been visited. - * - * helper function to be used by mem_cgroup_iter - */ -static struct mem_cgroup *__mem_cgroup_iter_next(struct mem_cgroup *root, - struct mem_cgroup *last_visited) -{ - struct cgroup_subsys_state *prev_css, *next_css; - - prev_css = last_visited ? &last_visited->css : NULL; -skip_node: - next_css = css_next_descendant_pre(prev_css, &root->css); - - /* - * Even if we found a group we have to make sure it is - * alive. css && !memcg means that the groups should be - * skipped and we should continue the tree walk. - * last_visited css is safe to use because it is - * protected by css_get and the tree walk is rcu safe. - * - * We do not take a reference on the root of the tree walk - * because we might race with the root removal when it would - * be the only node in the iterated hierarchy and mem_cgroup_iter - * would end up in an endless loop because it expects that at - * least one valid node will be returned. Root cannot disappear - * because caller of the iterator should hold it already so - * skipping css reference should be safe. - */ - if (next_css) { - struct mem_cgroup *memcg = mem_cgroup_from_css(next_css); - - if (next_css == &root->css) - return memcg; - - if (css_tryget_online(next_css)) { - /* - * Make sure the memcg is initialized: - * mem_cgroup_css_online() orders the the - * initialization against setting the flag. - */ - if (smp_load_acquire(&memcg->initialized)) - return memcg; - css_put(next_css); - } - - prev_css = next_css; - goto skip_node; - } - - return NULL; -} - -static void mem_cgroup_iter_invalidate(struct mem_cgroup *root) -{ - /* - * When a group in the hierarchy below root is destroyed, the - * hierarchy iterator can no longer be trusted since it might - * have pointed to the destroyed group. Invalidate it. 
- */ - atomic_inc(&root->dead_count); -} - -static struct mem_cgroup * -mem_cgroup_iter_load(struct mem_cgroup_reclaim_iter *iter, - struct mem_cgroup *root, - int *sequence) -{ - struct mem_cgroup *position = NULL; - /* - * A cgroup destruction happens in two stages: offlining and - * release. They are separated by a RCU grace period. - * - * If the iterator is valid, we may still race with an - * offlining. The RCU lock ensures the object won't be - * released, tryget will fail if we lost the race. - */ - *sequence = atomic_read(&root->dead_count); - if (iter->last_dead_count == *sequence) { - smp_rmb(); - position = iter->last_visited; - - /* - * We cannot take a reference to root because we might race - * with root removal and returning NULL would end up in - * an endless loop on the iterator user level when root - * would be returned all the time. - */ - if (position && position != root && - !css_tryget_online(&position->css)) - position = NULL; - } - return position; -} - -static void mem_cgroup_iter_update(struct mem_cgroup_reclaim_iter *iter, - struct mem_cgroup *last_visited, - struct mem_cgroup *new_position, - struct mem_cgroup *root, - int sequence) -{ - /* root reference counting symmetric to mem_cgroup_iter_load */ - if (last_visited && last_visited != root) - css_put(&last_visited->css); - /* - * We store the sequence count from the time @last_visited was - * loaded successfully instead of rereading it here so that we - * don't lose destruction events in between. We could have - * raced with the destruction of @new_position after all. - */ - iter->last_visited = new_position; - smp_wmb(); - iter->last_dead_count = sequence; -} - /** * mem_cgroup_iter - iterate over memory cgroup hierarchy * @root: hierarchy root @@ -1204,8 +1081,10 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root, struct mem_cgroup *prev, struct mem_cgroup_reclaim_cookie *reclaim) { + struct reclaim_iter *uninitialized_var(iter); + struct cgroup_subsys_state *css = NULL; struct mem_cgroup *memcg = NULL; - struct mem_cgroup *last_visited = NULL; + struct mem_cgroup *pos = NULL; if (mem_cgroup_disabled()) return NULL; @@ -1214,50 +1093,101 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root, root = root_mem_cgroup; if (prev && !reclaim) - last_visited = prev; + pos = prev; if (!root->use_hierarchy && root != root_mem_cgroup) { if (prev) - goto out_css_put; + goto out; return root; } rcu_read_lock(); - while (!memcg) { - struct mem_cgroup_reclaim_iter *uninitialized_var(iter); - int uninitialized_var(seq); - - if (reclaim) { - struct mem_cgroup_per_zone *mz; - - mz = mem_cgroup_zone_zoneinfo(root, reclaim->zone); - iter = &mz->reclaim_iter[reclaim->priority]; - if (prev && reclaim->generation != iter->generation) { - iter->last_visited = NULL; - goto out_unlock; - } - last_visited = mem_cgroup_iter_load(iter, root, &seq); + if (reclaim) { + struct mem_cgroup_per_zone *mz; + + mz = mem_cgroup_zone_zoneinfo(root, reclaim->zone); + iter = &mz->iter[reclaim->priority]; + + if (prev && reclaim->generation != iter->generation) + goto out_unlock; + + do { + pos = ACCESS_ONCE(iter->position); + /* + * A racing update may change the position and + * put the last reference, hence css_tryget(), + * or retry to see the updated position. 
+ */ + } while (pos && !css_tryget(&pos->css)); + } + + if (pos) + css = &pos->css; + + for (;;) { + css = css_next_descendant_pre(css, &root->css); + if (!css) { + /* + * Reclaimers share the hierarchy walk, and a + * new one might jump in right at the end of + * the hierarchy - make sure they see at least + * one group and restart from the beginning. + */ + if (!prev) + continue; + break; } - memcg = __mem_cgroup_iter_next(root, last_visited); + /* + * Verify the css and acquire a reference. The root + * is provided by the caller, so we know it's alive + * and kicking, and don't take an extra reference. + */ + memcg = mem_cgroup_from_css(css); - if (reclaim) { - mem_cgroup_iter_update(iter, last_visited, memcg, root, - seq); + if (css == &root->css) + break; - if (!memcg) - iter->generation++; - else if (!prev && memcg) - reclaim->generation = iter->generation; + if (css_tryget_online(css)) { + /* + * Make sure the memcg is initialized: + * mem_cgroup_css_online() orders the the + * initialization against setting the flag. + */ + if (smp_load_acquire(&memcg->initialized)) + break; + + css_put(css); } - if (prev && !memcg) - goto out_unlock; + memcg = NULL; + } + + if (reclaim) { + if (cmpxchg(&iter->position, pos, memcg) == pos) { + if (memcg) + css_get(&memcg->css); + if (pos) + css_put(&pos->css); + } + + /* + * pairs with css_tryget when dereferencing iter->position + * above. + */ + if (pos) + css_put(&pos->css); + + if (!memcg) + iter->generation++; + else if (!prev) + reclaim->generation = iter->generation; } + out_unlock: rcu_read_unlock(); -out_css_put: +out: if (prev && prev != root) css_put(&prev->css); @@ -5447,24 +5377,6 @@ mem_cgroup_css_online(struct cgroup_subsys_state *css) return 0; } -/* - * Announce all parents that a group from their hierarchy is gone. - */ -static void mem_cgroup_invalidate_reclaim_iterators(struct mem_cgroup *memcg) -{ - struct mem_cgroup *parent = memcg; - - while ((parent = parent_mem_cgroup(parent))) - mem_cgroup_iter_invalidate(parent); - - /* - * if the root memcg is not hierarchical we have to check it - * explicitely. - */ - if (!root_mem_cgroup->use_hierarchy) - mem_cgroup_iter_invalidate(root_mem_cgroup); -} - static void mem_cgroup_css_offline(struct cgroup_subsys_state *css) { struct mem_cgroup *memcg = mem_cgroup_from_css(css); @@ -5485,8 +5397,6 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css) kmem_cgroup_css_offline(memcg); - mem_cgroup_invalidate_reclaim_iterators(memcg); - /* * This requires that offlining is serialized. Right now that is * guaranteed because css_killed_work_fn() holds the cgroup_mutex. -- cgit v1.2.3 From e8ea14cc6eadfe2ea63e9989e16e62625a2619f8 Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Wed, 10 Dec 2014 15:42:42 -0800 Subject: mm: memcontrol: take a css reference for each charged page Charges currently pin the css indirectly by playing tricks during css_offline(): user pages stall the offlining process until all of them have been reparented, whereas kmemcg acquires a keep-alive reference if outstanding kernel pages are detected at that point. In preparation for removing all this complexity, make the pinning explicit and acquire a css references for every charged page. 
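To keep this cheap, references are taken and dropped in bulk: one percpu_ref operation covers an entire batch of pages. The pairing introduced below is roughly (names from the diff; a sketch, not the full charge path):

	/* charge: pin the css once for the whole refilled batch */
	css_get_many(&memcg->css, batch);

	/* uncharge: drop the same number of references */
	css_put_many(&memcg->css, nr_pages);

where css_get_many()/css_put_many() forward to the new percpu_ref_get_many()/percpu_ref_put_many(), whose fast path is a single this_cpu_add()/this_cpu_sub() of the count. The root cgroup skips the reference entirely via the CSS_NO_REF flag, matching the existing css_get()/css_put() behavior.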
Signed-off-by: Johannes Weiner Reviewed-by: Vladimir Davydov Acked-by: Michal Hocko Cc: David Rientjes Cc: Tejun Heo Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/cgroup.h | 26 +++++++++++++++++++++++ include/linux/percpu-refcount.h | 47 +++++++++++++++++++++++++++++++++-------- mm/memcontrol.c | 21 ++++++++++++++---- 3 files changed, 81 insertions(+), 13 deletions(-) (limited to 'mm') diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index 1d5196889048..9f96b25965c2 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -112,6 +112,19 @@ static inline void css_get(struct cgroup_subsys_state *css) percpu_ref_get(&css->refcnt); } +/** + * css_get_many - obtain references on the specified css + * @css: target css + * @n: number of references to get + * + * The caller must already have a reference. + */ +static inline void css_get_many(struct cgroup_subsys_state *css, unsigned int n) +{ + if (!(css->flags & CSS_NO_REF)) + percpu_ref_get_many(&css->refcnt, n); +} + /** * css_tryget - try to obtain a reference on the specified css * @css: target css @@ -159,6 +172,19 @@ static inline void css_put(struct cgroup_subsys_state *css) percpu_ref_put(&css->refcnt); } +/** + * css_put_many - put css references + * @css: target css + * @n: number of references to put + * + * Put references obtained via css_get() and css_tryget_online(). + */ +static inline void css_put_many(struct cgroup_subsys_state *css, unsigned int n) +{ + if (!(css->flags & CSS_NO_REF)) + percpu_ref_put_many(&css->refcnt, n); +} + /* bits in struct cgroup flags field */ enum { /* Control Group requires release notifications to userspace */ diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h index 51ce60c35f4c..530b249f7ea4 100644 --- a/include/linux/percpu-refcount.h +++ b/include/linux/percpu-refcount.h @@ -147,27 +147,41 @@ static inline bool __ref_is_percpu(struct percpu_ref *ref, } /** - * percpu_ref_get - increment a percpu refcount + * percpu_ref_get_many - increment a percpu refcount * @ref: percpu_ref to get + * @nr: number of references to get * - * Analagous to atomic_long_inc(). + * Analogous to atomic_long_add(). * * This function is safe to call as long as @ref is between init and exit. */ -static inline void percpu_ref_get(struct percpu_ref *ref) +static inline void percpu_ref_get_many(struct percpu_ref *ref, unsigned long nr) { unsigned long __percpu *percpu_count; rcu_read_lock_sched(); if (__ref_is_percpu(ref, &percpu_count)) - this_cpu_inc(*percpu_count); + this_cpu_add(*percpu_count, nr); else - atomic_long_inc(&ref->count); + atomic_long_add(nr, &ref->count); rcu_read_unlock_sched(); } +/** + * percpu_ref_get - increment a percpu refcount + * @ref: percpu_ref to get + * + * Analagous to atomic_long_inc(). + * + * This function is safe to call as long as @ref is between init and exit. + */ +static inline void percpu_ref_get(struct percpu_ref *ref) +{ + percpu_ref_get_many(ref, 1); +} + /** * percpu_ref_tryget - try to increment a percpu refcount * @ref: percpu_ref to try-get @@ -231,28 +245,43 @@ static inline bool percpu_ref_tryget_live(struct percpu_ref *ref) } /** - * percpu_ref_put - decrement a percpu refcount + * percpu_ref_put_many - decrement a percpu refcount * @ref: percpu_ref to put + * @nr: number of references to put * * Decrement the refcount, and if 0, call the release function (which was passed * to percpu_ref_init()) * * This function is safe to call as long as @ref is between init and exit. 
*/ -static inline void percpu_ref_put(struct percpu_ref *ref) +static inline void percpu_ref_put_many(struct percpu_ref *ref, unsigned long nr) { unsigned long __percpu *percpu_count; rcu_read_lock_sched(); if (__ref_is_percpu(ref, &percpu_count)) - this_cpu_dec(*percpu_count); - else if (unlikely(atomic_long_dec_and_test(&ref->count))) + this_cpu_sub(*percpu_count, nr); + else if (unlikely(atomic_long_sub_and_test(nr, &ref->count))) ref->release(ref); rcu_read_unlock_sched(); } +/** + * percpu_ref_put - decrement a percpu refcount + * @ref: percpu_ref to put + * + * Decrement the refcount, and if 0, call the release function (which was passed + * to percpu_ref_init()) + * + * This function is safe to call as long as @ref is between init and exit. + */ +static inline void percpu_ref_put(struct percpu_ref *ref) +{ + percpu_ref_put_many(ref, 1); +} + /** * percpu_ref_is_zero - test whether a percpu refcount reached zero * @ref: percpu_ref to test diff --git a/mm/memcontrol.c b/mm/memcontrol.c index c3cd3bb77dd9..f69da2ac6323 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -2273,6 +2273,7 @@ static void drain_stock(struct memcg_stock_pcp *stock) page_counter_uncharge(&old->memory, stock->nr_pages); if (do_swap_account) page_counter_uncharge(&old->memsw, stock->nr_pages); + css_put_many(&old->css, stock->nr_pages); stock->nr_pages = 0; } stock->cached = NULL; @@ -2530,6 +2531,7 @@ bypass: return -EINTR; done_restock: + css_get_many(&memcg->css, batch); if (batch > nr_pages) refill_stock(memcg, batch - nr_pages); done: @@ -2544,6 +2546,8 @@ static void cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages) page_counter_uncharge(&memcg->memory, nr_pages); if (do_swap_account) page_counter_uncharge(&memcg->memsw, nr_pages); + + css_put_many(&memcg->css, nr_pages); } /* @@ -2739,6 +2743,7 @@ static int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp, page_counter_charge(&memcg->memory, nr_pages); if (do_swap_account) page_counter_charge(&memcg->memsw, nr_pages); + css_get_many(&memcg->css, nr_pages); ret = 0; } else if (ret) page_counter_uncharge(&memcg->kmem, nr_pages); @@ -2754,8 +2759,10 @@ static void memcg_uncharge_kmem(struct mem_cgroup *memcg, page_counter_uncharge(&memcg->memsw, nr_pages); /* Not down to 0 */ - if (page_counter_uncharge(&memcg->kmem, nr_pages)) + if (page_counter_uncharge(&memcg->kmem, nr_pages)) { + css_put_many(&memcg->css, nr_pages); return; + } /* * Releases a reference taken in kmem_cgroup_css_offline in case @@ -2767,6 +2774,8 @@ static void memcg_uncharge_kmem(struct mem_cgroup *memcg, */ if (memcg_kmem_test_and_clear_dead(memcg)) css_put(&memcg->css); + + css_put_many(&memcg->css, nr_pages); } /* @@ -3394,10 +3403,13 @@ static int mem_cgroup_move_parent(struct page *page, ret = mem_cgroup_move_account(page, nr_pages, pc, child, parent); if (!ret) { + if (!mem_cgroup_is_root(parent)) + css_get_many(&parent->css, nr_pages); /* Take charge off the local counters */ page_counter_cancel(&child->memory, nr_pages); if (do_swap_account) page_counter_cancel(&child->memsw, nr_pages); + css_put_many(&child->css, nr_pages); } if (nr_pages > 1) @@ -5767,7 +5779,6 @@ static void __mem_cgroup_clear_mc(void) { struct mem_cgroup *from = mc.from; struct mem_cgroup *to = mc.to; - int i; /* we must uncharge all the leftover precharges from mc.to */ if (mc.precharge) { @@ -5795,8 +5806,7 @@ static void __mem_cgroup_clear_mc(void) if (!mem_cgroup_is_root(mc.to)) page_counter_uncharge(&mc.to->memory, mc.moved_swap); - for (i = 0; i < mc.moved_swap; i++) - 
css_put(&mc.from->css); + css_put_many(&mc.from->css, mc.moved_swap); /* we've already done css_get(mc.to) */ mc.moved_swap = 0; @@ -6343,6 +6353,9 @@ static void uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout, __this_cpu_add(memcg->stat->nr_page_events, nr_anon + nr_file); memcg_check_events(memcg, dummy_page); local_irq_restore(flags); + + if (!mem_cgroup_is_root(memcg)) + css_put_many(&memcg->css, max(nr_mem, nr_memsw)); } static void uncharge_list(struct list_head *page_list) -- cgit v1.2.3 From 64f2199389414341ed3a570663f23616c131ba25 Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Wed, 10 Dec 2014 15:42:45 -0800 Subject: mm: memcontrol: remove obsolete kmemcg pinning tricks As charges now pin the css explicitely, there is no more need for kmemcg to acquire a proxy reference for outstanding pages during offlining, or maintain state to identify such "dead" groups. This was the last user of the uncharge functions' return values, so remove them as well. Signed-off-by: Johannes Weiner Reviewed-by: Vladimir Davydov Acked-by: Michal Hocko Cc: David Rientjes Cc: Tejun Heo Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/page_counter.h | 4 +-- mm/memcontrol.c | 74 +------------------------------------------- mm/page_counter.c | 23 +++----------- 3 files changed, 7 insertions(+), 94 deletions(-) (limited to 'mm') diff --git a/include/linux/page_counter.h b/include/linux/page_counter.h index 7cce3be99ff3..955421575d16 100644 --- a/include/linux/page_counter.h +++ b/include/linux/page_counter.h @@ -34,12 +34,12 @@ static inline unsigned long page_counter_read(struct page_counter *counter) return atomic_long_read(&counter->count); } -int page_counter_cancel(struct page_counter *counter, unsigned long nr_pages); +void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages); void page_counter_charge(struct page_counter *counter, unsigned long nr_pages); int page_counter_try_charge(struct page_counter *counter, unsigned long nr_pages, struct page_counter **fail); -int page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages); +void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages); int page_counter_limit(struct page_counter *counter, unsigned long limit); int page_counter_memparse(const char *buf, unsigned long *nr_pages); diff --git a/mm/memcontrol.c b/mm/memcontrol.c index f69da2ac6323..0e6484ea268d 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -369,7 +369,6 @@ struct mem_cgroup { /* internal only representation about the status of kmem accounting. */ enum { KMEM_ACCOUNTED_ACTIVE, /* accounted by this cgroup itself */ - KMEM_ACCOUNTED_DEAD, /* dead memcg with pending kmem charges */ }; #ifdef CONFIG_MEMCG_KMEM @@ -383,22 +382,6 @@ static bool memcg_kmem_is_active(struct mem_cgroup *memcg) return test_bit(KMEM_ACCOUNTED_ACTIVE, &memcg->kmem_account_flags); } -static void memcg_kmem_mark_dead(struct mem_cgroup *memcg) -{ - /* - * Our caller must use css_get() first, because memcg_uncharge_kmem() - * will call css_put() if it sees the memcg is dead. - */ - smp_wmb(); - if (test_bit(KMEM_ACCOUNTED_ACTIVE, &memcg->kmem_account_flags)) - set_bit(KMEM_ACCOUNTED_DEAD, &memcg->kmem_account_flags); -} - -static bool memcg_kmem_test_and_clear_dead(struct mem_cgroup *memcg) -{ - return test_and_clear_bit(KMEM_ACCOUNTED_DEAD, - &memcg->kmem_account_flags); -} #endif /* Stuffs for move charges at task migration. 
*/ @@ -2758,22 +2741,7 @@ static void memcg_uncharge_kmem(struct mem_cgroup *memcg, if (do_swap_account) page_counter_uncharge(&memcg->memsw, nr_pages); - /* Not down to 0 */ - if (page_counter_uncharge(&memcg->kmem, nr_pages)) { - css_put_many(&memcg->css, nr_pages); - return; - } - - /* - * Releases a reference taken in kmem_cgroup_css_offline in case - * this last uncharge is racing with the offlining code or it is - * outliving the memcg existence. - * - * The memory barrier imposed by test&clear is paired with the - * explicit one in memcg_kmem_mark_dead(). - */ - if (memcg_kmem_test_and_clear_dead(memcg)) - css_put(&memcg->css); + page_counter_uncharge(&memcg->kmem, nr_pages); css_put_many(&memcg->css, nr_pages); } @@ -4757,40 +4725,6 @@ static void memcg_destroy_kmem(struct mem_cgroup *memcg) { mem_cgroup_sockets_destroy(memcg); } - -static void kmem_cgroup_css_offline(struct mem_cgroup *memcg) -{ - if (!memcg_kmem_is_active(memcg)) - return; - - /* - * kmem charges can outlive the cgroup. In the case of slab - * pages, for instance, a page contain objects from various - * processes. As we prevent from taking a reference for every - * such allocation we have to be careful when doing uncharge - * (see memcg_uncharge_kmem) and here during offlining. - * - * The idea is that that only the _last_ uncharge which sees - * the dead memcg will drop the last reference. An additional - * reference is taken here before the group is marked dead - * which is then paired with css_put during uncharge resp. here. - * - * Although this might sound strange as this path is called from - * css_offline() when the referencemight have dropped down to 0 and - * shouldn't be incremented anymore (css_tryget_online() would - * fail) we do not have other options because of the kmem - * allocations lifetime. - */ - css_get(&memcg->css); - - memcg_kmem_mark_dead(memcg); - - if (page_counter_read(&memcg->kmem)) - return; - - if (memcg_kmem_test_and_clear_dead(memcg)) - css_put(&memcg->css); -} #else static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss) { @@ -4800,10 +4734,6 @@ static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss) static void memcg_destroy_kmem(struct mem_cgroup *memcg) { } - -static void kmem_cgroup_css_offline(struct mem_cgroup *memcg) -{ -} #endif /* @@ -5407,8 +5337,6 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css) } spin_unlock(&memcg->event_list_lock); - kmem_cgroup_css_offline(memcg); - /* * This requires that offlining is serialized. Right now that is * guaranteed because css_killed_work_fn() holds the cgroup_mutex. diff --git a/mm/page_counter.c b/mm/page_counter.c index f0cbc0825426..a009574fbba9 100644 --- a/mm/page_counter.c +++ b/mm/page_counter.c @@ -16,19 +16,14 @@ * page_counter_cancel - take pages out of the local counter * @counter: counter * @nr_pages: number of pages to cancel - * - * Returns whether there are remaining pages in the counter. */ -int page_counter_cancel(struct page_counter *counter, unsigned long nr_pages) +void page_counter_cancel(struct page_counter *counter, unsigned long nr_pages) { long new; new = atomic_long_sub_return(nr_pages, &counter->count); - /* More uncharges than charges? */ WARN_ON_ONCE(new < 0); - - return new > 0; } /** @@ -117,23 +112,13 @@ failed: * page_counter_uncharge - hierarchically uncharge pages * @counter: counter * @nr_pages: number of pages to uncharge - * - * Returns whether there are remaining charges in @counter. 
*/ -int page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages) +void page_counter_uncharge(struct page_counter *counter, unsigned long nr_pages) { struct page_counter *c; - int ret = 1; - for (c = counter; c; c = c->parent) { - int remainder; - - remainder = page_counter_cancel(c, nr_pages); - if (c == counter && !remainder) - ret = 0; - } - - return ret; + for (c = counter; c; c = c->parent) + page_counter_cancel(c, nr_pages); } /** -- cgit v1.2.3 From b2052564e66da2f0551d34a09488411919cfa14d Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Wed, 10 Dec 2014 15:42:48 -0800 Subject: mm: memcontrol: continue cache reclaim from offlined groups On cgroup deletion, outstanding page cache charges are moved to the parent group so that they're not lost and can be reclaimed during pressure on/inside said parent. But this reparenting is fairly tricky and its synchroneous nature has led to several lock-ups in the past. Since c2931b70a32c ("cgroup: iterate cgroup_subsys_states directly") css iterators now also include offlined css, so memcg iterators can be changed to include offlined children during reclaim of a group, and leftover cache can just stay put. There is a slight change of behavior in that charges of deleted groups no longer show up as local charges in the parent. But they are still included in the parent's hierarchical statistics. Signed-off-by: Johannes Weiner Acked-by: Vladimir Davydov Acked-by: Michal Hocko Cc: David Rientjes Cc: Tejun Heo Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 218 +------------------------------------------------------- 1 file changed, 1 insertion(+), 217 deletions(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 0e6484ea268d..f90e43c1499f 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -1132,7 +1132,7 @@ struct mem_cgroup *mem_cgroup_iter(struct mem_cgroup *root, if (css == &root->css) break; - if (css_tryget_online(css)) { + if (css_tryget(css)) { /* * Make sure the memcg is initialized: * mem_cgroup_css_online() orders the the @@ -3316,79 +3316,6 @@ out: return ret; } -/** - * mem_cgroup_move_parent - moves page to the parent group - * @page: the page to move - * @pc: page_cgroup of the page - * @child: page's cgroup - * - * move charges to its parent or the root cgroup if the group has no - * parent (aka use_hierarchy==0). - * Although this might fail (get_page_unless_zero, isolate_lru_page or - * mem_cgroup_move_account fails) the failure is always temporary and - * it signals a race with a page removal/uncharge or migration. In the - * first case the page is on the way out and it will vanish from the LRU - * on the next attempt and the call should be retried later. - * Isolation from the LRU fails only if page has been isolated from - * the LRU since we looked at it and that usually means either global - * reclaim or migration going on. The page will either get back to the - * LRU or vanish. - * Finaly mem_cgroup_move_account fails only if the page got uncharged - * (!PageCgroupUsed) or moved to a different group. The page will - * disappear in the next attempt. 
- */ -static int mem_cgroup_move_parent(struct page *page, - struct page_cgroup *pc, - struct mem_cgroup *child) -{ - struct mem_cgroup *parent; - unsigned int nr_pages; - unsigned long uninitialized_var(flags); - int ret; - - VM_BUG_ON(mem_cgroup_is_root(child)); - - ret = -EBUSY; - if (!get_page_unless_zero(page)) - goto out; - if (isolate_lru_page(page)) - goto put; - - nr_pages = hpage_nr_pages(page); - - parent = parent_mem_cgroup(child); - /* - * If no parent, move charges to root cgroup. - */ - if (!parent) - parent = root_mem_cgroup; - - if (nr_pages > 1) { - VM_BUG_ON_PAGE(!PageTransHuge(page), page); - flags = compound_lock_irqsave(page); - } - - ret = mem_cgroup_move_account(page, nr_pages, - pc, child, parent); - if (!ret) { - if (!mem_cgroup_is_root(parent)) - css_get_many(&parent->css, nr_pages); - /* Take charge off the local counters */ - page_counter_cancel(&child->memory, nr_pages); - if (do_swap_account) - page_counter_cancel(&child->memsw, nr_pages); - css_put_many(&child->css, nr_pages); - } - - if (nr_pages > 1) - compound_unlock_irqrestore(page, flags); - putback_lru_page(page); -put: - put_page(page); -out: - return ret; -} - #ifdef CONFIG_MEMCG_SWAP static void mem_cgroup_swap_statistics(struct mem_cgroup *memcg, bool charge) @@ -3682,105 +3609,6 @@ unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order, return nr_reclaimed; } -/** - * mem_cgroup_force_empty_list - clears LRU of a group - * @memcg: group to clear - * @node: NUMA node - * @zid: zone id - * @lru: lru to to clear - * - * Traverse a specified page_cgroup list and try to drop them all. This doesn't - * reclaim the pages page themselves - pages are moved to the parent (or root) - * group. - */ -static void mem_cgroup_force_empty_list(struct mem_cgroup *memcg, - int node, int zid, enum lru_list lru) -{ - struct lruvec *lruvec; - unsigned long flags; - struct list_head *list; - struct page *busy; - struct zone *zone; - - zone = &NODE_DATA(node)->node_zones[zid]; - lruvec = mem_cgroup_zone_lruvec(zone, memcg); - list = &lruvec->lists[lru]; - - busy = NULL; - do { - struct page_cgroup *pc; - struct page *page; - - spin_lock_irqsave(&zone->lru_lock, flags); - if (list_empty(list)) { - spin_unlock_irqrestore(&zone->lru_lock, flags); - break; - } - page = list_entry(list->prev, struct page, lru); - if (busy == page) { - list_move(&page->lru, list); - busy = NULL; - spin_unlock_irqrestore(&zone->lru_lock, flags); - continue; - } - spin_unlock_irqrestore(&zone->lru_lock, flags); - - pc = lookup_page_cgroup(page); - - if (mem_cgroup_move_parent(page, pc, memcg)) { - /* found lock contention or "pc" is obsolete. */ - busy = page; - } else - busy = NULL; - cond_resched(); - } while (!list_empty(list)); -} - -/* - * make mem_cgroup's charge to be 0 if there is no task by moving - * all the charges and pages to the parent. - * This enables deleting this mem_cgroup. - * - * Caller is responsible for holding css reference on the memcg. - */ -static void mem_cgroup_reparent_charges(struct mem_cgroup *memcg) -{ - int node, zid; - - do { - /* This is for making all *used* pages to be on LRU. 
*/ - lru_add_drain_all(); - drain_all_stock_sync(memcg); - mem_cgroup_start_move(memcg); - for_each_node_state(node, N_MEMORY) { - for (zid = 0; zid < MAX_NR_ZONES; zid++) { - enum lru_list lru; - for_each_lru(lru) { - mem_cgroup_force_empty_list(memcg, - node, zid, lru); - } - } - } - mem_cgroup_end_move(memcg); - memcg_oom_recover(memcg); - cond_resched(); - - /* - * Kernel memory may not necessarily be trackable to a specific - * process. So they are not migrated, and therefore we can't - * expect their value to drop to 0 here. - * Having res filled up with kmem only is enough. - * - * This is a safety check because mem_cgroup_force_empty_list - * could have raced with mem_cgroup_replace_page_cache callers - * so the lru seemed empty but the page could have been added - * right after the check. RES_USAGE should be safe as we always - * charge before adding to the LRU. - */ - } while (page_counter_read(&memcg->memory) - - page_counter_read(&memcg->kmem) > 0); -} - /* * Test whether @memcg has children, dead or alive. Note that this * function doesn't care whether @memcg has use_hierarchy enabled and @@ -5323,7 +5151,6 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css) { struct mem_cgroup *memcg = mem_cgroup_from_css(css); struct mem_cgroup_event *event, *tmp; - struct cgroup_subsys_state *iter; /* * Unregister events and notify userspace. @@ -5337,13 +5164,6 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css) } spin_unlock(&memcg->event_list_lock); - /* - * This requires that offlining is serialized. Right now that is - * guaranteed because css_killed_work_fn() holds the cgroup_mutex. - */ - css_for_each_descendant_post(iter, css) - mem_cgroup_reparent_charges(mem_cgroup_from_css(iter)); - memcg_unregister_all_caches(memcg); vmpressure_cleanup(&memcg->vmpressure); } @@ -5351,42 +5171,6 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css) static void mem_cgroup_css_free(struct cgroup_subsys_state *css) { struct mem_cgroup *memcg = mem_cgroup_from_css(css); - /* - * XXX: css_offline() would be where we should reparent all - * memory to prepare the cgroup for destruction. However, - * memcg does not do css_tryget_online() and page_counter charging - * under the same RCU lock region, which means that charging - * could race with offlining. Offlining only happens to - * cgroups with no tasks in them but charges can show up - * without any tasks from the swapin path when the target - * memcg is looked up from the swapout record and not from the - * current task as it usually is. A race like this can leak - * charges and put pages with stale cgroup pointers into - * circulation: - * - * #0 #1 - * lookup_swap_cgroup_id() - * rcu_read_lock() - * mem_cgroup_lookup() - * css_tryget_online() - * rcu_read_unlock() - * disable css_tryget_online() - * call_rcu() - * offline_css() - * reparent_charges() - * page_counter_try_charge() - * css_put() - * css_free() - * pc->mem_cgroup = dead memcg - * add page to lru - * - * The bulk of the charges are still moved in offline_css() to - * avoid pinning a lot of pages in case a long-term reference - * like a swapout record is deferring the css_free() to long - * after offlining. 
But this makes sure we catch any charges - * made after offlining: - */ - mem_cgroup_reparent_charges(memcg); memcg_destroy_kmem(memcg); __mem_cgroup_free(memcg); -- cgit v1.2.3 From 6d3d6aa22af30580cde0d2e23890027bb47a3544 Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Wed, 10 Dec 2014 15:42:50 -0800 Subject: mm: memcontrol: remove synchronous stock draining code With charge reparenting, the last synchronous stock drainer left. Signed-off-by: Johannes Weiner Reviewed-by: Vladimir Davydov Acked-by: Michal Hocko Cc: David Rientjes Cc: Tejun Heo Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 46 ++++++---------------------------------------- 1 file changed, 6 insertions(+), 40 deletions(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index f90e43c1499f..3a628435f36a 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -634,8 +634,6 @@ static void disarm_static_keys(struct mem_cgroup *memcg) disarm_kmem_keys(memcg); } -static void drain_all_stock_async(struct mem_cgroup *memcg); - static struct mem_cgroup_per_zone * mem_cgroup_zone_zoneinfo(struct mem_cgroup *memcg, struct zone *zone) { @@ -2302,13 +2300,15 @@ static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages) /* * Drains all per-CPU charge caches for given root_memcg resp. subtree - * of the hierarchy under it. sync flag says whether we should block - * until the work is done. + * of the hierarchy under it. */ -static void drain_all_stock(struct mem_cgroup *root_memcg, bool sync) +static void drain_all_stock(struct mem_cgroup *root_memcg) { int cpu, curcpu; + /* If someone's already draining, avoid adding running more workers. */ + if (!mutex_trylock(&percpu_charge_mutex)) + return; /* Notify other cpus that system-wide "drain" is running */ get_online_cpus(); curcpu = get_cpu(); @@ -2329,41 +2329,7 @@ static void drain_all_stock(struct mem_cgroup *root_memcg, bool sync) } } put_cpu(); - - if (!sync) - goto out; - - for_each_online_cpu(cpu) { - struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu); - if (test_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) - flush_work(&stock->work); - } -out: put_online_cpus(); -} - -/* - * Tries to drain stocked charges in other cpus. This function is asynchronous - * and just put a work per cpu for draining localy on each cpu. Caller can - * expects some charges will be back later but cannot wait for it. - */ -static void drain_all_stock_async(struct mem_cgroup *root_memcg) -{ - /* - * If someone calls draining, avoid adding more kworker runs. - */ - if (!mutex_trylock(&percpu_charge_mutex)) - return; - drain_all_stock(root_memcg, false); - mutex_unlock(&percpu_charge_mutex); -} - -/* This is a synchronous drain interface. 
*/ -static void drain_all_stock_sync(struct mem_cgroup *root_memcg) -{ - /* called when force_empty is called */ - mutex_lock(&percpu_charge_mutex); - drain_all_stock(root_memcg, true); mutex_unlock(&percpu_charge_mutex); } @@ -2472,7 +2438,7 @@ retry: goto retry; if (!drained) { - drain_all_stock_async(mem_over_limit); + drain_all_stock(mem_over_limit); drained = true; goto retry; } -- cgit v1.2.3 From f88dfff5f160aa43d4a434f8d638c07c82b5ad47 Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Wed, 10 Dec 2014 15:42:53 -0800 Subject: mm/page_alloc.c: convert boot printks without log level to pr_info Signed-off-by: Anton Blanchard Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/page_alloc.c | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) (limited to 'mm') diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 616a2c956b4b..701fe9018fdc 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -3893,14 +3893,14 @@ void __ref build_all_zonelists(pg_data_t *pgdat, struct zone *zone) else page_group_by_mobility_disabled = 0; - printk("Built %i zonelists in %s order, mobility grouping %s. " + pr_info("Built %i zonelists in %s order, mobility grouping %s. " "Total pages: %ld\n", nr_online_nodes, zonelist_order_name[current_zonelist_order], page_group_by_mobility_disabled ? "off" : "on", vm_total_pages); #ifdef CONFIG_NUMA - printk("Policy zone: %s\n", zone_names[policy_zone]); + pr_info("Policy zone: %s\n", zone_names[policy_zone]); #endif } @@ -5334,33 +5334,33 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn) find_zone_movable_pfns_for_nodes(); /* Print out the zone ranges */ - printk("Zone ranges:\n"); + pr_info("Zone ranges:\n"); for (i = 0; i < MAX_NR_ZONES; i++) { if (i == ZONE_MOVABLE) continue; - printk(KERN_CONT " %-8s ", zone_names[i]); + pr_info(" %-8s ", zone_names[i]); if (arch_zone_lowest_possible_pfn[i] == arch_zone_highest_possible_pfn[i]) - printk(KERN_CONT "empty\n"); + pr_cont("empty\n"); else - printk(KERN_CONT "[mem %0#10lx-%0#10lx]\n", + pr_cont("[mem %0#10lx-%0#10lx]\n", arch_zone_lowest_possible_pfn[i] << PAGE_SHIFT, (arch_zone_highest_possible_pfn[i] << PAGE_SHIFT) - 1); } /* Print out the PFNs ZONE_MOVABLE begins at in each node */ - printk("Movable zone start for each node\n"); + pr_info("Movable zone start for each node\n"); for (i = 0; i < MAX_NUMNODES; i++) { if (zone_movable_pfn[i]) - printk(" Node %d: %#010lx\n", i, + pr_info(" Node %d: %#010lx\n", i, zone_movable_pfn[i] << PAGE_SHIFT); } /* Print out the early node map */ - printk("Early memory node ranges\n"); + pr_info("Early memory node ranges\n"); for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) - printk(" node %3d: [mem %#010lx-%#010lx]\n", nid, + pr_info(" node %3d: [mem %#010lx-%#010lx]\n", nid, start_pfn << PAGE_SHIFT, (end_pfn << PAGE_SHIFT) - 1); /* Initialise every node */ @@ -5496,7 +5496,7 @@ void __init mem_init_print_info(const char *str) #undef adj_init_size - printk("Memory: %luK/%luK available " + pr_info("Memory: %luK/%luK available " "(%luK kernel code, %luK rwdata, %luK rodata, " "%luK init, %luK bss, %luK reserved" #ifdef CONFIG_HIGHMEM -- cgit v1.2.3 From 0cbc8533b75b6d8e3416e598e9dbf40d8bcf4e01 Mon Sep 17 00:00:00 2001 From: Pintu Kumar Date: Wed, 10 Dec 2014 15:42:56 -0800 Subject: mm/vmalloc.c: replace printk with pr_warn This patch replaces printk(KERN_WARNING ...) with pr_warn(). This also saves the extra line that was previously needed only for formatting.
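For illustration, the general shape of such a conversion looks like the following minimal sketch; the "mydrv" message is hypothetical and not taken from this patch:

    /* before: the explicit log level pushes the format string to a new line */
    printk(KERN_WARNING
           "mydrv: allocation of %lu bytes failed\n", size);

    /* after: pr_warn() implies KERN_WARNING, so one line is enough */
    pr_warn("mydrv: allocation of %lu bytes failed\n", size);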
Signed-off-by: Pintu Kumar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/vmalloc.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'mm') diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 90520af7f186..8a18196fcdff 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -463,8 +463,7 @@ overflow: goto retry; } if (printk_ratelimit()) - printk(KERN_WARNING - "vmap allocation for size %lu failed: " + pr_warn("vmap allocation for size %lu failed: " "use vmalloc= to increase size.\n", size); kfree(va); return ERR_PTR(-EBUSY); -- cgit v1.2.3 From 8612c6639b70c59308cf37ae4790817df9621281 Mon Sep 17 00:00:00 2001 From: Pintu Kumar Date: Wed, 10 Dec 2014 15:42:58 -0800 Subject: mm/vmscan.c: replace printk with pr_err This patch replaces the printk(KERN_ERR ...) found under shrink_slab with pr_err(). This also saves the extra line that was previously needed only for formatting. Signed-off-by: Pintu Kumar Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/vmscan.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'mm') diff --git a/mm/vmscan.c b/mm/vmscan.c index dcb47074ae03..59605b7c9970 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -260,8 +260,7 @@ shrink_slab_node(struct shrink_control *shrinkctl, struct shrinker *shrinker, do_div(delta, lru_pages + 1); total_scan += delta; if (total_scan < 0) { - printk(KERN_ERR - "shrink_slab: %pF negative objects to delete nr=%ld\n", + pr_err("shrink_slab: %pF negative objects to delete nr=%ld\n", shrinker->scan_objects, total_scan); total_scan = freeable; } -- cgit v1.2.3 From 93481ff0e5a0c7636359a7ee52248856da5e7859 Mon Sep 17 00:00:00 2001 From: Vlastimil Babka Date: Wed, 10 Dec 2014 15:43:01 -0800 Subject: mm: introduce single zone pcplists drain The functions for draining per-cpu pages back to buddy allocators currently always operate on all zones. There are however several cases where the drain is only needed in the context of a single zone, and spilling other pcplists is a waste of time both due to the extra spilling and later refilling. This patch introduces a new zone pointer parameter to drain_all_pages() and changes the dummy parameter of drain_local_pages() to also be a zone pointer. When NULL is passed, the functions operate on all zones as usual. Passing a specific zone pointer reduces the work to the single zone. All callers are updated to pass the NULL pointer in this patch. Conversion to single zone (where appropriate) is done in further patches.
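As a sketch of the resulting calling convention (this patch passes NULL everywhere; the zone-restricted form is what the follow-up patches switch to):

    drain_all_pages(NULL);             /* old behaviour: drain pcplists of all populated zones */
    drain_all_pages(page_zone(page));  /* drain only the zone owning this page */

With a non-NULL zone, only CPUs that actually hold pages on that zone's pcplists are sent the draining work, and only that zone's lists are spilled.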
Signed-off-by: Vlastimil Babka Cc: Naoya Horiguchi Cc: Mel Gorman Cc: Rik van Riel Cc: Yasuaki Ishimatsu Cc: Zhang Yanfei Cc: Xishi Qiu Cc: Vladimir Davydov Cc: Joonsoo Kim Cc: Michal Nazarewicz Cc: Marek Szyprowski Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/gfp.h | 4 +-- mm/memory-failure.c | 4 +-- mm/memory_hotplug.c | 4 +-- mm/page_alloc.c | 81 ++++++++++++++++++++++++++++++++++++----------------- mm/page_isolation.c | 2 +- 5 files changed, 63 insertions(+), 32 deletions(-) (limited to 'mm') diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 41b30fd4d041..07d2699cdb51 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -381,8 +381,8 @@ extern void free_kmem_pages(unsigned long addr, unsigned int order); void page_alloc_init(void); void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp); -void drain_all_pages(void); -void drain_local_pages(void *dummy); +void drain_all_pages(struct zone *zone); +void drain_local_pages(struct zone *zone); /* * gfp_allowed_mask is set to GFP_BOOT_MASK during early boot to restrict what diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 8639f6b28746..851b4d7eef3a 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -233,7 +233,7 @@ void shake_page(struct page *p, int access) lru_add_drain_all(); if (PageLRU(p)) return; - drain_all_pages(); + drain_all_pages(NULL); if (PageLRU(p) || is_free_buddy_page(p)) return; } @@ -1661,7 +1661,7 @@ static int __soft_offline_page(struct page *page, int flags) if (!is_free_buddy_page(page)) lru_add_drain_all(); if (!is_free_buddy_page(page)) - drain_all_pages(); + drain_all_pages(NULL); SetPageHWPoison(page); if (!is_free_buddy_page(page)) pr_info("soft offline: %#lx: page leaked\n", diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index 1bf4807cb21e..aa0c6e5a3065 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -1725,7 +1725,7 @@ repeat: if (drain) { lru_add_drain_all(); cond_resched(); - drain_all_pages(); + drain_all_pages(NULL); } pfn = scan_movable_pages(start_pfn, end_pfn); @@ -1747,7 +1747,7 @@ repeat: lru_add_drain_all(); yield(); /* drain pcp pages, this is synchronous. */ - drain_all_pages(); + drain_all_pages(NULL); /* * dissolve free hugepages in the memory block before doing offlining * actually in order to make hugetlbfs's object counting consistent. diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 701fe9018fdc..13d5796de8f3 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1267,55 +1267,75 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) #endif /* - * Drain pages of the indicated processor. + * Drain pcplists of the indicated processor and zone. * * The processor must either be the current processor and the * thread pinned to the current processor or a processor that * is not online. 
*/ -static void drain_pages(unsigned int cpu) +static void drain_pages_zone(unsigned int cpu, struct zone *zone) { unsigned long flags; - struct zone *zone; + struct per_cpu_pageset *pset; + struct per_cpu_pages *pcp; - for_each_populated_zone(zone) { - struct per_cpu_pageset *pset; - struct per_cpu_pages *pcp; + local_irq_save(flags); + pset = per_cpu_ptr(zone->pageset, cpu); - local_irq_save(flags); - pset = per_cpu_ptr(zone->pageset, cpu); + pcp = &pset->pcp; + if (pcp->count) { + free_pcppages_bulk(zone, pcp->count, pcp); + pcp->count = 0; + } + local_irq_restore(flags); +} - pcp = &pset->pcp; - if (pcp->count) { - free_pcppages_bulk(zone, pcp->count, pcp); - pcp->count = 0; - } - local_irq_restore(flags); +/* + * Drain pcplists of all zones on the indicated processor. + * + * The processor must either be the current processor and the + * thread pinned to the current processor or a processor that + * is not online. + */ +static void drain_pages(unsigned int cpu) +{ + struct zone *zone; + + for_each_populated_zone(zone) { + drain_pages_zone(cpu, zone); } } /* * Spill all of this CPU's per-cpu pages back into the buddy allocator. + * + * The CPU has to be pinned. When zone parameter is non-NULL, spill just + * the single zone's pages. */ -void drain_local_pages(void *arg) +void drain_local_pages(struct zone *zone) { - drain_pages(smp_processor_id()); + int cpu = smp_processor_id(); + + if (zone) + drain_pages_zone(cpu, zone); + else + drain_pages(cpu); } /* * Spill all the per-cpu pages from all CPUs back into the buddy allocator. * + * When zone parameter is non-NULL, spill just the single zone's pages. + * * Note that this code is protected against sending an IPI to an offline * CPU but does not guarantee sending an IPI to newly hotplugged CPUs: * on_each_cpu_mask() blocks hotplug and won't talk to offlined CPUs but * nothing keeps CPUs from showing up after we populated the cpumask and * before the call to on_each_cpu_mask(). */ -void drain_all_pages(void) +void drain_all_pages(struct zone *zone) { int cpu; - struct per_cpu_pageset *pcp; - struct zone *zone; /* * Allocate in the BSS so we wont require allocation in @@ -1330,20 +1350,31 @@ void drain_all_pages(void) * disables preemption as part of its processing */ for_each_online_cpu(cpu) { + struct per_cpu_pageset *pcp; + struct zone *z; bool has_pcps = false; - for_each_populated_zone(zone) { + + if (zone) { pcp = per_cpu_ptr(zone->pageset, cpu); - if (pcp->pcp.count) { + if (pcp->pcp.count) has_pcps = true; - break; + } else { + for_each_populated_zone(z) { + pcp = per_cpu_ptr(z->pageset, cpu); + if (pcp->pcp.count) { + has_pcps = true; + break; + } } } + if (has_pcps) cpumask_set_cpu(cpu, &cpus_with_pcps); else cpumask_clear_cpu(cpu, &cpus_with_pcps); } - on_each_cpu_mask(&cpus_with_pcps, drain_local_pages, NULL, 1); + on_each_cpu_mask(&cpus_with_pcps, (smp_call_func_t) drain_local_pages, + zone, 1); } #ifdef CONFIG_HIBERNATION @@ -2433,7 +2464,7 @@ retry: * pages are pinned on the per-cpu lists. 
Drain them and try again */ if (!page && !drained) { - drain_all_pages(); + drain_all_pages(NULL); drained = true; goto retry; } @@ -6385,7 +6416,7 @@ int alloc_contig_range(unsigned long start, unsigned long end, */ lru_add_drain_all(); - drain_all_pages(); + drain_all_pages(NULL); order = 0; outer_start = start; diff --git a/mm/page_isolation.c b/mm/page_isolation.c index c8778f7e208e..f2452e5116b2 100644 --- a/mm/page_isolation.c +++ b/mm/page_isolation.c @@ -68,7 +68,7 @@ out: spin_unlock_irqrestore(&zone->lock, flags); if (!ret) - drain_all_pages(); + drain_all_pages(NULL); return ret; } -- cgit v1.2.3 From ec25af84b23b6862341b5b5b68d24be3f53b8d2c Mon Sep 17 00:00:00 2001 From: Vlastimil Babka Date: Wed, 10 Dec 2014 15:43:04 -0800 Subject: mm, page_isolation: drain single zone pcplists When setting MIGRATETYPE_ISOLATE on a pageblock, pcplists are drained to have a better chance that all pages will be successfully isolated and not left in the per-cpu caches. Since isolation is always concerned with a single zone, we can reduce the pcplists drain to the single zone, which is now possible. The change should make memory isolation faster and not disturbing unrelated pcplists anymore. Signed-off-by: Vlastimil Babka Cc: Naoya Horiguchi Cc: Mel Gorman Cc: Rik van Riel Cc: Yasuaki Ishimatsu Cc: Zhang Yanfei Cc: Xishi Qiu Cc: Vladimir Davydov Cc: Joonsoo Kim Cc: Michal Nazarewicz Cc: Marek Szyprowski Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/page_isolation.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/page_isolation.c b/mm/page_isolation.c index f2452e5116b2..72f5ac381ab3 100644 --- a/mm/page_isolation.c +++ b/mm/page_isolation.c @@ -68,7 +68,7 @@ out: spin_unlock_irqrestore(&zone->lock, flags); if (!ret) - drain_all_pages(NULL); + drain_all_pages(zone); return ret; } -- cgit v1.2.3 From 510f550788b8aba5070c59ec652de8b4ab660852 Mon Sep 17 00:00:00 2001 From: Vlastimil Babka Date: Wed, 10 Dec 2014 15:43:07 -0800 Subject: mm, cma: drain single zone pcplists CMA allocation drains pcplists so that pages can merge back to buddy allocator. Since it operates on a single zone, we can reduce the pcplists drain to the single zone, which is now possible. The change should make CMA allocations faster and not disturbing unrelated pcplists anymore. Signed-off-by: Vlastimil Babka Cc: Naoya Horiguchi Cc: Mel Gorman Cc: Rik van Riel Cc: Yasuaki Ishimatsu Cc: Zhang Yanfei Cc: Xishi Qiu Cc: Vladimir Davydov Cc: Joonsoo Kim Cc: Michal Nazarewicz Cc: Marek Szyprowski Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/page_alloc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 13d5796de8f3..f3a6bf12cbc1 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -6416,7 +6416,7 @@ int alloc_contig_range(unsigned long start, unsigned long end, */ lru_add_drain_all(); - drain_all_pages(NULL); + drain_all_pages(cc.zone); order = 0; outer_start = start; -- cgit v1.2.3 From c05543293e0bf586842844c14fd8c598f494a107 Mon Sep 17 00:00:00 2001 From: Vlastimil Babka Date: Wed, 10 Dec 2014 15:43:10 -0800 Subject: mm, memory_hotplug/failure: drain single zone pcplists Memory hotplug and failure mechanisms have several places where pcplists are drained so that pages are returned to the buddy allocator and can be e.g. prepared for offlining. This is always done in the context of a single zone, we can reduce the pcplists drain to the single zone, which is now possible. 
The change should make memory offlining due to hotremove or failure faster and not disturbing unrelated pcplists anymore. Signed-off-by: Vlastimil Babka Cc: Naoya Horiguchi Cc: Mel Gorman Cc: Rik van Riel Cc: Yasuaki Ishimatsu Cc: Zhang Yanfei Cc: Xishi Qiu Cc: Vladimir Davydov Cc: Joonsoo Kim Cc: Michal Nazarewicz Cc: Marek Szyprowski Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memory-failure.c | 4 ++-- mm/memory_hotplug.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) (limited to 'mm') diff --git a/mm/memory-failure.c b/mm/memory-failure.c index 851b4d7eef3a..84e7ded04321 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -233,7 +233,7 @@ void shake_page(struct page *p, int access) lru_add_drain_all(); if (PageLRU(p)) return; - drain_all_pages(NULL); + drain_all_pages(page_zone(p)); if (PageLRU(p) || is_free_buddy_page(p)) return; } @@ -1661,7 +1661,7 @@ static int __soft_offline_page(struct page *page, int flags) if (!is_free_buddy_page(page)) lru_add_drain_all(); if (!is_free_buddy_page(page)) - drain_all_pages(NULL); + drain_all_pages(page_zone(page)); SetPageHWPoison(page); if (!is_free_buddy_page(page)) pr_info("soft offline: %#lx: page leaked\n", diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index aa0c6e5a3065..9fab10795bea 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -1725,7 +1725,7 @@ repeat: if (drain) { lru_add_drain_all(); cond_resched(); - drain_all_pages(NULL); + drain_all_pages(zone); } pfn = scan_movable_pages(start_pfn, end_pfn); @@ -1747,7 +1747,7 @@ repeat: lru_add_drain_all(); yield(); /* drain pcp pages, this is synchronous. */ - drain_all_pages(NULL); + drain_all_pages(zone); /* * dissolve free hugepages in the memory block before doing offlining * actually in order to make hugetlbfs's object counting consistent. -- cgit v1.2.3 From ab1f306fa92f4d875c8f0b9e9f90e27ca8e7b37b Mon Sep 17 00:00:00 2001 From: Yu Zhao Date: Wed, 10 Dec 2014 15:43:17 -0800 Subject: mm: verify compound order when freeing a page This allows us to catch the bug fixed in the previous patch (mm: free compound page with correct order). Here we also verify whether a page is tail page or not -- tail pages are supposed to be freed along with their head, not by themselves. Signed-off-by: Yu Zhao Reviewed-by: "Kirill A. Shutemov" Cc: Andrea Arcangeli Cc: Mel Gorman Cc: David Rientjes Cc: Bob Liu Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/page_alloc.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'mm') diff --git a/mm/page_alloc.c b/mm/page_alloc.c index f3a6bf12cbc1..b7c18f094697 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -741,6 +741,9 @@ static bool free_pages_prepare(struct page *page, unsigned int order) int i; int bad = 0; + VM_BUG_ON_PAGE(PageTail(page), page); + VM_BUG_ON_PAGE(PageHead(page) && compound_order(page) != order, page); + trace_mm_page_free(page, order); kmemcheck_free_shadow(page, order); -- cgit v1.2.3 From 1da58ee2a0279a1b0afd3248396de5659b8cf95b Mon Sep 17 00:00:00 2001 From: Jamie Liu Date: Wed, 10 Dec 2014 15:43:20 -0800 Subject: mm: vmscan: count only dirty pages as congested shrink_page_list() counts all pages with a mapping, including clean pages, toward nr_congested if they're on a write-congested BDI. shrink_inactive_list() then sets ZONE_CONGESTED if nr_dirty == nr_congested. Fix this apples-to-oranges comparison by only counting pages for nr_congested if they count for nr_dirty. 
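Condensed, the fixed check (shown in the diff below) makes the congestion count a subset of the dirty count:

    /* A page can contribute to nr_congested only if it also contributed
     * to nr_dirty, so nr_congested can no longer exceed nr_dirty and the
     * nr_dirty == nr_congested test compares like with like.
     */
    if (((dirty || writeback) && mapping &&
         bdi_write_congested(mapping->backing_dev_info)) ||
        (writeback && PageReclaim(page)))
            nr_congested++;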
Signed-off-by: Jamie Liu Cc: Johannes Weiner Cc: Mel Gorman Cc: Greg Thelen Cc: Hugh Dickins Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/vmscan.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/vmscan.c b/mm/vmscan.c index 59605b7c9970..53157e157061 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -874,7 +874,8 @@ static unsigned long shrink_page_list(struct list_head *page_list, * end of the LRU a second time. */ mapping = page_mapping(page); - if ((mapping && bdi_write_congested(mapping->backing_dev_info)) || + if (((dirty || writeback) && mapping && + bdi_write_congested(mapping->backing_dev_info)) || (writeback && PageReclaim(page))) nr_congested++; -- cgit v1.2.3 From ebff398017c69a3810bcbc5200ba224d5ccaa207 Mon Sep 17 00:00:00 2001 From: Vlastimil Babka Date: Wed, 10 Dec 2014 15:43:22 -0800 Subject: mm, compaction: pass classzone_idx and alloc_flags to watermark checking Compaction relies on zone watermark checks for decisions such as whether it is worth starting to compact in compaction_suitable() or whether compaction should stop in compact_finished(). The watermark checks take classzone_idx and alloc_flags parameters, which are related to the memory allocation request. But from the context of compaction they are currently passed as 0, including the direct compaction which is invoked to satisfy the allocation request, and could therefore know the proper values. The lack of proper values can lead to a mismatch between decisions taken during compaction and decisions related to the allocation request. Lack of a proper classzone_idx value means that lowmem_reserve is not taken into account. This has manifested (during recent changes to deferred compaction) when the DMA zone was used as a fallback for the preferred Normal zone. compaction_suitable() without a proper classzone_idx would think that the watermarks are already satisfied, but the watermark check in get_page_from_freelist() would fail. Because of this problem, deferring compaction has extra complexity that can be removed in the following patch. The issue (not confirmed in practice) with missing alloc_flags is opposite in nature. For allocations that include ALLOC_HIGH, ALLOC_HARDER or ALLOC_CMA in alloc_flags (the last includes all MOVABLE allocations on CMA-enabled systems) the watermark checking in compaction with 0 passed will be stricter than in get_page_from_freelist(). In these cases compaction might be running for a longer time than is really needed. Another issue with compaction_suitable() is that the check for "does the zone need compaction at all?" comes only after the check "does the zone have enough free pages to succeed compaction". The latter considers extra pages for migration and can therefore in some situations fail and return COMPACT_SKIPPED, although the high-order allocation would succeed and we should return COMPACT_PARTIAL. This patch fixes these problems by adding alloc_flags and classzone_idx to struct compact_control and related functions involved in direct compaction and watermark checking. All other callers of compaction_suitable() pass proper values where those are known. This is currently limited to classzone_idx, which is sometimes known in kswapd context. However, the direct reclaim callers should_continue_reclaim() and compaction_ready() do not currently know the proper values, so the coordination between reclaim and compaction may still not be as accurate as it could be. This can be fixed later, if it's shown to be an issue.
Additionally the checks in compaction_suitable() are reordered to address the second issue described above. The effect of this patch should be slightly better high-order allocation success rates and/or less compaction overhead, depending on the type of allocations and presence of CMA. It allows simplifying the deferred compaction code in a follow-up patch. When testing with stress-highalloc, there was some slight improvement (which might be just due to variance) in success rates of non-THP-like allocations. Signed-off-by: Vlastimil Babka Cc: Minchan Kim Cc: Mel Gorman Cc: Joonsoo Kim Cc: Michal Nazarewicz Cc: Naoya Horiguchi Cc: Christoph Lameter Acked-by: Rik van Riel Cc: David Rientjes Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/compaction.h | 8 ++++++-- mm/compaction.c | 48 ++++++++++++++++++++++++++-------------------- mm/internal.h | 2 ++ mm/page_alloc.c | 1 + mm/vmscan.c | 12 ++++++------ 5 files changed, 42 insertions(+), 29 deletions(-) (limited to 'mm') diff --git a/include/linux/compaction.h b/include/linux/compaction.h index 60bdf8dc02a3..d896765a15b0 100644 --- a/include/linux/compaction.h +++ b/include/linux/compaction.h @@ -33,10 +33,12 @@ extern int fragmentation_index(struct zone *zone, unsigned int order); extern unsigned long try_to_compact_pages(struct zonelist *zonelist, int order, gfp_t gfp_mask, nodemask_t *mask, enum migrate_mode mode, int *contended, + int alloc_flags, int classzone_idx, struct zone **candidate_zone); extern void compact_pgdat(pg_data_t *pgdat, int order); extern void reset_isolation_suitable(pg_data_t *pgdat); -extern unsigned long compaction_suitable(struct zone *zone, int order); +extern unsigned long compaction_suitable(struct zone *zone, int order, + int alloc_flags, int classzone_idx); /* Do not skip compaction more than 64 times */ #define COMPACT_MAX_DEFER_SHIFT 6 @@ -103,6 +105,7 @@ static inline bool compaction_restarting(struct zone *zone, int order) static inline unsigned long try_to_compact_pages(struct zonelist *zonelist, int order, gfp_t gfp_mask, nodemask_t *nodemask, enum migrate_mode mode, int *contended, + int alloc_flags, int classzone_idx, struct zone **candidate_zone) { return COMPACT_CONTINUE; } @@ -116,7 +119,8 @@ static inline void reset_isolation_suitable(pg_data_t *pgdat) { } -static inline unsigned long compaction_suitable(struct zone *zone, int order) +static inline unsigned long compaction_suitable(struct zone *zone, int order, + int alloc_flags, int classzone_idx) { return COMPACT_SKIPPED; } diff --git a/mm/compaction.c b/mm/compaction.c index f9792ba3537c..1fc6736815e0 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -1086,9 +1086,9 @@ static int compact_finished(struct zone *zone, struct compact_control *cc, /* Compaction run is not finished if the watermark is not met */ watermark = low_wmark_pages(zone); - watermark += (1 << cc->order); - if (!zone_watermark_ok(zone, cc->order, watermark, 0, 0)) + if (!zone_watermark_ok(zone, cc->order, watermark, cc->classzone_idx, + cc->alloc_flags)) return COMPACT_CONTINUE; /* Direct compactor: Is a suitable page free?
*/ @@ -1114,7 +1114,8 @@ static int compact_finished(struct zone *zone, struct compact_control *cc, * COMPACT_PARTIAL - If the allocation would succeed without compaction * COMPACT_CONTINUE - If compaction should run now */ -unsigned long compaction_suitable(struct zone *zone, int order) +unsigned long compaction_suitable(struct zone *zone, int order, + int alloc_flags, int classzone_idx) { int fragindex; unsigned long watermark; @@ -1126,21 +1127,30 @@ unsigned long compaction_suitable(struct zone *zone, int order) if (order == -1) return COMPACT_CONTINUE; + watermark = low_wmark_pages(zone); + /* + * If watermarks for high-order allocation are already met, there + * should be no need for compaction at all. + */ + if (zone_watermark_ok(zone, order, watermark, classzone_idx, + alloc_flags)) + return COMPACT_PARTIAL; + /* * Watermarks for order-0 must be met for compaction. Note the 2UL. * This is because during migration, copies of pages need to be * allocated and for a short time, the footprint is higher */ - watermark = low_wmark_pages(zone) + (2UL << order); - if (!zone_watermark_ok(zone, 0, watermark, 0, 0)) + watermark += (2UL << order); + if (!zone_watermark_ok(zone, 0, watermark, classzone_idx, alloc_flags)) return COMPACT_SKIPPED; /* * fragmentation index determines if allocation failures are due to * low memory or external fragmentation * - * index of -1000 implies allocations might succeed depending on - * watermarks + * index of -1000 would imply allocations might succeed depending on + * watermarks, but we already failed the high-order watermark check * index towards 0 implies failure is due to lack of memory * index towards 1000 implies failure is due to fragmentation * @@ -1150,10 +1160,6 @@ unsigned long compaction_suitable(struct zone *zone, int order) if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold) return COMPACT_SKIPPED; - if (fragindex == -1000 && zone_watermark_ok(zone, order, watermark, - 0, 0)) - return COMPACT_PARTIAL; - return COMPACT_CONTINUE; } @@ -1165,7 +1171,8 @@ static int compact_zone(struct zone *zone, struct compact_control *cc) const int migratetype = gfpflags_to_migratetype(cc->gfp_mask); const bool sync = cc->mode != MIGRATE_ASYNC; - ret = compaction_suitable(zone, cc->order); + ret = compaction_suitable(zone, cc->order, cc->alloc_flags, + cc->classzone_idx); switch (ret) { case COMPACT_PARTIAL: case COMPACT_SKIPPED: @@ -1254,7 +1261,8 @@ out: } static unsigned long compact_zone_order(struct zone *zone, int order, - gfp_t gfp_mask, enum migrate_mode mode, int *contended) + gfp_t gfp_mask, enum migrate_mode mode, int *contended, + int alloc_flags, int classzone_idx) { unsigned long ret; struct compact_control cc = { @@ -1264,6 +1272,8 @@ static unsigned long compact_zone_order(struct zone *zone, int order, .gfp_mask = gfp_mask, .zone = zone, .mode = mode, + .alloc_flags = alloc_flags, + .classzone_idx = classzone_idx, }; INIT_LIST_HEAD(&cc.freepages); INIT_LIST_HEAD(&cc.migratepages); @@ -1295,6 +1305,7 @@ int sysctl_extfrag_threshold = 500; unsigned long try_to_compact_pages(struct zonelist *zonelist, int order, gfp_t gfp_mask, nodemask_t *nodemask, enum migrate_mode mode, int *contended, + int alloc_flags, int classzone_idx, struct zone **candidate_zone) { enum zone_type high_zoneidx = gfp_zone(gfp_mask); @@ -1303,7 +1314,6 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist, struct zoneref *z; struct zone *zone; int rc = COMPACT_DEFERRED; - int alloc_flags = 0; int all_zones_contended = COMPACT_CONTENDED_LOCK; /* init for &= op 
*/ *contended = COMPACT_CONTENDED_NONE; @@ -1312,10 +1322,6 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist, if (!order || !may_enter_fs || !may_perform_io) return COMPACT_SKIPPED; -#ifdef CONFIG_CMA - if (gfpflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE) - alloc_flags |= ALLOC_CMA; -#endif /* Compact each zone in the list */ for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx, nodemask) { @@ -1326,7 +1332,7 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist, continue; status = compact_zone_order(zone, order, gfp_mask, mode, - &zone_contended); + &zone_contended, alloc_flags, classzone_idx); rc = max(status, rc); /* * It takes at least one zone that wasn't lock contended @@ -1335,8 +1341,8 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist, all_zones_contended &= zone_contended; /* If a normal allocation would succeed, stop compacting */ - if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0, - alloc_flags)) { + if (zone_watermark_ok(zone, order, low_wmark_pages(zone), + classzone_idx, alloc_flags)) { *candidate_zone = zone; /* * We think the allocation will succeed in this zone, diff --git a/mm/internal.h b/mm/internal.h index a4f90ba7068e..b643938fcf12 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -168,6 +168,8 @@ struct compact_control { int order; /* order a direct compactor needs */ const gfp_t gfp_mask; /* gfp mask of a direct compactor */ + const int alloc_flags; /* alloc flags of a direct compactor */ + const int classzone_idx; /* zone index of a direct compactor */ struct zone *zone; int contended; /* Signal need_sched() or lock * contention detected during diff --git a/mm/page_alloc.c b/mm/page_alloc.c index b7c18f094697..e32121fa2ba9 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -2341,6 +2341,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, compact_result = try_to_compact_pages(zonelist, order, gfp_mask, nodemask, mode, contended_compaction, + alloc_flags, classzone_idx, &last_compact_zone); current->flags &= ~PF_MEMALLOC; diff --git a/mm/vmscan.c b/mm/vmscan.c index 53157e157061..4636d9e822c1 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -2249,7 +2249,7 @@ static inline bool should_continue_reclaim(struct zone *zone, return true; /* If compaction would go ahead or the allocation would succeed, stop */ - switch (compaction_suitable(zone, sc->order)) { + switch (compaction_suitable(zone, sc->order, 0, 0)) { case COMPACT_PARTIAL: case COMPACT_CONTINUE: return false; @@ -2346,7 +2346,7 @@ static inline bool compaction_ready(struct zone *zone, int order) * If compaction is not ready to start and allocation is not likely * to succeed without it, then keep reclaiming. */ - if (compaction_suitable(zone, order) == COMPACT_SKIPPED) + if (compaction_suitable(zone, order, 0, 0) == COMPACT_SKIPPED) return false; return watermark_ok; @@ -2824,8 +2824,8 @@ static bool zone_balanced(struct zone *zone, int order, balance_gap, classzone_idx, 0)) return false; - if (IS_ENABLED(CONFIG_COMPACTION) && order && - compaction_suitable(zone, order) == COMPACT_SKIPPED) + if (IS_ENABLED(CONFIG_COMPACTION) && order && compaction_suitable(zone, + order, 0, classzone_idx) == COMPACT_SKIPPED) return false; return true; @@ -2952,8 +2952,8 @@ static bool kswapd_shrink_zone(struct zone *zone, * from memory. Do not reclaim more than needed for compaction. 
*/ if (IS_ENABLED(CONFIG_COMPACTION) && sc->order && - compaction_suitable(zone, sc->order) != - COMPACT_SKIPPED) + compaction_suitable(zone, sc->order, 0, classzone_idx) + != COMPACT_SKIPPED) testorder = 0; /* -- cgit v1.2.3 From 97d47a65be1e513edd02325ae828c9997878b578 Mon Sep 17 00:00:00 2001 From: Vlastimil Babka Date: Wed, 10 Dec 2014 15:43:25 -0800 Subject: mm, compaction: simplify deferred compaction Since commit 53853e2d2bfb ("mm, compaction: defer each zone individually instead of preferred zone"), compaction is deferred for each zone where sync direct compaction fails, and reset where it succeeds. However, it was observed that for DMA zone compaction often appeared to succeed while subsequent allocation attempt would not, due to different outcome of watermark check. In order to properly defer compaction in this zone, the candidate zone has to be passed back to __alloc_pages_direct_compact() and compaction deferred in the zone after the allocation attempt fails. The large source of mismatch between watermark check in compaction and allocation was the lack of alloc_flags and classzone_idx values in compaction, which has been fixed in the previous patch. So with this problem fixed, we can simplify the code by removing the candidate_zone parameter and deferring in __alloc_pages_direct_compact(). After this patch, the compaction activity during stress-highalloc benchmark is still somewhat increased, but it's negligible compared to the increase that occurred without the better watermark checking. This suggests that it is still possible to apparently succeed in compaction but fail to allocate, possibly due to parallel allocation activity. [akpm@linux-foundation.org: fix build] Suggested-by: Joonsoo Kim Signed-off-by: Vlastimil Babka Cc: Minchan Kim Cc: Mel Gorman Cc: Michal Nazarewicz Cc: Naoya Horiguchi Cc: Christoph Lameter Cc: Rik van Riel Cc: David Rientjes Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/compaction.h | 6 ++---- mm/compaction.c | 5 +---- mm/page_alloc.c | 12 +----------- 3 files changed, 4 insertions(+), 19 deletions(-) (limited to 'mm') diff --git a/include/linux/compaction.h b/include/linux/compaction.h index d896765a15b0..3238ffa33f68 100644 --- a/include/linux/compaction.h +++ b/include/linux/compaction.h @@ -33,8 +33,7 @@ extern int fragmentation_index(struct zone *zone, unsigned int order); extern unsigned long try_to_compact_pages(struct zonelist *zonelist, int order, gfp_t gfp_mask, nodemask_t *mask, enum migrate_mode mode, int *contended, - int alloc_flags, int classzone_idx, - struct zone **candidate_zone); + int alloc_flags, int classzone_idx); extern void compact_pgdat(pg_data_t *pgdat, int order); extern void reset_isolation_suitable(pg_data_t *pgdat); extern unsigned long compaction_suitable(struct zone *zone, int order, @@ -105,8 +104,7 @@ static inline bool compaction_restarting(struct zone *zone, int order) static inline unsigned long try_to_compact_pages(struct zonelist *zonelist, int order, gfp_t gfp_mask, nodemask_t *nodemask, enum migrate_mode mode, int *contended, - int alloc_flags, int classzone_idx, - struct zone **candidate_zone) + int alloc_flags, int classzone_idx) { return COMPACT_CONTINUE; } diff --git a/mm/compaction.c b/mm/compaction.c index 1fc6736815e0..75f4c1206d00 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -1298,15 +1298,13 @@ int sysctl_extfrag_threshold = 500; * @mode: The migration mode for async, sync light, or sync migration * @contended: Return value that determines if compaction was 
aborted due to need_resched() or lock contention - *candidate_zone: Return the zone where we think allocation should succeed * * This is the main entry point for direct page compaction. */ unsigned long try_to_compact_pages(struct zonelist *zonelist, int order, gfp_t gfp_mask, nodemask_t *nodemask, enum migrate_mode mode, int *contended, - int alloc_flags, int classzone_idx, - struct zone **candidate_zone) + int alloc_flags, int classzone_idx) { enum zone_type high_zoneidx = gfp_zone(gfp_mask); int may_enter_fs = gfp_mask & __GFP_FS; @@ -1343,7 +1341,6 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist, /* If a normal allocation would succeed, stop compacting */ if (zone_watermark_ok(zone, order, low_wmark_pages(zone), classzone_idx, alloc_flags)) { - *candidate_zone = zone; /* * We think the allocation will succeed in this zone, * but it is not certain, hence the false. The caller diff --git a/mm/page_alloc.c b/mm/page_alloc.c index e32121fa2ba9..edb0ce1e7cf3 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -2330,7 +2330,6 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, int classzone_idx, int migratetype, enum migrate_mode mode, int *contended_compaction, bool *deferred_compaction) { - struct zone *last_compact_zone = NULL; unsigned long compact_result; struct page *page; @@ -2341,8 +2340,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, compact_result = try_to_compact_pages(zonelist, order, gfp_mask, nodemask, mode, contended_compaction, - alloc_flags, classzone_idx, - &last_compact_zone); + alloc_flags, classzone_idx); current->flags &= ~PF_MEMALLOC; switch (compact_result) { @@ -2379,14 +2377,6 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, return page; } - /* - * last_compact_zone is where try_to_compact_pages thought allocation - * should succeed, so it did not defer compaction. But here we know - * that it didn't succeed, so we do the defer. - */ - if (last_compact_zone && mode != MIGRATE_ASYNC) - defer_compaction(last_compact_zone, order); - /* * It's bad if compaction run occurs and fails. The most likely reason * is that pages exist, but not enough to satisfy watermarks. -- cgit v1.2.3 From f86697953976b465a55e175ac999d43495a1dacc Mon Sep 17 00:00:00 2001 From: Vlastimil Babka Date: Wed, 10 Dec 2014 15:43:28 -0800 Subject: mm, compaction: defer only on COMPACT_COMPLETE Deferred compaction is employed to avoid compacting a zone where sync direct compaction has recently failed. As such, it makes sense to only defer when a full zone was scanned, which is when compact_zone returns with COMPACT_COMPLETE. It's less useful to defer when compact_zone returns with apparent success (COMPACT_PARTIAL), followed by a watermark check failure, which can happen due to parallel allocation activity. It also does not make much sense to defer compaction which was completely skipped (COMPACT_SKIPPED) for being unsuitable in the first place. This patch therefore makes deferred compaction trigger only when COMPACT_COMPLETE is returned from compact_zone(). Results of the stress-highalloc benchmark show the difference is within measurement error, so the issue is rather cosmetic.
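The resulting policy, condensed from the one-line change below:

    /* COMPACT_SKIPPED:  zone was unsuitable, nothing was learned - don't defer
     * COMPACT_PARTIAL:  apparent success; a later watermark failure is
     *                   likely due to parallel allocation - don't defer
     * COMPACT_COMPLETE: the whole zone was scanned without success - defer
     */
    if (mode != MIGRATE_ASYNC && status == COMPACT_COMPLETE)
            defer_compaction(zone, order);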
Signed-off-by: Vlastimil Babka Cc: Minchan Kim Cc: Mel Gorman Cc: Joonsoo Kim Cc: Michal Nazarewicz Cc: Naoya Horiguchi Cc: Christoph Lameter Acked-by: Rik van Riel Cc: David Rientjes Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/compaction.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/compaction.c b/mm/compaction.c index 75f4c1206d00..eaf0a925ff26 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -1362,7 +1362,7 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist, goto break_loop; } - if (mode != MIGRATE_ASYNC) { + if (mode != MIGRATE_ASYNC && status == COMPACT_COMPLETE) { /* * We think that allocation won't succeed in this zone * so we defer compaction there. If it ends up -- cgit v1.2.3 From 6bace090a25455cb1dffaa9ab4aabc36dbd44d4a Mon Sep 17 00:00:00 2001 From: Vlastimil Babka Date: Wed, 10 Dec 2014 15:43:31 -0800 Subject: mm, compaction: always update cached scanner positions Compaction caches the migration and free scanner positions between compaction invocations, so that the whole zone gets eventually scanned and there is no bias towards the initial scanner positions at the beginning/end of the zone. The cached positions are continuously updated as scanners progress and the updating stops as soon as a page is successfully isolated. The reasoning behind this is that a pageblock where isolation succeeded is likely to succeed again in near future and it should be worth revisiting it. However, the downside is that potentially many pages are rescanned without successful isolation. At worst, there might be a page where isolation from LRU succeeds but migration fails (potentially always). So upon encountering this page, cached position would always stop being updated for no good reason. It might have been useful to let such page be rescanned with sync compaction after async one failed, but this is now handled by caching scanner position for async and sync mode separately since commit 35979ef33931 ("mm, compaction: add per-zone migration pfn cache for async compaction"). After this patch, cached positions are updated unconditionally. In stress-highalloc benchmark, this has decreased the numbers of scanned pages by few percent, without affecting allocation success rates. To prevent free scanner from leaving free pages behind after they are returned due to page migration failure, the cached scanner pfn is changed to point to the pageblock of the returned free page with the highest pfn, before leaving compact_zone(). 
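Condensed from the hunks below, the free scanner bookkeeping at the end of compact_zone() becomes:

    /* release_freepages() now reports the highest pfn it gave back */
    free_pfn = release_freepages(&cc->freepages);
    /* the cached pfn is always the first pfn of a pageblock */
    free_pfn &= ~(pageblock_nr_pages - 1);
    /* only go back, not forward: compact_finished() may have reset it */
    if (free_pfn > zone->compact_cached_free_pfn)
            zone->compact_cached_free_pfn = free_pfn;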
[akpm@linux-foundation.org: coding-style fixes] Signed-off-by: Vlastimil Babka Cc: Minchan Kim Cc: Mel Gorman Cc: Joonsoo Kim Cc: Michal Nazarewicz Cc: Naoya Horiguchi Cc: Christoph Lameter Acked-by: Rik van Riel Cc: David Rientjes Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/compaction.c | 43 +++++++++++++++++++++++-------------------- mm/internal.h | 5 ----- 2 files changed, 23 insertions(+), 25 deletions(-) (limited to 'mm') diff --git a/mm/compaction.c b/mm/compaction.c index eaf0a925ff26..8f211bd2ea0d 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -41,15 +41,17 @@ static inline void count_compact_events(enum vm_event_item item, long delta) static unsigned long release_freepages(struct list_head *freelist) { struct page *page, *next; - unsigned long count = 0; + unsigned long high_pfn = 0; list_for_each_entry_safe(page, next, freelist, lru) { + unsigned long pfn = page_to_pfn(page); list_del(&page->lru); __free_page(page); - count++; + if (pfn > high_pfn) + high_pfn = pfn; } - return count; + return high_pfn; } static void map_pages(struct list_head *list) @@ -195,16 +197,12 @@ static void update_pageblock_skip(struct compact_control *cc, /* Update where async and sync compaction should restart */ if (migrate_scanner) { - if (cc->finished_update_migrate) - return; if (pfn > zone->compact_cached_migrate_pfn[0]) zone->compact_cached_migrate_pfn[0] = pfn; if (cc->mode != MIGRATE_ASYNC && pfn > zone->compact_cached_migrate_pfn[1]) zone->compact_cached_migrate_pfn[1] = pfn; } else { - if (cc->finished_update_free) - return; if (pfn < zone->compact_cached_free_pfn) zone->compact_cached_free_pfn = pfn; } @@ -715,7 +713,6 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn, del_page_from_lru_list(page, lruvec, page_lru(page)); isolate_success: - cc->finished_update_migrate = true; list_add(&page->lru, migratelist); cc->nr_migratepages++; nr_isolated++; @@ -888,15 +885,6 @@ static void isolate_freepages(struct compact_control *cc) isolate_start_pfn : block_start_pfn - pageblock_nr_pages; - /* - * Set a flag that we successfully isolated in this pageblock. - * In the next loop iteration, zone->compact_cached_free_pfn - * will not be updated and thus it will effectively contain the - * highest pageblock we isolated pages from. - */ - if (isolated) - cc->finished_update_free = true; - /* * isolate_freepages_block() might have aborted due to async * compaction being contended @@ -1251,9 +1239,24 @@ static int compact_zone(struct zone *zone, struct compact_control *cc) } out: - /* Release free pages and check accounting */ - cc->nr_freepages -= release_freepages(&cc->freepages); - VM_BUG_ON(cc->nr_freepages != 0); + /* + * Release free pages and update where the free scanner should restart, + * so we don't leave any returned pages behind in the next attempt. + */ + if (cc->nr_freepages > 0) { + unsigned long free_pfn = release_freepages(&cc->freepages); + + cc->nr_freepages = 0; + VM_BUG_ON(free_pfn == 0); + /* The cached pfn is always the first in a pageblock */ + free_pfn &= ~(pageblock_nr_pages-1); + /* + * Only go back, not forward. 
The cached pfn might have been + * already reset to zone end in compact_finished() + */ + if (free_pfn > zone->compact_cached_free_pfn) + zone->compact_cached_free_pfn = free_pfn; + } trace_mm_compaction_end(ret); diff --git a/mm/internal.h b/mm/internal.h index b643938fcf12..efad241f7014 100644 --- a/mm/internal.h +++ b/mm/internal.h @@ -161,11 +161,6 @@ struct compact_control { unsigned long migrate_pfn; /* isolate_migratepages search base */ enum migrate_mode mode; /* Async or sync migration mode */ bool ignore_skip_hint; /* Scan blocks even if marked skip */ - bool finished_update_free; /* True when the zone cached pfns are - * no longer being updated - */ - bool finished_update_migrate; - int order; /* order a direct compactor needs */ const gfp_t gfp_mask; /* gfp mask of a direct compactor */ const int alloc_flags; /* alloc flags of a direct compactor */ -- cgit v1.2.3 From fdaf7f5c40f3d20690c236298418acf72eb664b5 Mon Sep 17 00:00:00 2001 From: Vlastimil Babka Date: Wed, 10 Dec 2014 15:43:34 -0800 Subject: mm, compaction: more focused lru and pcplists draining The goal of memory compaction is to create high-order freepages through page migration. Page migration however puts pages on the per-cpu lru_add cache, which is later flushed to per-cpu pcplists, and only after the pcplists are drained can the pages actually merge. This can happen due to the per-cpu caches becoming full through further freeing, or explicitly. During direct compaction, it is useful to do the draining explicitly so that pages merge as soon as possible and compaction can detect success immediately and keep the latency impact at a minimum. However the current implementation is far from ideal. Draining is done only in __alloc_pages_direct_compact(), after all zones were already compacted, and the decisions to continue or stop compaction in individual zones were made without the last batch of migrations being merged. It is also missing the draining of the lru_add cache before the pcplists. This patch moves the draining for direct compaction into compact_zone(). It adds the missing lru_cache draining and uses the newly introduced single zone pcplists draining to reduce overhead and avoid impact on unrelated zones. Draining is only performed when it can actually lead to merging of a page of desired order (passed by cc->order). This means it is only done when migration occurred in the previously scanned cc->order aligned block(s) and the migration scanner is now pointing to the next cc->order aligned block. The patch has been tested with the stress-highalloc benchmark from mmtests. Although overall allocation success rates of the benchmark were not affected, the number of detected compaction successes has doubled. This suggests that allocations were previously successful due to implicit merging caused by background activity, making a later allocation attempt succeed immediately, but not attributing the success to compaction. Since stress-highalloc always tries to allocate almost the whole memory, it cannot show the improvement in its reported success rate metric. However, after this patch, compaction should detect success and terminate earlier, reducing the direct compaction latencies in a real scenario.
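Condensed from the compact_zone() hunk below, the drain triggers only once the migration scanner leaves the cc->order aligned block it last migrated from:

    unsigned long current_block_start =
            cc->migrate_pfn & ~((1UL << cc->order) - 1);

    if (cc->order > 0 && last_migrated_pfn &&
        last_migrated_pfn < current_block_start) {
            int cpu = get_cpu();
            lru_add_drain_cpu(cpu);   /* flush this CPU's lru_add cache first */
            drain_local_pages(zone);  /* then this zone's local pcplists */
            put_cpu();
            last_migrated_pfn = 0;    /* no more flushing until we migrate again */
    }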
Signed-off-by: Vlastimil Babka Cc: Minchan Kim Cc: Mel Gorman Cc: Joonsoo Kim Cc: Michal Nazarewicz Cc: Naoya Horiguchi Cc: Christoph Lameter Acked-by: Rik van Riel Cc: David Rientjes Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/compaction.c | 43 ++++++++++++++++++++++++++++++++++++++++++- mm/page_alloc.c | 4 ---- 2 files changed, 42 insertions(+), 5 deletions(-) (limited to 'mm') diff --git a/mm/compaction.c b/mm/compaction.c index 8f211bd2ea0d..546e571e9d60 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -1158,6 +1158,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc) unsigned long end_pfn = zone_end_pfn(zone); const int migratetype = gfpflags_to_migratetype(cc->gfp_mask); const bool sync = cc->mode != MIGRATE_ASYNC; + unsigned long last_migrated_pfn = 0; ret = compaction_suitable(zone, cc->order, cc->alloc_flags, cc->classzone_idx); @@ -1203,6 +1204,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc) while ((ret = compact_finished(zone, cc, migratetype)) == COMPACT_CONTINUE) { int err; + unsigned long isolate_start_pfn = cc->migrate_pfn; switch (isolate_migratepages(zone, cc)) { case ISOLATE_ABORT: @@ -1211,7 +1213,12 @@ static int compact_zone(struct zone *zone, struct compact_control *cc) cc->nr_migratepages = 0; goto out; case ISOLATE_NONE: - continue; + /* + * We haven't isolated and migrated anything, but + * there might still be unflushed migrations from + * previous cc->order aligned block. + */ + goto check_drain; case ISOLATE_SUCCESS: ; } @@ -1236,6 +1243,40 @@ static int compact_zone(struct zone *zone, struct compact_control *cc) goto out; } } + + /* + * Record where we could have freed pages by migration and not + * yet flushed them to buddy allocator. We use the pfn that + * isolate_migratepages() started from in this loop iteration + * - this is the lowest page that could have been isolated and + * then freed by migration. + */ + if (!last_migrated_pfn) + last_migrated_pfn = isolate_start_pfn; + +check_drain: + /* + * Has the migration scanner moved away from the previous + * cc->order aligned block where we migrated from? If yes, + * flush the pages that were freed, so that they can merge and + * compact_finished() can detect immediately if allocation + * would succeed. + */ + if (cc->order > 0 && last_migrated_pfn) { + int cpu; + unsigned long current_block_start = + cc->migrate_pfn & ~((1UL << cc->order) - 1); + + if (last_migrated_pfn < current_block_start) { + cpu = get_cpu(); + lru_add_drain_cpu(cpu); + drain_local_pages(zone); + put_cpu(); + /* No more flushing until we migrate again */ + last_migrated_pfn = 0; + } + } + } out: diff --git a/mm/page_alloc.c b/mm/page_alloc.c index edb0ce1e7cf3..7352aa45a335 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -2359,10 +2359,6 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, */ count_vm_event(COMPACTSTALL); - /* Page migration frees to the PCP lists but we want merging */ - drain_pages(get_cpu()); - put_cpu(); - page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist, high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS, -- cgit v1.2.3 From bc2f2e7ffe5b6292c74ee1206d6ca303e13886b2 Mon Sep 17 00:00:00 2001 From: Vladimir Davydov Date: Wed, 10 Dec 2014 15:43:40 -0800 Subject: memcg: simplify unreclaimable groups handling in soft limit reclaim If we fail to reclaim anything from a cgroup during a soft reclaim pass we want to get the next largest cgroup exceeding its soft limit. 
To achieve this, we should obviously remove the current group from the tree and then pick the largest group. Currently we have a weird loop instead. Let's simplify it. Signed-off-by: Vladimir Davydov Acked-by: Johannes Weiner Acked-by: Michal Hocko Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 26 ++++---------------------- 1 file changed, 4 insertions(+), 22 deletions(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 3a628435f36a..975207a9cc65 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -3518,34 +3518,16 @@ unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order, nr_reclaimed += reclaimed; *total_scanned += nr_scanned; spin_lock_irq(&mctz->lock); + __mem_cgroup_remove_exceeded(mz, mctz); /* * If we failed to reclaim anything from this memory cgroup * it is time to move on to the next cgroup */ next_mz = NULL; - if (!reclaimed) { - do { - /* - * Loop until we find yet another one. - * - * By the time we get the soft_limit lock - * again, someone might have aded the - * group back on the RB tree. Iterate to - * make sure we get a different mem. - * mem_cgroup_largest_soft_limit_node returns - * NULL if no other cgroup is present on - * the tree - */ - next_mz = - __mem_cgroup_largest_soft_limit_node(mctz); - if (next_mz == mz) - css_put(&next_mz->memcg->css); - else /* next_mz == NULL or other memcg */ - break; - } while (1); - } - __mem_cgroup_remove_exceeded(mz, mctz); + if (!reclaimed) + next_mz = __mem_cgroup_largest_soft_limit_node(mctz); + excess = soft_limit_excess(mz->memcg); /* * One school of thought says that we should not add -- cgit v1.2.3 From dfe0e773d0258a4d7dfd763e1fda04aa27680b90 Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Wed, 10 Dec 2014 15:43:43 -0800 Subject: mm: memcontrol: update mem_cgroup_page_lruvec() documentation Commit 7512102cf64d ("memcg: fix GPF when cgroup removal races with last exit") added a pc->mem_cgroup reset into mem_cgroup_page_lruvec() to prevent a crash where an anon page gets uncharged on unmap, the memcg is released, and then the final LRU isolation on free dereferences the stale pc->mem_cgroup pointer. But since commit 0a31bc97c80c ("mm: memcontrol: rewrite uncharge API"), pages are only uncharged AFTER that final LRU isolation, which guarantees the memcg's lifetime until then. pc->mem_cgroup now only needs to be reset for swapcache readahead pages. Update the comment and callsite requirements accordingly. Signed-off-by: Johannes Weiner Cc: Hugh Dickins Acked-by: Michal Hocko Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 975207a9cc65..b495f29d4746 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -1262,9 +1262,13 @@ out: } /** - * mem_cgroup_page_lruvec - return lruvec for adding an lru page + * mem_cgroup_page_lruvec - return lruvec for isolating/putting an LRU page * @page: the page * @zone: zone of the page + * + * This function is only safe when following the LRU page isolation + * and putback protocol: the LRU lock must be held, and the page must + * either be PageLRU() or the caller must have isolated/allocated it. 
*/ struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone) { @@ -1282,13 +1286,9 @@ struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone) memcg = pc->mem_cgroup; /* - * Surreptitiously switch any uncharged offlist page to root: - * an uncharged page off lru does nothing to secure - * its former mem_cgroup from sudden removal. - * - * Our caller holds lru_lock, and PageCgroupUsed is updated - * under page_cgroup lock: between them, they make all uses - * of pc->mem_cgroup safe. + * Swapcache readahead pages are added to the LRU - and + * possibly migrated - before they are charged. Ensure + * pc->mem_cgroup is sane. */ if (!PageLRU(page) && !PageCgroupUsed(pc) && memcg != root_mem_cgroup) pc->mem_cgroup = memcg = root_mem_cgroup; -- cgit v1.2.3 From 7d5e324573b0ffd7098ab880c82096ca29a11f7f Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Wed, 10 Dec 2014 15:43:46 -0800 Subject: mm: memcontrol: clarify migration where old page is uncharged Better explain re-entrant migration when compaction races with reclaim, and also mention swapcache readahead pages as possible uncharged migration sources. Signed-off-by: Johannes Weiner Cc: Hugh Dickins Acked-by: Michal Hocko Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index b495f29d4746..a0ae64ca55bf 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -6157,7 +6157,12 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage, if (PageCgroupUsed(pc)) return; - /* Re-entrant migration: old page already uncharged? */ + /* + * Swapcache readahead pages can get migrated before being + * charged, and migration from compaction can happen to an + * uncharged page when the PFN walker finds a page that + * reclaim just put back on the LRU but has not released yet. + */ pc = lookup_page_cgroup(oldpage); if (!PageCgroupUsed(pc)) return; -- cgit v1.2.3 From 8c0145b62ef7e9019ab39284ed88873c483c8003 Mon Sep 17 00:00:00 2001 From: Vladimir Davydov Date: Wed, 10 Dec 2014 15:43:48 -0800 Subject: memcg: remove activate_kmem_mutex The activate_kmem_mutex is used to serialize memcg.kmem.limit updates, but we already serialize them with memcg_limit_mutex so let's remove the former. Signed-off-by: Vladimir Davydov Acked-by: Johannes Weiner Acked-by: Michal Hocko Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 24 +++++------------------- 1 file changed, 5 insertions(+), 19 deletions(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index a0ae64ca55bf..420461bcaefd 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -2627,8 +2627,6 @@ static void commit_charge(struct page *page, struct mem_cgroup *memcg, */ static DEFINE_MUTEX(memcg_slab_mutex); -static DEFINE_MUTEX(activate_kmem_mutex); - /* * This is a bit cumbersome, but it is rarely used and avoids a backpointer * in the memcg_cache_params struct. 
@@ -3747,9 +3745,8 @@ static u64 mem_cgroup_read_u64(struct cgroup_subsys_state *css, } #ifdef CONFIG_MEMCG_KMEM -/* should be called with activate_kmem_mutex held */ -static int __memcg_activate_kmem(struct mem_cgroup *memcg, - unsigned long nr_pages) +static int memcg_activate_kmem(struct mem_cgroup *memcg, + unsigned long nr_pages) { int err = 0; int memcg_id; @@ -3811,17 +3808,6 @@ out: return err; } -static int memcg_activate_kmem(struct mem_cgroup *memcg, - unsigned long nr_pages) -{ - int ret; - - mutex_lock(&activate_kmem_mutex); - ret = __memcg_activate_kmem(memcg, nr_pages); - mutex_unlock(&activate_kmem_mutex); - return ret; -} - static int memcg_update_kmem_limit(struct mem_cgroup *memcg, unsigned long limit) { @@ -3844,14 +3830,14 @@ static int memcg_propagate_kmem(struct mem_cgroup *memcg) if (!parent) return 0; - mutex_lock(&activate_kmem_mutex); + mutex_lock(&memcg_limit_mutex); /* * If the parent cgroup is not kmem-active now, it cannot be activated * after this point, because it has at least one child already. */ if (memcg_kmem_is_active(parent)) - ret = __memcg_activate_kmem(memcg, PAGE_COUNTER_MAX); - mutex_unlock(&activate_kmem_mutex); + ret = memcg_activate_kmem(memcg, PAGE_COUNTER_MAX); + mutex_unlock(&memcg_limit_mutex); return ret; } #else -- cgit v1.2.3 From b9982f8d27f893de2e8e98a25c68bb838b5311a4 Mon Sep 17 00:00:00 2001 From: Michal Hocko Date: Wed, 10 Dec 2014 15:43:51 -0800 Subject: mm: memcontrol: micro-optimize mem_cgroup_split_huge_fixup() Don't call lookup_page_cgroup() when memcg is disabled. Cc: Johannes Weiner Cc: Vladimir Davydov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 420461bcaefd..8c10d4ca9c3f 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -3174,7 +3174,7 @@ static inline void memcg_unregister_all_caches(struct mem_cgroup *memcg) */ void mem_cgroup_split_huge_fixup(struct page *head) { - struct page_cgroup *head_pc = lookup_page_cgroup(head); + struct page_cgroup *head_pc; struct page_cgroup *pc; struct mem_cgroup *memcg; int i; @@ -3182,6 +3182,8 @@ void mem_cgroup_split_huge_fixup(struct page *head) if (mem_cgroup_disabled()) return; + head_pc = lookup_page_cgroup(head); + memcg = head_pc->mem_cgroup; for (i = 1; i < HPAGE_PMD_NR; i++) { pc = head_pc + i; -- cgit v1.2.3 From 7bdd143c37e591c254d0991ac398a53f3f9ef1af Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Wed, 10 Dec 2014 15:43:54 -0800 Subject: mm: memcontrol: uncharge pages on swapout This series gets rid of the remaining page_cgroup flags, thus cutting the memcg per-page overhead down to one pointer. This patch (of 4): mem_cgroup_swapout() is called with exclusive access to the page at the end of the page's lifetime. Instead of clearing the PCG_MEMSW flag and deferring the uncharge, just do it right away. This allows follow-up patches to simplify the uncharge code. 
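The reordering can be modeled in a few lines of standalone C; every type and function name below is invented for the sketch, and only the before/after placement of the uncharge mirrors the patch:

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct counter { long usage; };
struct memcg { struct counter memory; struct counter memsw; };
struct page_state { struct memcg *memcg; bool memsw_charged; };

/* Old scheme: swapout merely clears the flag and leaves the memory
 * charge for a later, separate uncharge pass to find. */
static void swapout_deferred(struct page_state *ps)
{
        ps->memsw_charged = false;
}

/* New scheme: the caller has exclusive access to the page here, so
 * the memory charge is settled immediately; only the memory+swap
 * charge outlives the page, tracked against the swap entry. */
static void swapout_immediate(struct page_state *ps)
{
        ps->memcg->memory.usage--;
        ps->memsw_charged = false;
}

int main(void)
{
        struct memcg m_old = { { 1 }, { 1 } }, m_new = { { 1 }, { 1 } };
        struct page_state p_old = { &m_old, true }, p_new = { &m_new, true };

        swapout_deferred(&p_old);       /* memory charge still pending */
        swapout_immediate(&p_new);      /* memory charge settled here */
        assert(m_old.memory.usage == 1 && m_new.memory.usage == 0);
        printf("deferred: memory=%ld, immediate: memory=%ld\n",
               m_old.memory.usage, m_new.memory.usage);
        return 0;
}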
Signed-off-by: Johannes Weiner Cc: Hugh Dickins Acked-by: Michal Hocko Acked-by: Vladimir Davydov Reviewed-by: KAMEZAWA Hiroyuki Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 8c10d4ca9c3f..266a440c89f9 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -5777,6 +5777,7 @@ static void __init enable_swap_cgroup(void) */ void mem_cgroup_swapout(struct page *page, swp_entry_t entry) { + struct mem_cgroup *memcg; struct page_cgroup *pc; unsigned short oldid; @@ -5793,13 +5794,22 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry) return; VM_BUG_ON_PAGE(!(pc->flags & PCG_MEMSW), page); + memcg = pc->mem_cgroup; - oldid = swap_cgroup_record(entry, mem_cgroup_id(pc->mem_cgroup)); + oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg)); VM_BUG_ON_PAGE(oldid, page); + mem_cgroup_swap_statistics(memcg, true); + + pc->flags = 0; - pc->flags &= ~PCG_MEMSW; - css_get(&pc->mem_cgroup->css); - mem_cgroup_swap_statistics(pc->mem_cgroup, true); + if (!mem_cgroup_is_root(memcg)) + page_counter_uncharge(&memcg->memory, 1); + + /* XXX: caller holds IRQ-safe mapping->tree_lock */ + VM_BUG_ON(!irqs_disabled()); + + mem_cgroup_charge_statistics(memcg, page, -1); + memcg_check_events(memcg, page); } /** -- cgit v1.2.3 From 18eca2e636f921e6350dc31b5b450bb4102d664f Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Wed, 10 Dec 2014 15:43:57 -0800 Subject: mm: memcontrol: remove unnecessary PCG_MEMSW memory+swap charge flag Now that mem_cgroup_swapout() fully uncharges the page, every page that is still in use when reaching mem_cgroup_uncharge() is known to carry both the memory and the memory+swap charge. Simplify the uncharge path and remove the PCG_MEMSW page flag accordingly. Signed-off-by: Johannes Weiner Cc: Hugh Dickins Acked-by: Michal Hocko Reviewed-by: Vladimir Davydov Acked-by: KAMEZAWA Hiroyuki Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/page_cgroup.h | 1 - mm/memcontrol.c | 34 ++++++++++++---------------------- 2 files changed, 12 insertions(+), 23 deletions(-) (limited to 'mm') diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h index 5c831f1eca79..da62ee2be28b 100644 --- a/include/linux/page_cgroup.h +++ b/include/linux/page_cgroup.h @@ -5,7 +5,6 @@ enum { /* flags for mem_cgroup */ PCG_USED = 0x01, /* This page is charged to a memcg */ PCG_MEM = 0x02, /* This page holds a memory charge */ - PCG_MEMSW = 0x04, /* This page holds a memory+swap charge */ }; struct pglist_data; diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 266a440c89f9..baf3b535b180 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -2614,7 +2614,7 @@ static void commit_charge(struct page *page, struct mem_cgroup *memcg, * have the page locked */ pc->mem_cgroup = memcg; - pc->flags = PCG_USED | PCG_MEM | (do_swap_account ? 
PCG_MEMSW : 0); + pc->flags = PCG_USED | PCG_MEM; if (lrucare) unlock_page_lru(page, isolated); @@ -5793,7 +5793,6 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry) if (!PageCgroupUsed(pc)) return; - VM_BUG_ON_PAGE(!(pc->flags & PCG_MEMSW), page); memcg = pc->mem_cgroup; oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg)); @@ -5989,17 +5988,16 @@ void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg) } static void uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout, - unsigned long nr_mem, unsigned long nr_memsw, unsigned long nr_anon, unsigned long nr_file, unsigned long nr_huge, struct page *dummy_page) { + unsigned long nr_pages = nr_anon + nr_file; unsigned long flags; if (!mem_cgroup_is_root(memcg)) { - if (nr_mem) - page_counter_uncharge(&memcg->memory, nr_mem); - if (nr_memsw) - page_counter_uncharge(&memcg->memsw, nr_memsw); + page_counter_uncharge(&memcg->memory, nr_pages); + if (do_swap_account) + page_counter_uncharge(&memcg->memsw, nr_pages); memcg_oom_recover(memcg); } @@ -6008,23 +6006,21 @@ static void uncharge_batch(struct mem_cgroup *memcg, unsigned long pgpgout, __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_CACHE], nr_file); __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE], nr_huge); __this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT], pgpgout); - __this_cpu_add(memcg->stat->nr_page_events, nr_anon + nr_file); + __this_cpu_add(memcg->stat->nr_page_events, nr_pages); memcg_check_events(memcg, dummy_page); local_irq_restore(flags); if (!mem_cgroup_is_root(memcg)) - css_put_many(&memcg->css, max(nr_mem, nr_memsw)); + css_put_many(&memcg->css, nr_pages); } static void uncharge_list(struct list_head *page_list) { struct mem_cgroup *memcg = NULL; - unsigned long nr_memsw = 0; unsigned long nr_anon = 0; unsigned long nr_file = 0; unsigned long nr_huge = 0; unsigned long pgpgout = 0; - unsigned long nr_mem = 0; struct list_head *next; struct page *page; @@ -6051,10 +6047,9 @@ static void uncharge_list(struct list_head *page_list) if (memcg != pc->mem_cgroup) { if (memcg) { - uncharge_batch(memcg, pgpgout, nr_mem, nr_memsw, - nr_anon, nr_file, nr_huge, page); - pgpgout = nr_mem = nr_memsw = 0; - nr_anon = nr_file = nr_huge = 0; + uncharge_batch(memcg, pgpgout, nr_anon, nr_file, + nr_huge, page); + pgpgout = nr_anon = nr_file = nr_huge = 0; } memcg = pc->mem_cgroup; } @@ -6070,18 +6065,14 @@ static void uncharge_list(struct list_head *page_list) else nr_file += nr_pages; - if (pc->flags & PCG_MEM) - nr_mem += nr_pages; - if (pc->flags & PCG_MEMSW) - nr_memsw += nr_pages; pc->flags = 0; pgpgout++; } while (next != page_list); if (memcg) - uncharge_batch(memcg, pgpgout, nr_mem, nr_memsw, - nr_anon, nr_file, nr_huge, page); + uncharge_batch(memcg, pgpgout, nr_anon, nr_file, + nr_huge, page); } /** @@ -6166,7 +6157,6 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage, return; VM_BUG_ON_PAGE(!(pc->flags & PCG_MEM), oldpage); - VM_BUG_ON_PAGE(do_swap_account && !(pc->flags & PCG_MEMSW), oldpage); if (lrucare) lock_page_lru(oldpage, &isolated); -- cgit v1.2.3 From f4aaa8b43d90294ca7546317997c452600e9a8a7 Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Wed, 10 Dec 2014 15:44:00 -0800 Subject: mm: memcontrol: remove unnecessary PCG_MEM memory charge flag PCG_MEM is a remnant from an earlier version of 0a31bc97c80c ("mm: memcontrol: rewrite uncharge API"), used to tell whether migration cleared a charge while leaving pc->mem_cgroup valid and PCG_USED set. 
But in the final version, mem_cgroup_migrate() directly uncharges the source page, rendering this distinction unnecessary. Remove it. Signed-off-by: Johannes Weiner Cc: Hugh Dickins Acked-by: Michal Hocko Reviewed-by: Vladimir Davydov Acked-by: KAMEZAWA Hiroyuki Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/page_cgroup.h | 1 - mm/memcontrol.c | 4 +--- 2 files changed, 1 insertion(+), 4 deletions(-) (limited to 'mm') diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h index da62ee2be28b..97536e685843 100644 --- a/include/linux/page_cgroup.h +++ b/include/linux/page_cgroup.h @@ -4,7 +4,6 @@ enum { /* flags for mem_cgroup */ PCG_USED = 0x01, /* This page is charged to a memcg */ - PCG_MEM = 0x02, /* This page holds a memory charge */ }; struct pglist_data; diff --git a/mm/memcontrol.c b/mm/memcontrol.c index baf3b535b180..3dfb56a93117 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -2614,7 +2614,7 @@ static void commit_charge(struct page *page, struct mem_cgroup *memcg, * have the page locked */ pc->mem_cgroup = memcg; - pc->flags = PCG_USED | PCG_MEM; + pc->flags = PCG_USED; if (lrucare) unlock_page_lru(page, isolated); @@ -6156,8 +6156,6 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage, if (!PageCgroupUsed(pc)) return; - VM_BUG_ON_PAGE(!(pc->flags & PCG_MEM), oldpage); - if (lrucare) lock_page_lru(oldpage, &isolated); -- cgit v1.2.3 From 2983331575bfb248abfb02efb5140b4a299e3f45 Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Wed, 10 Dec 2014 15:44:02 -0800 Subject: mm: memcontrol: remove unnecessary PCG_USED pc->mem_cgroup valid flag pc->mem_cgroup had to be left intact after uncharge for the final LRU removal, and !PCG_USED indicated whether the page was uncharged. But since commit 0a31bc97c80c ("mm: memcontrol: rewrite uncharge API") pages are uncharged after the final LRU removal. Uncharge can simply clear the pointer and the PCG_USED/PageCgroupUsed sites can test that instead. Because this is the last page_cgroup flag, this patch reduces the memcg per-page overhead to a single pointer. [akpm@linux-foundation.org: remove unneeded initialization of `memcg', per Michal] Signed-off-by: Johannes Weiner Cc: Hugh Dickins Acked-by: Michal Hocko Reviewed-by: Vladimir Davydov Acked-by: KAMEZAWA Hiroyuki Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/page_cgroup.h | 10 ----- mm/memcontrol.c | 107 +++++++++++++++++--------------------------- 2 files changed, 41 insertions(+), 76 deletions(-) (limited to 'mm') diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h index 97536e685843..1289be6b436c 100644 --- a/include/linux/page_cgroup.h +++ b/include/linux/page_cgroup.h @@ -1,11 +1,6 @@ #ifndef __LINUX_PAGE_CGROUP_H #define __LINUX_PAGE_CGROUP_H -enum { - /* flags for mem_cgroup */ - PCG_USED = 0x01, /* This page is charged to a memcg */ -}; - struct pglist_data; #ifdef CONFIG_MEMCG @@ -19,7 +14,6 @@ struct mem_cgroup; * then the page cgroup for pfn always exists. 
*/ struct page_cgroup { - unsigned long flags; struct mem_cgroup *mem_cgroup; }; @@ -39,10 +33,6 @@ static inline void page_cgroup_init(void) struct page_cgroup *lookup_page_cgroup(struct page *page); -static inline int PageCgroupUsed(struct page_cgroup *pc) -{ - return !!(pc->flags & PCG_USED); -} #else /* !CONFIG_MEMCG */ struct page_cgroup; diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 3dfb56a93117..09fece0eb9f1 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -1284,14 +1284,12 @@ struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone) pc = lookup_page_cgroup(page); memcg = pc->mem_cgroup; - /* * Swapcache readahead pages are added to the LRU - and - * possibly migrated - before they are charged. Ensure - * pc->mem_cgroup is sane. + * possibly migrated - before they are charged. */ - if (!PageLRU(page) && !PageCgroupUsed(pc) && memcg != root_mem_cgroup) - pc->mem_cgroup = memcg = root_mem_cgroup; + if (!memcg) + memcg = root_mem_cgroup; mz = mem_cgroup_page_zoneinfo(memcg, page); lruvec = &mz->lruvec; @@ -2151,7 +2149,7 @@ struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page, pc = lookup_page_cgroup(page); again: memcg = pc->mem_cgroup; - if (unlikely(!memcg || !PageCgroupUsed(pc))) + if (unlikely(!memcg)) return NULL; *locked = false; @@ -2159,7 +2157,7 @@ again: return memcg; move_lock_mem_cgroup(memcg, flags); - if (memcg != pc->mem_cgroup || !PageCgroupUsed(pc)) { + if (memcg != pc->mem_cgroup) { move_unlock_mem_cgroup(memcg, flags); goto again; } @@ -2525,7 +2523,7 @@ static struct mem_cgroup *mem_cgroup_lookup(unsigned short id) */ struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page) { - struct mem_cgroup *memcg = NULL; + struct mem_cgroup *memcg; struct page_cgroup *pc; unsigned short id; swp_entry_t ent; @@ -2533,9 +2531,10 @@ struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page) VM_BUG_ON_PAGE(!PageLocked(page), page); pc = lookup_page_cgroup(page); - if (PageCgroupUsed(pc)) { - memcg = pc->mem_cgroup; - if (memcg && !css_tryget_online(&memcg->css)) + memcg = pc->mem_cgroup; + + if (memcg) { + if (!css_tryget_online(&memcg->css)) memcg = NULL; } else if (PageSwapCache(page)) { ent.val = page_private(page); @@ -2586,7 +2585,7 @@ static void commit_charge(struct page *page, struct mem_cgroup *memcg, struct page_cgroup *pc = lookup_page_cgroup(page); int isolated; - VM_BUG_ON_PAGE(PageCgroupUsed(pc), page); + VM_BUG_ON_PAGE(pc->mem_cgroup, page); /* * we don't need page_cgroup_lock about tail pages, becase they are not * accessed by any other context at this point. @@ -2601,7 +2600,7 @@ static void commit_charge(struct page *page, struct mem_cgroup *memcg, /* * Nobody should be changing or seriously looking at - * pc->mem_cgroup and pc->flags at this point: + * pc->mem_cgroup at this point: * * - the page is uncharged * @@ -2614,7 +2613,6 @@ static void commit_charge(struct page *page, struct mem_cgroup *memcg, * have the page locked */ pc->mem_cgroup = memcg; - pc->flags = PCG_USED; if (lrucare) unlock_page_lru(page, isolated); @@ -3126,37 +3124,22 @@ void __memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, memcg_uncharge_kmem(memcg, 1 << order); return; } - /* - * The page is freshly allocated and not visible to any - * outside callers yet. Set up pc non-atomically. 
- */ pc = lookup_page_cgroup(page); pc->mem_cgroup = memcg; - pc->flags = PCG_USED; } void __memcg_kmem_uncharge_pages(struct page *page, int order) { - struct mem_cgroup *memcg = NULL; - struct page_cgroup *pc; - - - pc = lookup_page_cgroup(page); - if (!PageCgroupUsed(pc)) - return; - - memcg = pc->mem_cgroup; - pc->flags = 0; + struct page_cgroup *pc = lookup_page_cgroup(page); + struct mem_cgroup *memcg = pc->mem_cgroup; - /* - * We trust that only if there is a memcg associated with the page, it - * is a valid allocation - */ if (!memcg) return; VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page); + memcg_uncharge_kmem(memcg, 1 << order); + pc->mem_cgroup = NULL; } #else static inline void memcg_unregister_all_caches(struct mem_cgroup *memcg) @@ -3174,23 +3157,16 @@ static inline void memcg_unregister_all_caches(struct mem_cgroup *memcg) */ void mem_cgroup_split_huge_fixup(struct page *head) { - struct page_cgroup *head_pc; - struct page_cgroup *pc; - struct mem_cgroup *memcg; + struct page_cgroup *pc = lookup_page_cgroup(head); int i; if (mem_cgroup_disabled()) return; - head_pc = lookup_page_cgroup(head); + for (i = 1; i < HPAGE_PMD_NR; i++) + pc[i].mem_cgroup = pc[0].mem_cgroup; - memcg = head_pc->mem_cgroup; - for (i = 1; i < HPAGE_PMD_NR; i++) { - pc = head_pc + i; - pc->mem_cgroup = memcg; - pc->flags = head_pc->flags; - } - __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE], + __this_cpu_sub(pc[0].mem_cgroup->stat->count[MEM_CGROUP_STAT_RSS_HUGE], HPAGE_PMD_NR); } #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ @@ -3240,7 +3216,7 @@ static int mem_cgroup_move_account(struct page *page, goto out; ret = -EINVAL; - if (!PageCgroupUsed(pc) || pc->mem_cgroup != from) + if (pc->mem_cgroup != from) goto out_unlock; move_lock_mem_cgroup(from, &flags); @@ -3350,7 +3326,7 @@ static struct page_cgroup *lookup_page_cgroup_used(struct page *page) * the first time, i.e. during boot or memory hotplug; * or when mem_cgroup_disabled(). */ - if (likely(pc) && PageCgroupUsed(pc)) + if (likely(pc) && pc->mem_cgroup) return pc; return NULL; } @@ -3368,10 +3344,8 @@ void mem_cgroup_print_bad_page(struct page *page) struct page_cgroup *pc; pc = lookup_page_cgroup_used(page); - if (pc) { - pr_alert("pc:%p pc->flags:%lx pc->mem_cgroup:%p\n", - pc, pc->flags, pc->mem_cgroup); - } + if (pc) + pr_alert("pc:%p pc->mem_cgroup:%p\n", pc, pc->mem_cgroup); } #endif @@ -5308,7 +5282,7 @@ static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma, * mem_cgroup_move_account() checks the pc is valid or * not under LRU exclusion. 
*/ - if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) { + if (pc->mem_cgroup == mc.from) { ret = MC_TARGET_PAGE; if (target) target->page = page; @@ -5344,7 +5318,7 @@ static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma, if (!move_anon()) return ret; pc = lookup_page_cgroup(page); - if (PageCgroupUsed(pc) && pc->mem_cgroup == mc.from) { + if (pc->mem_cgroup == mc.from) { ret = MC_TARGET_PAGE; if (target) { get_page(page); @@ -5788,18 +5762,17 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry) return; pc = lookup_page_cgroup(page); + memcg = pc->mem_cgroup; /* Readahead page, never charged */ - if (!PageCgroupUsed(pc)) + if (!memcg) return; - memcg = pc->mem_cgroup; - oldid = swap_cgroup_record(entry, mem_cgroup_id(memcg)); VM_BUG_ON_PAGE(oldid, page); mem_cgroup_swap_statistics(memcg, true); - pc->flags = 0; + pc->mem_cgroup = NULL; if (!mem_cgroup_is_root(memcg)) page_counter_uncharge(&memcg->memory, 1); @@ -5874,7 +5847,7 @@ int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm, * the page lock, which serializes swap cache removal, which * in turn serializes uncharging. */ - if (PageCgroupUsed(pc)) + if (pc->mem_cgroup) goto out; } @@ -6036,13 +6009,13 @@ static void uncharge_list(struct list_head *page_list) VM_BUG_ON_PAGE(page_count(page), page); pc = lookup_page_cgroup(page); - if (!PageCgroupUsed(pc)) + if (!pc->mem_cgroup) continue; /* * Nobody should be changing or seriously looking at - * pc->mem_cgroup and pc->flags at this point, we have - * fully exclusive access to the page. + * pc->mem_cgroup at this point, we have fully + * exclusive access to the page. */ if (memcg != pc->mem_cgroup) { @@ -6065,7 +6038,7 @@ static void uncharge_list(struct list_head *page_list) else nr_file += nr_pages; - pc->flags = 0; + pc->mem_cgroup = NULL; pgpgout++; } while (next != page_list); @@ -6091,7 +6064,7 @@ void mem_cgroup_uncharge(struct page *page) /* Don't touch page->lru of any random page, pre-check: */ pc = lookup_page_cgroup(page); - if (!PageCgroupUsed(pc)) + if (!pc->mem_cgroup) return; INIT_LIST_HEAD(&page->lru); @@ -6127,6 +6100,7 @@ void mem_cgroup_uncharge_list(struct list_head *page_list) void mem_cgroup_migrate(struct page *oldpage, struct page *newpage, bool lrucare) { + struct mem_cgroup *memcg; struct page_cgroup *pc; int isolated; @@ -6143,7 +6117,7 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage, /* Page cache replacement: new page already charged? */ pc = lookup_page_cgroup(newpage); - if (PageCgroupUsed(pc)) + if (pc->mem_cgroup) return; /* @@ -6153,18 +6127,19 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage, * reclaim just put back on the LRU but has not released yet. */ pc = lookup_page_cgroup(oldpage); - if (!PageCgroupUsed(pc)) + memcg = pc->mem_cgroup; + if (!memcg) return; if (lrucare) lock_page_lru(oldpage, &isolated); - pc->flags = 0; + pc->mem_cgroup = NULL; if (lrucare) unlock_page_lru(oldpage, isolated); - commit_charge(newpage, pc->mem_cgroup, lrucare); + commit_charge(newpage, memcg, lrucare); } /* -- cgit v1.2.3 From 354a4783a2ee5ba1cb5a1442cca8ecd4c0ac6d66 Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Wed, 10 Dec 2014 15:44:05 -0800 Subject: mm: memcontrol: inline memcg->move_lock locking The wrappers around taking and dropping the memcg->move_lock spinlock add nothing of value. Inline the spinlock calls into the callsites. 
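The shape of the change, as a standalone userspace sketch with a pthread mutex standing in for the move_lock spinlock; all names are illustrative, not kernel interfaces:

#include <pthread.h>
#include <stdio.h>

struct memcg_model {
        pthread_mutex_t move_lock;
        unsigned long stat;
};

/* Before: trivial wrappers that only forward to the lock primitive. */
static void move_lock_model(struct memcg_model *m)
{
        pthread_mutex_lock(&m->move_lock);
}

static void move_unlock_model(struct memcg_model *m)
{
        pthread_mutex_unlock(&m->move_lock);
}

static void update_stat_wrapped(struct memcg_model *m)
{
        move_lock_model(m);
        m->stat++;
        move_unlock_model(m);
}

/* After: the call site names the lock directly, so the reader sees
 * at a glance which lock protects the update. */
static void update_stat_inlined(struct memcg_model *m)
{
        pthread_mutex_lock(&m->move_lock);
        m->stat++;
        pthread_mutex_unlock(&m->move_lock);
}

int main(void)
{
        struct memcg_model m = { PTHREAD_MUTEX_INITIALIZER, 0 };

        update_stat_wrapped(&m);
        update_stat_inlined(&m);
        printf("stat=%lu\n", m.stat);
        return 0;
}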
Signed-off-by: Johannes Weiner Acked-by: Vladimir Davydov Acked-by: Michal Hocko Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 28 ++++++---------------------- 1 file changed, 6 insertions(+), 22 deletions(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 09fece0eb9f1..a5c9aa4688e8 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -1522,23 +1522,6 @@ static bool mem_cgroup_wait_acct_move(struct mem_cgroup *memcg) return false; } -/* - * Take this lock when - * - a code tries to modify page's memcg while it's USED. - * - a code tries to modify page state accounting in a memcg. - */ -static void move_lock_mem_cgroup(struct mem_cgroup *memcg, - unsigned long *flags) -{ - spin_lock_irqsave(&memcg->move_lock, *flags); -} - -static void move_unlock_mem_cgroup(struct mem_cgroup *memcg, - unsigned long *flags) -{ - spin_unlock_irqrestore(&memcg->move_lock, *flags); -} - #define K(x) ((x) << (PAGE_SHIFT-10)) /** * mem_cgroup_print_oom_info: Print OOM information relevant to memory controller. @@ -2156,9 +2139,9 @@ again: if (atomic_read(&memcg->moving_account) <= 0) return memcg; - move_lock_mem_cgroup(memcg, flags); + spin_lock_irqsave(&memcg->move_lock, *flags); if (memcg != pc->mem_cgroup) { - move_unlock_mem_cgroup(memcg, flags); + spin_unlock_irqrestore(&memcg->move_lock, *flags); goto again; } *locked = true; @@ -2176,7 +2159,7 @@ void mem_cgroup_end_page_stat(struct mem_cgroup *memcg, bool locked, unsigned long flags) { if (memcg && locked) - move_unlock_mem_cgroup(memcg, &flags); + spin_unlock_irqrestore(&memcg->move_lock, flags); rcu_read_unlock(); } @@ -3219,7 +3202,7 @@ static int mem_cgroup_move_account(struct page *page, if (pc->mem_cgroup != from) goto out_unlock; - move_lock_mem_cgroup(from, &flags); + spin_lock_irqsave(&from->move_lock, flags); if (!PageAnon(page) && page_mapped(page)) { __this_cpu_sub(from->stat->count[MEM_CGROUP_STAT_FILE_MAPPED], @@ -3243,7 +3226,8 @@ static int mem_cgroup_move_account(struct page *page, /* caller should have done css_get */ pc->mem_cgroup = to; - move_unlock_mem_cgroup(from, &flags); + spin_unlock_irqrestore(&from->move_lock, flags); + ret = 0; local_irq_disable(); -- cgit v1.2.3 From 4e2f245d38ba86e3922c6c188fe4a0d0688aed88 Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Wed, 10 Dec 2014 15:44:08 -0800 Subject: mm: memcontrol: don't pass a NULL memcg to mem_cgroup_end_move() mem_cgroup_end_move() checks if the passed memcg is NULL, along with a lengthy comment to explain why this seemingly non-sensical situation is even possible. Check in cancel_attach() itself whether can_attach() set up the move context or not, it's a lot more obvious from there. Then remove the check and comment in mem_cgroup_end_move(). Signed-off-by: Johannes Weiner Acked-by: Vladimir Davydov Acked-by: Michal Hocko Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index a5c9aa4688e8..3cd4f1e0bfb3 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -1469,12 +1469,7 @@ static void mem_cgroup_start_move(struct mem_cgroup *memcg) static void mem_cgroup_end_move(struct mem_cgroup *memcg) { - /* - * Now, mem_cgroup_clear_mc() may call this function with NULL. - * We check NULL in callee rather than caller. 
- */ - if (memcg) - atomic_dec(&memcg->moving_account); + atomic_dec(&memcg->moving_account); } /* @@ -5489,7 +5484,8 @@ static int mem_cgroup_can_attach(struct cgroup_subsys_state *css, static void mem_cgroup_cancel_attach(struct cgroup_subsys_state *css, struct cgroup_taskset *tset) { - mem_cgroup_clear_mc(); + if (mc.to) + mem_cgroup_clear_mc(); } static int mem_cgroup_move_charge_pte_range(pmd_t *pmd, -- cgit v1.2.3 From 247b1447b6ccb2890cefc370f8e204592a70774d Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Wed, 10 Dec 2014 15:44:11 -0800 Subject: mm: memcontrol: fold mem_cgroup_start_move()/mem_cgroup_end_move() Having these functions and their documentation split out somewhere else makes it harder, not easier, to follow what's going on. Inline them directly where charge moving is prepared and finished, and put an explanation right next to it. Signed-off-by: Johannes Weiner Cc: Michal Hocko Cc: Vladimir Davydov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 40 ++++++++++++---------------------- 1 file changed, 12 insertions(+), 28 deletions(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 3cd4f1e0bfb3..3734fd6d1132 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -1446,32 +1446,6 @@ int mem_cgroup_swappiness(struct mem_cgroup *memcg) return memcg->swappiness; } -/* - * memcg->moving_account is used for checking possibility that some thread is - * calling move_account(). When a thread on CPU-A starts moving pages under - * a memcg, other threads should check memcg->moving_account under - * rcu_read_lock(), like this: - * - * CPU-A CPU-B - * rcu_read_lock() - * memcg->moving_account+1 if (memcg->mocing_account) - * take heavy locks. - * synchronize_rcu() update something. - * rcu_read_unlock() - * start move here. - */ - -static void mem_cgroup_start_move(struct mem_cgroup *memcg) -{ - atomic_inc(&memcg->moving_account); - synchronize_rcu(); -} - -static void mem_cgroup_end_move(struct mem_cgroup *memcg) -{ - atomic_dec(&memcg->moving_account); -} - /* * A routine for checking "mem" is under move_account() or not. * @@ -5431,7 +5405,8 @@ static void mem_cgroup_clear_mc(void) mc.from = NULL; mc.to = NULL; spin_unlock(&mc.lock); - mem_cgroup_end_move(from); + + atomic_dec(&from->moving_account); } static int mem_cgroup_can_attach(struct cgroup_subsys_state *css, @@ -5464,7 +5439,16 @@ static int mem_cgroup_can_attach(struct cgroup_subsys_state *css, VM_BUG_ON(mc.precharge); VM_BUG_ON(mc.moved_charge); VM_BUG_ON(mc.moved_swap); - mem_cgroup_start_move(from); + + /* + * Signal mem_cgroup_begin_page_stat() to take + * the memcg's move_lock while we're moving + * its pages to another memcg. Then wait for + * already started RCU-only updates to finish. + */ + atomic_inc(&from->moving_account); + synchronize_rcu(); + spin_lock(&mc.lock); mc.from = from; mc.to = memcg; -- cgit v1.2.3 From 4ef461e8f4dd13a2e64c6c8f00c420d62294e2d4 Mon Sep 17 00:00:00 2001 From: Vladimir Davydov Date: Wed, 10 Dec 2014 15:44:16 -0800 Subject: memcg: remove mem_cgroup_reclaimable check from soft reclaim mem_cgroup_reclaimable() checks whether a cgroup has reclaimable pages on *any* NUMA node. However, the only place where it's called is mem_cgroup_soft_reclaim(), which tries to reclaim memory from a *specific* zone. So the way it is used is incorrect - it will return true even if the cgroup doesn't have pages on the zone we're scanning.
I think we can get rid of this check completely, because mem_cgroup_shrink_node_zone(), which is called by mem_cgroup_soft_reclaim() if mem_cgroup_reclaimable() returns true, is equivalent to shrink_lruvec(), which exits almost immediately if the lruvec passed to it is empty. So there's no need to optimize anything here. Besides, we don't have such a check in the general scan path (shrink_zone) either. Signed-off-by: Vladimir Davydov Acked-by: Michal Hocko Acked-by: Johannes Weiner Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 43 ------------------------------------------- 1 file changed, 43 deletions(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 3734fd6d1132..32e3b191857d 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -1743,52 +1743,11 @@ int mem_cgroup_select_victim_node(struct mem_cgroup *memcg) memcg->last_scanned_node = node; return node; } - -/* - * Check all nodes whether it contains reclaimable pages or not. - * For quick scan, we make use of scan_nodes. This will allow us to skip - * unused nodes. But scan_nodes is lazily updated and may not cotain - * enough new information. We need to do double check. - */ -static bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap) -{ - int nid; - - /* - * quick check...making use of scan_node. - * We can skip unused nodes. - */ - if (!nodes_empty(memcg->scan_nodes)) { - for (nid = first_node(memcg->scan_nodes); - nid < MAX_NUMNODES; - nid = next_node(nid, memcg->scan_nodes)) { - - if (test_mem_cgroup_node_reclaimable(memcg, nid, noswap)) - return true; - } - } - /* - * Check rest of nodes. - */ - for_each_node_state(nid, N_MEMORY) { - if (node_isset(nid, memcg->scan_nodes)) - continue; - if (test_mem_cgroup_node_reclaimable(memcg, nid, noswap)) - return true; - } - return false; -} - #else int mem_cgroup_select_victim_node(struct mem_cgroup *memcg) { return 0; } - -static bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap) -{ - return test_mem_cgroup_node_reclaimable(memcg, 0, noswap); -} #endif static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg, @@ -1832,8 +1791,6 @@ static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg, } continue; } - if (!mem_cgroup_reclaimable(victim, false)) - continue; total += mem_cgroup_shrink_node_zone(victim, gfp_mask, false, zone, &nr_scanned); *total_scanned += nr_scanned; -- cgit v1.2.3 From b047501cd9f11d5e1d54ea0f90e2b10754021a0e Mon Sep 17 00:00:00 2001 From: Vladimir Davydov Date: Wed, 10 Dec 2014 15:44:19 -0800 Subject: memcg: use generic slab iterators for showing slabinfo Let's use generic slab_start/next/stop for showing memcg caches info. In contrast to the current implementation, this will work even if all memcg caches' info doesn't fit into a seq buffer (a page), plus it simply looks neater. Actually, the main reason I do this isn't mere cleanup. I'm going to zap the memcg_slab_caches list, because I find it useless provided we have the slab_caches list, and this patch is a step in this direction. It should be noted that before this patch an attempt to read memory.kmem.slabinfo of a cgroup that doesn't have kmem limit set resulted in -EIO, while after this patch it will silently show nothing except the header, but I don't think it will frustrate anyone. 
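The seq interface contract being reused here can be sketched in standalone C; the first-element test mirrors the "if (p == slab_caches.next)" check in the patch, and everything else is invented for the example:

#include <stdio.h>

static const char *caches[] = { "kmalloc-64", "kmalloc-128", "dentry" };
#define NR_CACHES (sizeof(caches) / sizeof(caches[0]))

/* One show routine serves every iterator user: it prints the header
 * only when handed the first element of the list, so no separate
 * header-printing start method is needed. */
static void show(const char *const *p)
{
        if (p == &caches[0])
                printf("# name            <active_objs> <num_objs> ...\n");
        printf("%-16s ...\n", *p);
}

int main(void)
{
        /* stand-in for the seq_file start/next/stop driving loop */
        for (size_t pos = 0; pos < NR_CACHES; pos++)
                show(&caches[pos]);
        return 0;
}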
Signed-off-by: Vladimir Davydov Cc: Christoph Lameter Cc: Pekka Enberg Cc: David Rientjes Cc: Joonsoo Kim Cc: Johannes Weiner Cc: Michal Hocko Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/slab.h | 4 ---- mm/memcontrol.c | 25 ++++--------------------- mm/slab.h | 1 + mm/slab_common.c | 25 +++++++++++++++++++------ 4 files changed, 24 insertions(+), 31 deletions(-) (limited to 'mm') diff --git a/include/linux/slab.h b/include/linux/slab.h index c265bec6a57d..8a2457d42fc8 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -513,10 +513,6 @@ struct memcg_cache_params { int memcg_update_all_caches(int num_memcgs); -struct seq_file; -int cache_show(struct kmem_cache *s, struct seq_file *m); -void print_slabinfo_header(struct seq_file *m); - /** * kmalloc_array - allocate memory for an array. * @n: number of elements. diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 32e3b191857d..9d30129b0d4a 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -2547,26 +2547,6 @@ static struct kmem_cache *memcg_params_to_cache(struct memcg_cache_params *p) return cache_from_memcg_idx(cachep, memcg_cache_id(p->memcg)); } -#ifdef CONFIG_SLABINFO -static int mem_cgroup_slabinfo_read(struct seq_file *m, void *v) -{ - struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); - struct memcg_cache_params *params; - - if (!memcg_kmem_is_active(memcg)) - return -EIO; - - print_slabinfo_header(m); - - mutex_lock(&memcg_slab_mutex); - list_for_each_entry(params, &memcg->memcg_slab_caches, list) - cache_show(memcg_params_to_cache(params), m); - mutex_unlock(&memcg_slab_mutex); - - return 0; -} -#endif - static int memcg_charge_kmem(struct mem_cgroup *memcg, gfp_t gfp, unsigned long nr_pages) { @@ -4708,7 +4688,10 @@ static struct cftype mem_cgroup_files[] = { #ifdef CONFIG_SLABINFO { .name = "kmem.slabinfo", - .seq_show = mem_cgroup_slabinfo_read, + .seq_start = slab_start, + .seq_next = slab_next, + .seq_stop = slab_stop, + .seq_show = memcg_slab_show, }, #endif #endif diff --git a/mm/slab.h b/mm/slab.h index 078acbcf64e8..1cf4005482dd 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -360,5 +360,6 @@ static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node) void *slab_start(struct seq_file *m, loff_t *pos); void *slab_next(struct seq_file *m, void *p, loff_t *pos); void slab_stop(struct seq_file *m, void *p); +int memcg_slab_show(struct seq_file *m, void *p); #endif /* MM_SLAB_H */ diff --git a/mm/slab_common.c b/mm/slab_common.c index 2a3f5ff410cf..e03dd6f2a272 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -811,7 +811,7 @@ EXPORT_SYMBOL(kmalloc_order_trace); #define SLABINFO_RIGHTS S_IRUSR #endif -void print_slabinfo_header(struct seq_file *m) +static void print_slabinfo_header(struct seq_file *m) { /* * Output format version, so at least we can change it @@ -876,7 +876,7 @@ memcg_accumulate_slabinfo(struct kmem_cache *s, struct slabinfo *info) } } -int cache_show(struct kmem_cache *s, struct seq_file *m) +static void cache_show(struct kmem_cache *s, struct seq_file *m) { struct slabinfo sinfo; @@ -895,7 +895,6 @@ int cache_show(struct kmem_cache *s, struct seq_file *m) sinfo.active_slabs, sinfo.num_slabs, sinfo.shared_avail); slabinfo_show_stats(m, s); seq_putc(m, '\n'); - return 0; } static int slab_show(struct seq_file *m, void *p) @@ -904,10 +903,24 @@ static int slab_show(struct seq_file *m, void *p) if (p == slab_caches.next) print_slabinfo_header(m); - if (!is_root_cache(s)) - return 0; - return cache_show(s, m); + if (is_root_cache(s)) + 
cache_show(s, m); + return 0; +} + +#ifdef CONFIG_MEMCG_KMEM +int memcg_slab_show(struct seq_file *m, void *p) +{ + struct kmem_cache *s = list_entry(p, struct kmem_cache, list); + struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m)); + + if (p == slab_caches.next) + print_slabinfo_header(m); + if (!is_root_cache(s) && s->memcg_params->memcg == memcg) + cache_show(s, m); + return 0; } +#endif /* * slabinfo_op - iterator that generates /proc/slabinfo -- cgit v1.2.3 From e544a4e74e02108035de69f97fde7bdf19dba978 Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Wed, 10 Dec 2014 15:44:22 -0800 Subject: thp: do not mark zero-page pmd write-protected explicitly Zero pages can be used only in anonymous mappings, which never have writable vma->vm_page_prot: see protection_map in mm/mmap.c and __PX1X definitions. Let's drop redundant pmd_wrprotect() in set_huge_zero_page(). Signed-off-by: "Kirill A. Shutemov" Cc: Andrea Arcangeli Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/huge_memory.c | 1 - 1 file changed, 1 deletion(-) (limited to 'mm') diff --git a/mm/huge_memory.c b/mm/huge_memory.c index de984159cf0b..5b2c6875fc38 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -784,7 +784,6 @@ static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm, if (!pmd_none(*pmd)) return false; entry = mk_pmd(zero_page, vma->vm_page_prot); - entry = pmd_wrprotect(entry); entry = pmd_mkhuge(entry); pgtable_trans_huge_deposit(mm, pmd, pgtable); set_pmd_at(mm, haddr, pmd, entry); -- cgit v1.2.3 From 312722cbb2a6e12b74177f025a8ee7189816b04b Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Wed, 10 Dec 2014 15:44:25 -0800 Subject: mm: memcontrol: shorten the page statistics update slowpath While moving charges from one memcg to another, page stat updates must acquire the old memcg's move_lock to prevent double accounting. That situation is denoted by an increased memcg->moving_account. However, the charge moving code declares this way too early for now, even before summing up the RSS and pre-allocating destination charges. Shorten this slowpath mode by increasing memcg->moving_account only right before walking the task's address space with the intention of actually moving the pages. Signed-off-by: Johannes Weiner Acked-by: Michal Hocko Reviewed-by: Vladimir Davydov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 21 ++++++++------------- 1 file changed, 8 insertions(+), 13 deletions(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 9d30129b0d4a..9073d07c1149 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -5333,8 +5333,6 @@ static void __mem_cgroup_clear_mc(void) static void mem_cgroup_clear_mc(void) { - struct mem_cgroup *from = mc.from; - /* * we must clear moving_task before waking up waiters at the end of * task migration. @@ -5345,8 +5343,6 @@ static void mem_cgroup_clear_mc(void) mc.from = NULL; mc.to = NULL; spin_unlock(&mc.lock); - - atomic_dec(&from->moving_account); } static int mem_cgroup_can_attach(struct cgroup_subsys_state *css, @@ -5380,15 +5376,6 @@ static int mem_cgroup_can_attach(struct cgroup_subsys_state *css, VM_BUG_ON(mc.moved_charge); VM_BUG_ON(mc.moved_swap); - /* - * Signal mem_cgroup_begin_page_stat() to take - * the memcg's move_lock while we're moving - * its pages to another memcg. Then wait for - * already started RCU-only updates to finish.
- */ - atomic_inc(&from->moving_account); - synchronize_rcu(); - spin_lock(&mc.lock); mc.from = from; mc.to = memcg; @@ -5520,6 +5507,13 @@ static void mem_cgroup_move_charge(struct mm_struct *mm) struct vm_area_struct *vma; lru_add_drain_all(); + /* + * Signal mem_cgroup_begin_page_stat() to take the memcg's + * move_lock while we're moving its pages to another memcg. + * Then wait for already started RCU-only updates to finish. + */ + atomic_inc(&mc.from->moving_account); + synchronize_rcu(); retry: if (unlikely(!down_read_trylock(&mm->mmap_sem))) { /* @@ -5552,6 +5546,7 @@ retry: break; } up_read(&mm->mmap_sem); + atomic_dec(&mc.from->moving_account); } static void mem_cgroup_move_task(struct cgroup_subsys_state *css, -- cgit v1.2.3 From c01f46c7c725f0a1330673bc3c767b89a3b2dbc0 Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Wed, 10 Dec 2014 15:44:27 -0800 Subject: mm: memcontrol: remove bogus NULL check after mem_cgroup_from_task() That function acts like a typecast - unless NULL is passed in, no NULL can come out. task_in_mem_cgroup() callers don't pass NULL tasks. Signed-off-by: Johannes Weiner Reviewed-by: Vladimir Davydov Acked-by: Michal Hocko Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 9073d07c1149..367cc57df362 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -1356,7 +1356,7 @@ static bool mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg, bool task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *memcg) { - struct mem_cgroup *curr = NULL; + struct mem_cgroup *curr; struct task_struct *p; bool ret; @@ -1372,8 +1372,7 @@ bool task_in_mem_cgroup(struct task_struct *task, */ rcu_read_lock(); curr = mem_cgroup_from_task(task); - if (curr) - css_get(&curr->css); + css_get(&curr->css); rcu_read_unlock(); } /* -- cgit v1.2.3 From 413918bb61b4fa027baa3e79546c47f15e4b9ea8 Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Wed, 10 Dec 2014 15:44:30 -0800 Subject: mm: memcontrol: pull the NULL check from __mem_cgroup_same_or_subtree() The NULL in mm_match_cgroup() comes from a possibly exiting mm->owner. It makes a lot more sense to check where it's looked up, rather than check for it in __mem_cgroup_same_or_subtree() where it's unexpected. No other callsite passes NULL to __mem_cgroup_same_or_subtree(). 
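The hoisting pattern in isolation, as a standalone C sketch with invented names (in the kernel case the one failing lookup is the possibly-exiting mm->owner):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct node { const struct node *parent; };

/* The shared predicate may now assume both arguments are valid. */
static bool is_descendant(const struct node *n, const struct node *root)
{
        for (; n; n = n->parent)
                if (n == root)
                        return true;
        return false;
}

/* The one call site whose lookup can legitimately fail does its own
 * NULL check, right where the NULL is produced. */
static bool match(const struct node *looked_up, const struct node *root)
{
        bool ret = false;

        if (looked_up)
                ret = is_descendant(looked_up, root);
        return ret;
}

int main(void)
{
        struct node root = { NULL };
        struct node child = { &root };

        printf("%d %d\n", match(&child, &root), match(NULL, &root));
        return 0;
}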
Signed-off-by: Johannes Weiner Reviewed-by: Vladimir Davydov Acked-by: Michal Hocko Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/memcontrol.h | 5 +++-- mm/memcontrol.c | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) (limited to 'mm') diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index ea007615e8f9..e32ab948f589 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -83,11 +83,12 @@ static inline bool mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *memcg) { struct mem_cgroup *task_memcg; - bool match; + bool match = false; rcu_read_lock(); task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner)); - match = __mem_cgroup_same_or_subtree(memcg, task_memcg); + if (task_memcg) + match = __mem_cgroup_same_or_subtree(memcg, task_memcg); rcu_read_unlock(); return match; } diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 367cc57df362..e5dcebd71dfb 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -1337,7 +1337,7 @@ bool __mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg, { if (root_memcg == memcg) return true; - if (!root_memcg->use_hierarchy || !memcg) + if (!root_memcg->use_hierarchy) return false; return cgroup_is_descendant(memcg->css.cgroup, root_memcg->css.cgroup); } -- cgit v1.2.3 From 2314b42db67be30b747122d65c6cd2c85da34538 Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Wed, 10 Dec 2014 15:44:33 -0800 Subject: mm: memcontrol: drop bogus RCU locking from mem_cgroup_same_or_subtree() None of the mem_cgroup_same_or_subtree() callers actually require it to take the RCU lock, either because they hold it themselves or they have css references. Remove it. To make the API change clear, rename the leftover helper to mem_cgroup_is_descendant() to match cgroup_is_descendant(). 
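A standalone sketch of the resulting convention, with a pthread rwlock standing in for RCU and invented names throughout: the predicate does no locking of its own, and a caller that cannot otherwise guarantee the nodes' lifetime pins them around the call:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct node { struct node *parent; };

/* Stand-in for rcu_read_lock(): whatever mechanism a caller uses to
 * keep both nodes alive across the walk. */
static pthread_rwlock_t pin_lock = PTHREAD_RWLOCK_INITIALIZER;

/* A plain pointer walk with no locking of its own; the validity of
 * n and root is entirely the caller's responsibility. */
static bool is_descendant(struct node *n, struct node *root)
{
        for (; n; n = n->parent)
                if (n == root)
                        return true;
        return false;
}

int main(void)
{
        struct node root = { NULL };
        struct node child = { &root };
        bool ret;

        pthread_rwlock_rdlock(&pin_lock);       /* caller-side pinning */
        ret = is_descendant(&child, &root);
        pthread_rwlock_unlock(&pin_lock);

        printf("descendant=%d\n", ret);
        return 0;
}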
Signed-off-by: Johannes Weiner Reviewed-by: Vladimir Davydov Acked-by: Michal Hocko Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/memcontrol.h | 13 +++++----- mm/memcontrol.c | 59 +++++++++++++--------------------------------- mm/oom_kill.c | 4 ++-- 3 files changed, 24 insertions(+), 52 deletions(-) (limited to 'mm') diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index e32ab948f589..d4575a1d6e99 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -68,10 +68,9 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage, struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *); struct lruvec *mem_cgroup_page_lruvec(struct page *, struct zone *); -bool __mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg, - struct mem_cgroup *memcg); -bool task_in_mem_cgroup(struct task_struct *task, - const struct mem_cgroup *memcg); +bool mem_cgroup_is_descendant(struct mem_cgroup *memcg, + struct mem_cgroup *root); +bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg); extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page); extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p); @@ -79,8 +78,8 @@ extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p); extern struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg); extern struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css); -static inline -bool mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *memcg) +static inline bool mm_match_cgroup(struct mm_struct *mm, + struct mem_cgroup *memcg) { struct mem_cgroup *task_memcg; bool match = false; @@ -88,7 +87,7 @@ bool mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *memcg) rcu_read_lock(); task_memcg = mem_cgroup_from_task(rcu_dereference(mm->owner)); if (task_memcg) - match = __mem_cgroup_same_or_subtree(memcg, task_memcg); + match = mem_cgroup_is_descendant(task_memcg, memcg); rcu_read_unlock(); return match; } diff --git a/mm/memcontrol.c b/mm/memcontrol.c index e5dcebd71dfb..b841bf430179 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -1328,41 +1328,24 @@ void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru, VM_BUG_ON((long)(*lru_size) < 0); } -/* - * Checks whether given mem is same or in the root_mem_cgroup's - * hierarchy subtree - */ -bool __mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg, - struct mem_cgroup *memcg) +bool mem_cgroup_is_descendant(struct mem_cgroup *memcg, struct mem_cgroup *root) { - if (root_memcg == memcg) + if (root == memcg) return true; - if (!root_memcg->use_hierarchy) + if (!root->use_hierarchy) return false; - return cgroup_is_descendant(memcg->css.cgroup, root_memcg->css.cgroup); -} - -static bool mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg, - struct mem_cgroup *memcg) -{ - bool ret; - - rcu_read_lock(); - ret = __mem_cgroup_same_or_subtree(root_memcg, memcg); - rcu_read_unlock(); - return ret; + return cgroup_is_descendant(memcg->css.cgroup, root->css.cgroup); } -bool task_in_mem_cgroup(struct task_struct *task, - const struct mem_cgroup *memcg) +bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg) { - struct mem_cgroup *curr; + struct mem_cgroup *task_memcg; struct task_struct *p; bool ret; p = find_lock_task_mm(task); if (p) { - curr = get_mem_cgroup_from_mm(p->mm); + task_memcg = get_mem_cgroup_from_mm(p->mm); task_unlock(p); } else { /* @@ 
-1371,18 +1354,12 @@ bool task_in_mem_cgroup(struct task_struct *task, * killed to prevent needlessly killing additional tasks. */ rcu_read_lock(); - curr = mem_cgroup_from_task(task); - css_get(&curr->css); + task_memcg = mem_cgroup_from_task(task); + css_get(&task_memcg->css); rcu_read_unlock(); } - /* - * We should check use_hierarchy of "memcg" not "curr". Because checking - * use_hierarchy of "curr" here make this function true if hierarchy is - * enabled in "curr" and "curr" is a child of "memcg" in *cgroup* - * hierarchy(even if use_hierarchy is disabled in "memcg"). - */ - ret = mem_cgroup_same_or_subtree(memcg, curr); - css_put(&curr->css); + ret = mem_cgroup_is_descendant(task_memcg, memcg); + css_put(&task_memcg->css); return ret; } @@ -1467,8 +1444,8 @@ static bool mem_cgroup_under_move(struct mem_cgroup *memcg) if (!from) goto unlock; - ret = mem_cgroup_same_or_subtree(memcg, from) - || mem_cgroup_same_or_subtree(memcg, to); + ret = mem_cgroup_is_descendant(from, memcg) || + mem_cgroup_is_descendant(to, memcg); unlock: spin_unlock(&mc.lock); return ret; @@ -1900,12 +1877,8 @@ static int memcg_oom_wake_function(wait_queue_t *wait, oom_wait_info = container_of(wait, struct oom_wait_info, wait); oom_wait_memcg = oom_wait_info->memcg; - /* - * Both of oom_wait_info->memcg and wake_memcg are stable under us. - * Then we can use css_is_ancestor without taking care of RCU. - */ - if (!mem_cgroup_same_or_subtree(oom_wait_memcg, wake_memcg) - && !mem_cgroup_same_or_subtree(wake_memcg, oom_wait_memcg)) + if (!mem_cgroup_is_descendant(wake_memcg, oom_wait_memcg) && + !mem_cgroup_is_descendant(oom_wait_memcg, wake_memcg)) return 0; return autoremove_wake_function(wait, mode, sync, arg); } @@ -2225,7 +2198,7 @@ static void drain_all_stock(struct mem_cgroup *root_memcg) memcg = stock->cached; if (!memcg || !stock->nr_pages) continue; - if (!mem_cgroup_same_or_subtree(root_memcg, memcg)) + if (!mem_cgroup_is_descendant(memcg, root_memcg)) continue; if (!test_and_set_bit(FLUSHING_CACHED_CHARGE, &stock->flags)) { if (cpu == curcpu) diff --git a/mm/oom_kill.c b/mm/oom_kill.c index 5340f6b91312..3b014d326151 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c @@ -119,7 +119,7 @@ found: /* return true if the task is not adequate as candidate victim task. */ static bool oom_unkillable_task(struct task_struct *p, - const struct mem_cgroup *memcg, const nodemask_t *nodemask) + struct mem_cgroup *memcg, const nodemask_t *nodemask) { if (is_global_init(p)) return true; @@ -353,7 +353,7 @@ static struct task_struct *select_bad_process(unsigned int *ppoints, * State information includes task's pid, uid, tgid, vm size, rss, nr_ptes, * swapents, oom_score_adj value, and name. */ -static void dump_tasks(const struct mem_cgroup *memcg, const nodemask_t *nodemask) +static void dump_tasks(struct mem_cgroup *memcg, const nodemask_t *nodemask) { struct task_struct *p; struct task_struct *task; -- cgit v1.2.3 From e4bd6a0248b2a026e07c19995c41a4cb5a49d797 Mon Sep 17 00:00:00 2001 From: Michal Hocko Date: Wed, 10 Dec 2014 15:44:39 -0800 Subject: mm, memcg: fix potential undefined behaviour in page stat accounting Since commit d7365e783edb ("mm: memcontrol: fix missed end-writeback page accounting") mem_cgroup_end_page_stat consumes locked and flags variables directly rather than via pointers which might trigger C undefined behavior as those variables are initialized only in the slow path of mem_cgroup_begin_page_stat. 
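The hazard reduces to a few lines of standalone C; the names are invented, but the by-value and by-pointer signatures mirror the broken and fixed conventions:

#include <stdbool.h>
#include <stdio.h>

static int token;       /* stands in for a memcg the fast path lacks */

/* Like the begin routine: *locked is written on the slow path only. */
static void *begin_stat(bool *locked, bool slow)
{
        if (!slow)
                return NULL;    /* fast path: *locked never written */
        *locked = true;
        return &token;
}

/* Broken convention: to make this call at all, the caller must load
 * "locked" and pass it by value - an indeterminate read on the fast
 * path, which is undefined behavior even if the value is ignored. */
static void end_stat_by_value(void *memcg, bool locked)
{
        if (memcg && locked)
                printf("unlock (by value)\n");
}

/* Fixed convention: && short-circuits, so *locked is only ever read
 * once begin_stat() has actually initialized it. */
static void end_stat_by_pointer(void *memcg, bool *locked)
{
        if (memcg && *locked)
                printf("unlock (by pointer)\n");
}

int main(void)
{
        bool locked;    /* deliberately left uninitialized */
        void *memcg;

        memcg = begin_stat(&locked, false);
        /* end_stat_by_value(memcg, locked): would load garbage here */
        end_stat_by_pointer(memcg, &locked);    /* never reads it */

        memcg = begin_stat(&locked, true);
        end_stat_by_value(memcg, locked);       /* fine: now initialized */
        end_stat_by_pointer(memcg, &locked);
        return 0;
}

Built with -fsanitize=undefined, the by-value call on the fast path can produce exactly the kind of invalid _Bool load report quoted below.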
Although mem_cgroup_end_page_stat handles parameters correctly and touches them only when they hold a sensible value, it is the caller which loads a potentially uninitialized value, which then might allow the compiler to do crazy things. I haven't seen any warning from gcc and it seems that the current version (4.9) doesn't exploit this type of undefined behavior but Sasha has reported the following: UBSan: Undefined behaviour in mm/rmap.c:1084:2 load of value 255 is not a valid value for type '_Bool' CPU: 4 PID: 8304 Comm: rngd Not tainted 3.18.0-rc2-next-20141029-sasha-00039-g77ed13d-dirty #1427 Call Trace: dump_stack (lib/dump_stack.c:52) ubsan_epilogue (lib/ubsan.c:159) __ubsan_handle_load_invalid_value (lib/ubsan.c:482) page_remove_rmap (mm/rmap.c:1084 mm/rmap.c:1096) unmap_page_range (./arch/x86/include/asm/atomic.h:27 include/linux/mm.h:463 mm/memory.c:1146 mm/memory.c:1258 mm/memory.c:1279 mm/memory.c:1303) unmap_single_vma (mm/memory.c:1348) unmap_vmas (mm/memory.c:1377 (discriminator 3)) exit_mmap (mm/mmap.c:2837) mmput (kernel/fork.c:659) do_exit (./arch/x86/include/asm/thread_info.h:168 kernel/exit.c:462 kernel/exit.c:747) do_group_exit (include/linux/sched.h:775 kernel/exit.c:873) SyS_exit_group (kernel/exit.c:901) tracesys_phase2 (arch/x86/kernel/entry_64.S:529) Fix this by using pointer parameters for both locked and flags and be more robust against future compiler changes even though the current code is implemented correctly. Signed-off-by: Michal Hocko Reported-by: Sasha Levin Acked-by: Johannes Weiner Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/memcontrol.h | 6 +++--- mm/memcontrol.c | 8 ++++---- mm/page-writeback.c | 4 ++-- mm/rmap.c | 4 ++-- 4 files changed, 11 insertions(+), 11 deletions(-) (limited to 'mm') diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index d4575a1d6e99..de018766be45 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -141,8 +141,8 @@ static inline bool mem_cgroup_disabled(void) struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page, bool *locked, unsigned long *flags); -void mem_cgroup_end_page_stat(struct mem_cgroup *memcg, bool locked, - unsigned long flags); +void mem_cgroup_end_page_stat(struct mem_cgroup *memcg, bool *locked, + unsigned long *flags); void mem_cgroup_update_page_stat(struct mem_cgroup *memcg, enum mem_cgroup_stat_index idx, int val); @@ -297,7 +297,7 @@ static inline struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page, } static inline void mem_cgroup_end_page_stat(struct mem_cgroup *memcg, - bool locked, unsigned long flags) + bool *locked, unsigned long *flags) { } diff --git a/mm/memcontrol.c b/mm/memcontrol.c index b841bf430179..031ca345677b 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -2053,11 +2053,11 @@ again: * @locked: value received from mem_cgroup_begin_page_stat() * @flags: value received from mem_cgroup_begin_page_stat() */ -void mem_cgroup_end_page_stat(struct mem_cgroup *memcg, bool locked, - unsigned long flags) +void mem_cgroup_end_page_stat(struct mem_cgroup *memcg, bool *locked, + unsigned long *flags) { - if (memcg && locked) - spin_unlock_irqrestore(&memcg->move_lock, flags); + if (memcg && *locked) + spin_unlock_irqrestore(&memcg->move_lock, *flags); rcu_read_unlock(); } diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 19ceae87522d..d5d81f5384d1 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -2357,7 +2357,7 @@ int test_clear_page_writeback(struct page *page) dec_zone_page_state(page,
NR_WRITEBACK); inc_zone_page_state(page, NR_WRITTEN); } - mem_cgroup_end_page_stat(memcg, locked, memcg_flags); + mem_cgroup_end_page_stat(memcg, &locked, &memcg_flags); return ret; } @@ -2399,7 +2399,7 @@ int __test_set_page_writeback(struct page *page, bool keep_write) mem_cgroup_inc_page_stat(memcg, MEM_CGROUP_STAT_WRITEBACK); inc_zone_page_state(page, NR_WRITEBACK); } - mem_cgroup_end_page_stat(memcg, locked, memcg_flags); + mem_cgroup_end_page_stat(memcg, &locked, &memcg_flags); return ret; } diff --git a/mm/rmap.c b/mm/rmap.c index 3e4c7213210c..45eba36fd673 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1053,7 +1053,7 @@ void page_add_file_rmap(struct page *page) __inc_zone_page_state(page, NR_FILE_MAPPED); mem_cgroup_inc_page_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED); } - mem_cgroup_end_page_stat(memcg, locked, flags); + mem_cgroup_end_page_stat(memcg, &locked, &flags); } static void page_remove_file_rmap(struct page *page) @@ -1083,7 +1083,7 @@ static void page_remove_file_rmap(struct page *page) if (unlikely(PageMlocked(page))) clear_page_mlock(page); out: - mem_cgroup_end_page_stat(memcg, locked, flags); + mem_cgroup_end_page_stat(memcg, &locked, &flags); } /** -- cgit v1.2.3 From 569f48b85813f053aeab35429ba1657cb7f426db Mon Sep 17 00:00:00 2001 From: Hillf Danton Date: Wed, 10 Dec 2014 15:44:41 -0800 Subject: mm: hugetlb: fix __unmap_hugepage_range() First, after flushing the TLB, there is no need to scan the ptes from the start again. Second, before bailing out of the loop, the address is advanced one step. Signed-off-by: Hillf Danton Reviewed-by: Michal Hocko Acked-by: David Rientjes Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/hugetlb.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 9fd722769927..30cd96879152 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -2638,8 +2638,9 @@ void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma, tlb_start_vma(tlb, vma); mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end); + address = start; again: - for (address = start; address < end; address += sz) { + for (; address < end; address += sz) { ptep = huge_pte_offset(mm, address); if (!ptep) continue; @@ -2686,6 +2687,7 @@ again: page_remove_rmap(page); force_flush = !__tlb_remove_page(tlb, page); if (force_flush) { + address += sz; spin_unlock(ptl); break; } -- cgit v1.2.3 From 26086de3fcc9694b9f9c1517e4fd5d1d33d6eb8c Mon Sep 17 00:00:00 2001 From: Wei Yuan Date: Wed, 10 Dec 2014 15:44:44 -0800 Subject: mm: fix a spelling mistake Signed-off-by: Wei Yuan Acked-by: Rik van Riel Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/page_alloc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 7352aa45a335..97b6966816e5 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -1739,7 +1739,7 @@ static bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark, int classzone_idx, int alloc_flags, long free_pages) { - /* free_pages my go negative - that's OK */ + /* free_pages may go negative - that's OK */ long min = mark; int o; long free_cma = 0; -- cgit v1.2.3 From a1ad28973d3d9fe23fa73fbb66fba077a987dd81 Mon Sep 17 00:00:00 2001 From: Li Haifeng Date: Wed, 10 Dec 2014 15:44:47 -0800 Subject: mm/frontswap.c: fix the condition in BUG_ON The largest valid index of a swap device is MAX_SWAPFILES-1, so the type must be less than MAX_SWAPFILES.
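The off-by-one in isolation, as a standalone sketch with an invented MAX_SLOTS standing in for MAX_SWAPFILES:

#include <assert.h>

#define MAX_SLOTS 32    /* stands in for MAX_SWAPFILES */
static int owner[MAX_SLOTS];

static void claim(unsigned int type, int id)
{
        /* "type > MAX_SLOTS" would wrongly accept type == MAX_SLOTS,
         * one past the end; valid indices are 0 .. MAX_SLOTS-1. */
        assert(type < MAX_SLOTS);
        owner[type] = id;
}

int main(void)
{
        claim(0, 1);
        claim(MAX_SLOTS - 1, 2);        /* largest valid index */
        return 0;
}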
Signed-off-by: Haifeng Li Acked-by: Konrad Rzeszutek Wilk Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/frontswap.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'mm') diff --git a/mm/frontswap.c b/mm/frontswap.c index f2a3571c6e22..8d82809eb085 100644 --- a/mm/frontswap.c +++ b/mm/frontswap.c @@ -182,7 +182,7 @@ void __frontswap_init(unsigned type, unsigned long *map) if (frontswap_ops) frontswap_ops->init(type); else { - BUG_ON(type > MAX_SWAPFILES); + BUG_ON(type >= MAX_SWAPFILES); set_bit(type, need_init); } } -- cgit v1.2.3 From 22811c6bc3c764d8935383ad0ddd7a96b45d75dc Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Wed, 10 Dec 2014 15:44:50 -0800 Subject: mm: memcontrol: remove stale page_cgroup_lock comment There is no cgroup-specific page lock anymore. Signed-off-by: Johannes Weiner Acked-by: Michal Hocko Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 4 ---- 1 file changed, 4 deletions(-) (limited to 'mm') diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 031ca345677b..78cb3b05a9fa 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -2467,10 +2467,6 @@ static void commit_charge(struct page *page, struct mem_cgroup *memcg, int isolated; VM_BUG_ON_PAGE(pc->mem_cgroup, page); - /* - * we don't need page_cgroup_lock about tail pages, becase they are not - * accessed by any other context at this point. - */ /* * In some cases, SwapCache and FUSE(splice_buf->radixtree), the page -- cgit v1.2.3 From 1306a85aed3ec3db98945aafb7dfbe5648a1203c Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Wed, 10 Dec 2014 15:44:52 -0800 Subject: mm: embed the memcg pointer directly into struct page Memory cgroups used to have 5 per-page pointers. To allow users to disable that amount of overhead during runtime, those pointers were allocated in a separate array, with a translation layer between them and struct page. There is now only one page pointer remaining: the memcg pointer, that indicates which cgroup the page is associated with when charged. The complexity of runtime allocation and the runtime translation overhead is no longer justified to save that *potential* 0.19% of memory. With CONFIG_SLUB, page->mem_cgroup actually sits in the doubleword padding after the page->private member and doesn't even increase struct page, and then this patch actually saves space. Remaining users that care can still compile their kernels without CONFIG_MEMCG. text data bss dec hex filename 8828345 1725264 983040 11536649 b00909 vmlinux.old 8827425 1725264 966656 11519345 afc571 vmlinux.new [mhocko@suse.cz: update Documentation/cgroups/memory.txt] Signed-off-by: Johannes Weiner Acked-by: Michal Hocko Acked-by: Vladimir Davydov Acked-by: David S. Miller Acked-by: KAMEZAWA Hiroyuki Cc: "Kirill A. 
Shutemov" Cc: Michal Hocko Cc: Vladimir Davydov Cc: Tejun Heo Cc: Joonsoo Kim Acked-by: Konstantin Khlebnikov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- Documentation/cgroups/memory.txt | 5 + include/linux/memcontrol.h | 6 +- include/linux/mm_types.h | 5 + include/linux/mmzone.h | 12 -- include/linux/page_cgroup.h | 53 ------- init/main.c | 7 - mm/memcontrol.c | 124 +++++---------- mm/page_alloc.c | 2 - mm/page_cgroup.c | 319 --------------------------------------- 9 files changed, 46 insertions(+), 487 deletions(-) (limited to 'mm') diff --git a/Documentation/cgroups/memory.txt b/Documentation/cgroups/memory.txt index 67613ff0270c..46b2b5080317 100644 --- a/Documentation/cgroups/memory.txt +++ b/Documentation/cgroups/memory.txt @@ -1,5 +1,10 @@ Memory Resource Controller +NOTE: This document is hopelessly outdated and it asks for a complete + rewrite. It still contains a useful information so we are keeping it + here but make sure to check the current code if you need a deeper + understanding. + NOTE: The Memory Resource Controller has generically been referred to as the memory controller in this document. Do not confuse memory controller used here with the memory controller that is used in hardware. diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index de018766be45..c4d080875164 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -25,7 +25,6 @@ #include struct mem_cgroup; -struct page_cgroup; struct page; struct mm_struct; struct kmem_cache; @@ -466,8 +465,6 @@ memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order) * memcg_kmem_uncharge_pages: uncharge pages from memcg * @page: pointer to struct page being freed * @order: allocation order. - * - * there is no need to specify memcg here, since it is embedded in page_cgroup */ static inline void memcg_kmem_uncharge_pages(struct page *page, int order) @@ -484,8 +481,7 @@ memcg_kmem_uncharge_pages(struct page *page, int order) * * Needs to be called after memcg_kmem_newpage_charge, regardless of success or * failure of the allocation. if @page is NULL, this function will revert the - * charges. Otherwise, it will commit the memcg given by @memcg to the - * corresponding page_cgroup. + * charges. Otherwise, it will commit @page to @memcg. */ static inline void memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order) diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 004e9d17b47e..bf9f57529dcf 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -22,6 +22,7 @@ #define AT_VECTOR_SIZE (2*(AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1)) struct address_space; +struct mem_cgroup; #define USE_SPLIT_PTE_PTLOCKS (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS) #define USE_SPLIT_PMD_PTLOCKS (USE_SPLIT_PTE_PTLOCKS && \ @@ -167,6 +168,10 @@ struct page { struct page *first_page; /* Compound tail pages */ }; +#ifdef CONFIG_MEMCG + struct mem_cgroup *mem_cgroup; +#endif + /* * On machines where all RAM is mapped into kernel address space, * we can simply calculate the virtual address. 
On machines with diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index ffe66e381c04..3879d7664dfc 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -722,9 +722,6 @@ typedef struct pglist_data { int nr_zones; #ifdef CONFIG_FLAT_NODE_MEM_MAP /* means !SPARSEMEM */ struct page *node_mem_map; -#ifdef CONFIG_MEMCG - struct page_cgroup *node_page_cgroup; -#endif #endif #ifndef CONFIG_NO_BOOTMEM struct bootmem_data *bdata; @@ -1078,7 +1075,6 @@ static inline unsigned long early_pfn_to_nid(unsigned long pfn) #define SECTION_ALIGN_DOWN(pfn) ((pfn) & PAGE_SECTION_MASK) struct page; -struct page_cgroup; struct mem_section { /* * This is, logically, a pointer to an array of struct @@ -1096,14 +1092,6 @@ struct mem_section { /* See declaration of similar field in struct zone */ unsigned long *pageblock_flags; -#ifdef CONFIG_MEMCG - /* - * If !SPARSEMEM, pgdat doesn't have page_cgroup pointer. We use - * section. (see memcontrol.h/page_cgroup.h about this.) - */ - struct page_cgroup *page_cgroup; - unsigned long pad; -#endif /* * WARNING: mem_section must be a power-of-2 in size for the * calculation and use of SECTION_ROOT_MASK to make sense. diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h index 1289be6b436c..65be35785c86 100644 --- a/include/linux/page_cgroup.h +++ b/include/linux/page_cgroup.h @@ -1,59 +1,6 @@ #ifndef __LINUX_PAGE_CGROUP_H #define __LINUX_PAGE_CGROUP_H -struct pglist_data; - -#ifdef CONFIG_MEMCG -struct mem_cgroup; - -/* - * Page Cgroup can be considered as an extended mem_map. - * A page_cgroup page is associated with every page descriptor. The - * page_cgroup helps us identify information about the cgroup - * All page cgroups are allocated at boot or memory hotplug event, - * then the page cgroup for pfn always exists. - */ -struct page_cgroup { - struct mem_cgroup *mem_cgroup; -}; - -extern void pgdat_page_cgroup_init(struct pglist_data *pgdat); - -#ifdef CONFIG_SPARSEMEM -static inline void page_cgroup_init_flatmem(void) -{ -} -extern void page_cgroup_init(void); -#else -extern void page_cgroup_init_flatmem(void); -static inline void page_cgroup_init(void) -{ -} -#endif - -struct page_cgroup *lookup_page_cgroup(struct page *page); - -#else /* !CONFIG_MEMCG */ -struct page_cgroup; - -static inline void pgdat_page_cgroup_init(struct pglist_data *pgdat) -{ -} - -static inline struct page_cgroup *lookup_page_cgroup(struct page *page) -{ - return NULL; -} - -static inline void page_cgroup_init(void) -{ -} - -static inline void page_cgroup_init_flatmem(void) -{ -} -#endif /* CONFIG_MEMCG */ - #include #ifdef CONFIG_MEMCG_SWAP diff --git a/init/main.c b/init/main.c index 321d0ceb26d3..d2e4ead4891f 100644 --- a/init/main.c +++ b/init/main.c @@ -51,7 +51,6 @@ #include #include #include -#include #include #include #include @@ -485,11 +484,6 @@ void __init __weak thread_info_cache_init(void) */ static void __init mm_init(void) { - /* - * page_cgroup requires contiguous pages, - * bigger than MAX_ORDER unless SPARSEMEM. 
- */ - page_cgroup_init_flatmem(); mem_init(); kmem_cache_init(); percpu_init_late(); @@ -627,7 +621,6 @@ asmlinkage __visible void __init start_kernel(void) initrd_start = 0; } #endif - page_cgroup_init(); debug_objects_mem_init(); kmemleak_init(); setup_per_cpu_pageset(); diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 78cb3b05a9fa..b864067791dc 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -1274,7 +1274,6 @@ struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone) { struct mem_cgroup_per_zone *mz; struct mem_cgroup *memcg; - struct page_cgroup *pc; struct lruvec *lruvec; if (mem_cgroup_disabled()) { @@ -1282,8 +1281,7 @@ struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone) goto out; } - pc = lookup_page_cgroup(page); - memcg = pc->mem_cgroup; + memcg = page->mem_cgroup; /* * Swapcache readahead pages are added to the LRU - and * possibly migrated - before they are charged. @@ -2020,16 +2018,13 @@ struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page, unsigned long *flags) { struct mem_cgroup *memcg; - struct page_cgroup *pc; rcu_read_lock(); if (mem_cgroup_disabled()) return NULL; - - pc = lookup_page_cgroup(page); again: - memcg = pc->mem_cgroup; + memcg = page->mem_cgroup; if (unlikely(!memcg)) return NULL; @@ -2038,7 +2033,7 @@ again: return memcg; spin_lock_irqsave(&memcg->move_lock, *flags); - if (memcg != pc->mem_cgroup) { + if (memcg != page->mem_cgroup) { spin_unlock_irqrestore(&memcg->move_lock, *flags); goto again; } @@ -2405,15 +2400,12 @@ static struct mem_cgroup *mem_cgroup_lookup(unsigned short id) struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page) { struct mem_cgroup *memcg; - struct page_cgroup *pc; unsigned short id; swp_entry_t ent; VM_BUG_ON_PAGE(!PageLocked(page), page); - pc = lookup_page_cgroup(page); - memcg = pc->mem_cgroup; - + memcg = page->mem_cgroup; if (memcg) { if (!css_tryget_online(&memcg->css)) memcg = NULL; @@ -2463,10 +2455,9 @@ static void unlock_page_lru(struct page *page, int isolated) static void commit_charge(struct page *page, struct mem_cgroup *memcg, bool lrucare) { - struct page_cgroup *pc = lookup_page_cgroup(page); int isolated; - VM_BUG_ON_PAGE(pc->mem_cgroup, page); + VM_BUG_ON_PAGE(page->mem_cgroup, page); /* * In some cases, SwapCache and FUSE(splice_buf->radixtree), the page @@ -2477,7 +2468,7 @@ static void commit_charge(struct page *page, struct mem_cgroup *memcg, /* * Nobody should be changing or seriously looking at - * pc->mem_cgroup at this point: + * page->mem_cgroup at this point: * * - the page is uncharged * @@ -2489,7 +2480,7 @@ static void commit_charge(struct page *page, struct mem_cgroup *memcg, * - a page cache insertion, a swapin fault, or a migration * have the page locked */ - pc->mem_cgroup = memcg; + page->mem_cgroup = memcg; if (lrucare) unlock_page_lru(page, isolated); @@ -2972,8 +2963,6 @@ __memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **_memcg, int order) void __memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, int order) { - struct page_cgroup *pc; - VM_BUG_ON(mem_cgroup_is_root(memcg)); /* The page allocation failed. 
Revert */ @@ -2981,14 +2970,12 @@ void __memcg_kmem_commit_charge(struct page *page, struct mem_cgroup *memcg, memcg_uncharge_kmem(memcg, 1 << order); return; } - pc = lookup_page_cgroup(page); - pc->mem_cgroup = memcg; + page->mem_cgroup = memcg; } void __memcg_kmem_uncharge_pages(struct page *page, int order) { - struct page_cgroup *pc = lookup_page_cgroup(page); - struct mem_cgroup *memcg = pc->mem_cgroup; + struct mem_cgroup *memcg = page->mem_cgroup; if (!memcg) return; @@ -2996,7 +2983,7 @@ void __memcg_kmem_uncharge_pages(struct page *page, int order) VM_BUG_ON_PAGE(mem_cgroup_is_root(memcg), page); memcg_uncharge_kmem(memcg, 1 << order); - pc->mem_cgroup = NULL; + page->mem_cgroup = NULL; } #else static inline void memcg_unregister_all_caches(struct mem_cgroup *memcg) @@ -3014,16 +3001,15 @@ static inline void memcg_unregister_all_caches(struct mem_cgroup *memcg) */ void mem_cgroup_split_huge_fixup(struct page *head) { - struct page_cgroup *pc = lookup_page_cgroup(head); int i; if (mem_cgroup_disabled()) return; for (i = 1; i < HPAGE_PMD_NR; i++) - pc[i].mem_cgroup = pc[0].mem_cgroup; + head[i].mem_cgroup = head->mem_cgroup; - __this_cpu_sub(pc[0].mem_cgroup->stat->count[MEM_CGROUP_STAT_RSS_HUGE], + __this_cpu_sub(head->mem_cgroup->stat->count[MEM_CGROUP_STAT_RSS_HUGE], HPAGE_PMD_NR); } #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ @@ -3032,7 +3018,6 @@ void mem_cgroup_split_huge_fixup(struct page *head) * mem_cgroup_move_account - move account of the page * @page: the page * @nr_pages: number of regular pages (>1 for huge pages) - * @pc: page_cgroup of the page. * @from: mem_cgroup which the page is moved from. * @to: mem_cgroup which the page is moved to. @from != @to. * @@ -3045,7 +3030,6 @@ void mem_cgroup_split_huge_fixup(struct page *head) */ static int mem_cgroup_move_account(struct page *page, unsigned int nr_pages, - struct page_cgroup *pc, struct mem_cgroup *from, struct mem_cgroup *to) { @@ -3065,7 +3049,7 @@ static int mem_cgroup_move_account(struct page *page, goto out; /* - * Prevent mem_cgroup_migrate() from looking at pc->mem_cgroup + * Prevent mem_cgroup_migrate() from looking at page->mem_cgroup * of its source page while we change it: page migration takes * both pages off the LRU, but page cache replacement doesn't. */ @@ -3073,7 +3057,7 @@ static int mem_cgroup_move_account(struct page *page, goto out; ret = -EINVAL; - if (pc->mem_cgroup != from) + if (page->mem_cgroup != from) goto out_unlock; spin_lock_irqsave(&from->move_lock, flags); @@ -3093,13 +3077,13 @@ static int mem_cgroup_move_account(struct page *page, } /* - * It is safe to change pc->mem_cgroup here because the page + * It is safe to change page->mem_cgroup here because the page * is referenced, charged, and isolated - we can't race with * uncharging, charging, migration, or LRU putback. */ /* caller should have done css_get */ - pc->mem_cgroup = to; + page->mem_cgroup = to; spin_unlock_irqrestore(&from->move_lock, flags); ret = 0; @@ -3174,36 +3158,17 @@ static inline int mem_cgroup_move_swap_account(swp_entry_t entry, #endif #ifdef CONFIG_DEBUG_VM -static struct page_cgroup *lookup_page_cgroup_used(struct page *page) -{ - struct page_cgroup *pc; - - pc = lookup_page_cgroup(page); - /* - * Can be NULL while feeding pages into the page allocator for - * the first time, i.e. during boot or memory hotplug; - * or when mem_cgroup_disabled(). 
- */ - if (likely(pc) && pc->mem_cgroup) - return pc; - return NULL; -} - bool mem_cgroup_bad_page_check(struct page *page) { if (mem_cgroup_disabled()) return false; - return lookup_page_cgroup_used(page) != NULL; + return page->mem_cgroup != NULL; } void mem_cgroup_print_bad_page(struct page *page) { - struct page_cgroup *pc; - - pc = lookup_page_cgroup_used(page); - if (pc) - pr_alert("pc:%p pc->mem_cgroup:%p\n", pc, pc->mem_cgroup); + pr_alert("page->mem_cgroup:%p\n", page->mem_cgroup); } #endif @@ -5123,7 +5088,6 @@ static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma, unsigned long addr, pte_t ptent, union mc_target *target) { struct page *page = NULL; - struct page_cgroup *pc; enum mc_target_type ret = MC_TARGET_NONE; swp_entry_t ent = { .val = 0 }; @@ -5137,13 +5101,12 @@ static enum mc_target_type get_mctgt_type(struct vm_area_struct *vma, if (!page && !ent.val) return ret; if (page) { - pc = lookup_page_cgroup(page); /* * Do only loose check w/o serialization. - * mem_cgroup_move_account() checks the pc is valid or + * mem_cgroup_move_account() checks the page is valid or * not under LRU exclusion. */ - if (pc->mem_cgroup == mc.from) { + if (page->mem_cgroup == mc.from) { ret = MC_TARGET_PAGE; if (target) target->page = page; @@ -5171,15 +5134,13 @@ static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma, unsigned long addr, pmd_t pmd, union mc_target *target) { struct page *page = NULL; - struct page_cgroup *pc; enum mc_target_type ret = MC_TARGET_NONE; page = pmd_page(pmd); VM_BUG_ON_PAGE(!page || !PageHead(page), page); if (!move_anon()) return ret; - pc = lookup_page_cgroup(page); - if (pc->mem_cgroup == mc.from) { + if (page->mem_cgroup == mc.from) { ret = MC_TARGET_PAGE; if (target) { get_page(page); @@ -5378,7 +5339,6 @@ static int mem_cgroup_move_charge_pte_range(pmd_t *pmd, enum mc_target_type target_type; union mc_target target; struct page *page; - struct page_cgroup *pc; /* * We don't take compound_lock() here but no race with splitting thp @@ -5399,9 +5359,8 @@ static int mem_cgroup_move_charge_pte_range(pmd_t *pmd, if (target_type == MC_TARGET_PAGE) { page = target.page; if (!isolate_lru_page(page)) { - pc = lookup_page_cgroup(page); if (!mem_cgroup_move_account(page, HPAGE_PMD_NR, - pc, mc.from, mc.to)) { + mc.from, mc.to)) { mc.precharge -= HPAGE_PMD_NR; mc.moved_charge += HPAGE_PMD_NR; } @@ -5429,9 +5388,7 @@ retry: page = target.page; if (isolate_lru_page(page)) goto put; - pc = lookup_page_cgroup(page); - if (!mem_cgroup_move_account(page, 1, pc, - mc.from, mc.to)) { + if (!mem_cgroup_move_account(page, 1, mc.from, mc.to)) { mc.precharge--; /* we uncharge from mc.from later. 
*/ mc.moved_charge++; @@ -5619,7 +5576,6 @@ static void __init enable_swap_cgroup(void) void mem_cgroup_swapout(struct page *page, swp_entry_t entry) { struct mem_cgroup *memcg; - struct page_cgroup *pc; unsigned short oldid; VM_BUG_ON_PAGE(PageLRU(page), page); @@ -5628,8 +5584,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry) if (!do_swap_account) return; - pc = lookup_page_cgroup(page); - memcg = pc->mem_cgroup; + memcg = page->mem_cgroup; /* Readahead page, never charged */ if (!memcg) @@ -5639,7 +5594,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry) VM_BUG_ON_PAGE(oldid, page); mem_cgroup_swap_statistics(memcg, true); - pc->mem_cgroup = NULL; + page->mem_cgroup = NULL; if (!mem_cgroup_is_root(memcg)) page_counter_uncharge(&memcg->memory, 1); @@ -5706,7 +5661,6 @@ int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm, goto out; if (PageSwapCache(page)) { - struct page_cgroup *pc = lookup_page_cgroup(page); /* * Every swap fault against a single page tries to charge the * page, bail as early as possible. shmem_unuse() encounters @@ -5714,7 +5668,7 @@ int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm, * the page lock, which serializes swap cache removal, which * in turn serializes uncharging. */ - if (pc->mem_cgroup) + if (page->mem_cgroup) goto out; } @@ -5867,7 +5821,6 @@ static void uncharge_list(struct list_head *page_list) next = page_list->next; do { unsigned int nr_pages = 1; - struct page_cgroup *pc; page = list_entry(next, struct page, lru); next = page->lru.next; @@ -5875,23 +5828,22 @@ static void uncharge_list(struct list_head *page_list) VM_BUG_ON_PAGE(PageLRU(page), page); VM_BUG_ON_PAGE(page_count(page), page); - pc = lookup_page_cgroup(page); - if (!pc->mem_cgroup) + if (!page->mem_cgroup) continue; /* * Nobody should be changing or seriously looking at - * pc->mem_cgroup at this point, we have fully + * page->mem_cgroup at this point, we have fully * exclusive access to the page. */ - if (memcg != pc->mem_cgroup) { + if (memcg != page->mem_cgroup) { if (memcg) { uncharge_batch(memcg, pgpgout, nr_anon, nr_file, nr_huge, page); pgpgout = nr_anon = nr_file = nr_huge = 0; } - memcg = pc->mem_cgroup; + memcg = page->mem_cgroup; } if (PageTransHuge(page)) { @@ -5905,7 +5857,7 @@ static void uncharge_list(struct list_head *page_list) else nr_file += nr_pages; - pc->mem_cgroup = NULL; + page->mem_cgroup = NULL; pgpgout++; } while (next != page_list); @@ -5924,14 +5876,11 @@ static void uncharge_list(struct list_head *page_list) */ void mem_cgroup_uncharge(struct page *page) { - struct page_cgroup *pc; - if (mem_cgroup_disabled()) return; /* Don't touch page->lru of any random page, pre-check: */ - pc = lookup_page_cgroup(page); - if (!pc->mem_cgroup) + if (!page->mem_cgroup) return; INIT_LIST_HEAD(&page->lru); @@ -5968,7 +5917,6 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage, bool lrucare) { struct mem_cgroup *memcg; - struct page_cgroup *pc; int isolated; VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage); @@ -5983,8 +5931,7 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage, return; /* Page cache replacement: new page already charged? */ - pc = lookup_page_cgroup(newpage); - if (pc->mem_cgroup) + if (newpage->mem_cgroup) return; /* @@ -5993,15 +5940,14 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage, * uncharged page when the PFN walker finds a page that * reclaim just put back on the LRU but has not released yet. 
*/ - pc = lookup_page_cgroup(oldpage); - memcg = pc->mem_cgroup; + memcg = oldpage->mem_cgroup; if (!memcg) return; if (lrucare) lock_page_lru(oldpage, &isolated); - pc->mem_cgroup = NULL; + oldpage->mem_cgroup = NULL; if (lrucare) unlock_page_lru(oldpage, isolated); diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 97b6966816e5..22cfdeffbf69 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -48,7 +48,6 @@ #include #include #include -#include #include #include #include @@ -4853,7 +4852,6 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat, #endif init_waitqueue_head(&pgdat->kswapd_wait); init_waitqueue_head(&pgdat->pfmemalloc_wait); - pgdat_page_cgroup_init(pgdat); for (j = 0; j < MAX_NR_ZONES; j++) { struct zone *zone = pgdat->node_zones + j; diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c index 5331c2bd85a2..f0f31c1d4d0c 100644 --- a/mm/page_cgroup.c +++ b/mm/page_cgroup.c @@ -1,326 +1,7 @@ #include -#include -#include -#include #include -#include -#include -#include #include -#include #include -#include - -static unsigned long total_usage; - -#if !defined(CONFIG_SPARSEMEM) - - -void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat) -{ - pgdat->node_page_cgroup = NULL; -} - -struct page_cgroup *lookup_page_cgroup(struct page *page) -{ - unsigned long pfn = page_to_pfn(page); - unsigned long offset; - struct page_cgroup *base; - - base = NODE_DATA(page_to_nid(page))->node_page_cgroup; -#ifdef CONFIG_DEBUG_VM - /* - * The sanity checks the page allocator does upon freeing a - * page can reach here before the page_cgroup arrays are - * allocated when feeding a range of pages to the allocator - * for the first time during bootup or memory hotplug. - */ - if (unlikely(!base)) - return NULL; -#endif - offset = pfn - NODE_DATA(page_to_nid(page))->node_start_pfn; - return base + offset; -} - -static int __init alloc_node_page_cgroup(int nid) -{ - struct page_cgroup *base; - unsigned long table_size; - unsigned long nr_pages; - - nr_pages = NODE_DATA(nid)->node_spanned_pages; - if (!nr_pages) - return 0; - - table_size = sizeof(struct page_cgroup) * nr_pages; - - base = memblock_virt_alloc_try_nid_nopanic( - table_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS), - BOOTMEM_ALLOC_ACCESSIBLE, nid); - if (!base) - return -ENOMEM; - NODE_DATA(nid)->node_page_cgroup = base; - total_usage += table_size; - return 0; -} - -void __init page_cgroup_init_flatmem(void) -{ - - int nid, fail; - - if (mem_cgroup_disabled()) - return; - - for_each_online_node(nid) { - fail = alloc_node_page_cgroup(nid); - if (fail) - goto fail; - } - printk(KERN_INFO "allocated %ld bytes of page_cgroup\n", total_usage); - printk(KERN_INFO "please try 'cgroup_disable=memory' option if you" - " don't want memory cgroups\n"); - return; -fail: - printk(KERN_CRIT "allocation of page_cgroup failed.\n"); - printk(KERN_CRIT "please try 'cgroup_disable=memory' boot option\n"); - panic("Out of memory"); -} - -#else /* CONFIG_FLAT_NODE_MEM_MAP */ - -struct page_cgroup *lookup_page_cgroup(struct page *page) -{ - unsigned long pfn = page_to_pfn(page); - struct mem_section *section = __pfn_to_section(pfn); -#ifdef CONFIG_DEBUG_VM - /* - * The sanity checks the page allocator does upon freeing a - * page can reach here before the page_cgroup arrays are - * allocated when feeding a range of pages to the allocator - * for the first time during bootup or memory hotplug. 
- */ - if (!section->page_cgroup) - return NULL; -#endif - return section->page_cgroup + pfn; -} - -static void *__meminit alloc_page_cgroup(size_t size, int nid) -{ - gfp_t flags = GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN; - void *addr = NULL; - - addr = alloc_pages_exact_nid(nid, size, flags); - if (addr) { - kmemleak_alloc(addr, size, 1, flags); - return addr; - } - - if (node_state(nid, N_HIGH_MEMORY)) - addr = vzalloc_node(size, nid); - else - addr = vzalloc(size); - - return addr; -} - -static int __meminit init_section_page_cgroup(unsigned long pfn, int nid) -{ - struct mem_section *section; - struct page_cgroup *base; - unsigned long table_size; - - section = __pfn_to_section(pfn); - - if (section->page_cgroup) - return 0; - - table_size = sizeof(struct page_cgroup) * PAGES_PER_SECTION; - base = alloc_page_cgroup(table_size, nid); - - /* - * The value stored in section->page_cgroup is (base - pfn) - * and it does not point to the memory block allocated above, - * causing kmemleak false positives. - */ - kmemleak_not_leak(base); - - if (!base) { - printk(KERN_ERR "page cgroup allocation failure\n"); - return -ENOMEM; - } - - /* - * The passed "pfn" may not be aligned to SECTION. For the calculation - * we need to apply a mask. - */ - pfn &= PAGE_SECTION_MASK; - section->page_cgroup = base - pfn; - total_usage += table_size; - return 0; -} -#ifdef CONFIG_MEMORY_HOTPLUG -static void free_page_cgroup(void *addr) -{ - if (is_vmalloc_addr(addr)) { - vfree(addr); - } else { - struct page *page = virt_to_page(addr); - size_t table_size = - sizeof(struct page_cgroup) * PAGES_PER_SECTION; - - BUG_ON(PageReserved(page)); - kmemleak_free(addr); - free_pages_exact(addr, table_size); - } -} - -static void __free_page_cgroup(unsigned long pfn) -{ - struct mem_section *ms; - struct page_cgroup *base; - - ms = __pfn_to_section(pfn); - if (!ms || !ms->page_cgroup) - return; - base = ms->page_cgroup + pfn; - free_page_cgroup(base); - ms->page_cgroup = NULL; -} - -static int __meminit online_page_cgroup(unsigned long start_pfn, - unsigned long nr_pages, - int nid) -{ - unsigned long start, end, pfn; - int fail = 0; - - start = SECTION_ALIGN_DOWN(start_pfn); - end = SECTION_ALIGN_UP(start_pfn + nr_pages); - - if (nid == -1) { - /* - * In this case, "nid" already exists and contains valid memory. - * "start_pfn" passed to us is a pfn which is an arg for - * online__pages(), and start_pfn should exist. 
- */ - nid = pfn_to_nid(start_pfn); - VM_BUG_ON(!node_state(nid, N_ONLINE)); - } - - for (pfn = start; !fail && pfn < end; pfn += PAGES_PER_SECTION) { - if (!pfn_present(pfn)) - continue; - fail = init_section_page_cgroup(pfn, nid); - } - if (!fail) - return 0; - - /* rollback */ - for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) - __free_page_cgroup(pfn); - - return -ENOMEM; -} - -static int __meminit offline_page_cgroup(unsigned long start_pfn, - unsigned long nr_pages, int nid) -{ - unsigned long start, end, pfn; - - start = SECTION_ALIGN_DOWN(start_pfn); - end = SECTION_ALIGN_UP(start_pfn + nr_pages); - - for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) - __free_page_cgroup(pfn); - return 0; - -} - -static int __meminit page_cgroup_callback(struct notifier_block *self, - unsigned long action, void *arg) -{ - struct memory_notify *mn = arg; - int ret = 0; - switch (action) { - case MEM_GOING_ONLINE: - ret = online_page_cgroup(mn->start_pfn, - mn->nr_pages, mn->status_change_nid); - break; - case MEM_OFFLINE: - offline_page_cgroup(mn->start_pfn, - mn->nr_pages, mn->status_change_nid); - break; - case MEM_CANCEL_ONLINE: - offline_page_cgroup(mn->start_pfn, - mn->nr_pages, mn->status_change_nid); - break; - case MEM_GOING_OFFLINE: - break; - case MEM_ONLINE: - case MEM_CANCEL_OFFLINE: - break; - } - - return notifier_from_errno(ret); -} - -#endif - -void __init page_cgroup_init(void) -{ - unsigned long pfn; - int nid; - - if (mem_cgroup_disabled()) - return; - - for_each_node_state(nid, N_MEMORY) { - unsigned long start_pfn, end_pfn; - - start_pfn = node_start_pfn(nid); - end_pfn = node_end_pfn(nid); - /* - * start_pfn and end_pfn may not be aligned to SECTION and the - * page->flags of out of node pages are not initialized. So we - * scan [start_pfn, the biggest section's pfn < end_pfn) here. - */ - for (pfn = start_pfn; - pfn < end_pfn; - pfn = ALIGN(pfn + 1, PAGES_PER_SECTION)) { - - if (!pfn_valid(pfn)) - continue; - /* - * Nodes's pfns can be overlapping. - * We know some arch can have a nodes layout such as - * -------------pfn--------------> - * N0 | N1 | N2 | N0 | N1 | N2|.... - */ - if (pfn_to_nid(pfn) != nid) - continue; - if (init_section_page_cgroup(pfn, nid)) - goto oom; - } - } - hotplug_memory_notifier(page_cgroup_callback, 0); - printk(KERN_INFO "allocated %ld bytes of page_cgroup\n", total_usage); - printk(KERN_INFO "please try 'cgroup_disable=memory' option if you " - "don't want memory cgroups\n"); - return; -oom: - printk(KERN_CRIT "try 'cgroup_disable=memory' boot option\n"); - panic("Out of memory"); -} - -void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat) -{ - return; -} - -#endif - #ifdef CONFIG_MEMCG_SWAP -- cgit v1.2.3 From 5d1ea48bdde67898e87d6d8f511fd097fa64c749 Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Wed, 10 Dec 2014 15:44:55 -0800 Subject: mm: page_cgroup: rename file to mm/swap_cgroup.c Now that the external page_cgroup data structure and its lookup is gone, the only code remaining in there is swap slot accounting. Rename it and move the conditional compilation into mm/Makefile. Signed-off-by: Johannes Weiner Acked-by: Michal Hocko Acked-by: Vladimir Davydov Acked-by: David S. Miller Acked-by: KAMEZAWA Hiroyuki Cc: "Kirill A. 
Shutemov" Cc: Tejun Heo Cc: Joonsoo Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- MAINTAINERS | 2 +- include/linux/page_cgroup.h | 40 --------- include/linux/swap_cgroup.h | 42 +++++++++ mm/Makefile | 3 +- mm/memcontrol.c | 2 +- mm/page_cgroup.c | 211 -------------------------------------------- mm/swap_cgroup.c | 208 +++++++++++++++++++++++++++++++++++++++++++ mm/swap_state.c | 1 - mm/swapfile.c | 2 +- 9 files changed, 255 insertions(+), 256 deletions(-) delete mode 100644 include/linux/page_cgroup.h create mode 100644 include/linux/swap_cgroup.h delete mode 100644 mm/page_cgroup.c create mode 100644 mm/swap_cgroup.c (limited to 'mm') diff --git a/MAINTAINERS b/MAINTAINERS index 0d6469a2cf70..0aedd3e1804b 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2606,7 +2606,7 @@ L: cgroups@vger.kernel.org L: linux-mm@kvack.org S: Maintained F: mm/memcontrol.c -F: mm/page_cgroup.c +F: mm/swap_cgroup.c CORETEMP HARDWARE MONITORING DRIVER M: Fenghua Yu diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h deleted file mode 100644 index 65be35785c86..000000000000 --- a/include/linux/page_cgroup.h +++ /dev/null @@ -1,40 +0,0 @@ -#ifndef __LINUX_PAGE_CGROUP_H -#define __LINUX_PAGE_CGROUP_H - -#include - -#ifdef CONFIG_MEMCG_SWAP -extern unsigned short swap_cgroup_cmpxchg(swp_entry_t ent, - unsigned short old, unsigned short new); -extern unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id); -extern unsigned short lookup_swap_cgroup_id(swp_entry_t ent); -extern int swap_cgroup_swapon(int type, unsigned long max_pages); -extern void swap_cgroup_swapoff(int type); -#else - -static inline -unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id) -{ - return 0; -} - -static inline -unsigned short lookup_swap_cgroup_id(swp_entry_t ent) -{ - return 0; -} - -static inline int -swap_cgroup_swapon(int type, unsigned long max_pages) -{ - return 0; -} - -static inline void swap_cgroup_swapoff(int type) -{ - return; -} - -#endif /* CONFIG_MEMCG_SWAP */ - -#endif /* __LINUX_PAGE_CGROUP_H */ diff --git a/include/linux/swap_cgroup.h b/include/linux/swap_cgroup.h new file mode 100644 index 000000000000..145306bdc92f --- /dev/null +++ b/include/linux/swap_cgroup.h @@ -0,0 +1,42 @@ +#ifndef __LINUX_SWAP_CGROUP_H +#define __LINUX_SWAP_CGROUP_H + +#include + +#ifdef CONFIG_MEMCG_SWAP + +extern unsigned short swap_cgroup_cmpxchg(swp_entry_t ent, + unsigned short old, unsigned short new); +extern unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id); +extern unsigned short lookup_swap_cgroup_id(swp_entry_t ent); +extern int swap_cgroup_swapon(int type, unsigned long max_pages); +extern void swap_cgroup_swapoff(int type); + +#else + +static inline +unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id) +{ + return 0; +} + +static inline +unsigned short lookup_swap_cgroup_id(swp_entry_t ent) +{ + return 0; +} + +static inline int +swap_cgroup_swapon(int type, unsigned long max_pages) +{ + return 0; +} + +static inline void swap_cgroup_swapoff(int type) +{ + return; +} + +#endif /* CONFIG_MEMCG_SWAP */ + +#endif /* __LINUX_SWAP_CGROUP_H */ diff --git a/mm/Makefile b/mm/Makefile index 6d9f40e922f7..b3c6ce932c64 100644 --- a/mm/Makefile +++ b/mm/Makefile @@ -56,7 +56,8 @@ obj-$(CONFIG_MIGRATION) += migrate.o obj-$(CONFIG_QUICKLIST) += quicklist.o obj-$(CONFIG_TRANSPARENT_HUGEPAGE) += huge_memory.o obj-$(CONFIG_PAGE_COUNTER) += page_counter.o -obj-$(CONFIG_MEMCG) += memcontrol.o page_cgroup.o vmpressure.o +obj-$(CONFIG_MEMCG) += 
memcontrol.o vmpressure.o +obj-$(CONFIG_MEMCG_SWAP) += swap_cgroup.o obj-$(CONFIG_CGROUP_HUGETLB) += hugetlb_cgroup.o obj-$(CONFIG_MEMORY_FAILURE) += memory-failure.o obj-$(CONFIG_HWPOISON_INJECT) += hwpoison-inject.o diff --git a/mm/memcontrol.c b/mm/memcontrol.c index b864067791dc..ab270e34ba3e 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -51,7 +51,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c deleted file mode 100644 index f0f31c1d4d0c..000000000000 --- a/mm/page_cgroup.c +++ /dev/null @@ -1,211 +0,0 @@ -#include -#include -#include -#include - -#ifdef CONFIG_MEMCG_SWAP - -static DEFINE_MUTEX(swap_cgroup_mutex); -struct swap_cgroup_ctrl { - struct page **map; - unsigned long length; - spinlock_t lock; -}; - -static struct swap_cgroup_ctrl swap_cgroup_ctrl[MAX_SWAPFILES]; - -struct swap_cgroup { - unsigned short id; -}; -#define SC_PER_PAGE (PAGE_SIZE/sizeof(struct swap_cgroup)) - -/* - * SwapCgroup implements "lookup" and "exchange" operations. - * In typical usage, this swap_cgroup is accessed via memcg's charge/uncharge - * against SwapCache. At swap_free(), this is accessed directly from swap. - * - * This means, - * - we have no race in "exchange" when we're accessed via SwapCache because - * SwapCache(and its swp_entry) is under lock. - * - When called via swap_free(), there is no user of this entry and no race. - * Then, we don't need lock around "exchange". - * - * TODO: we can push these buffers out to HIGHMEM. - */ - -/* - * allocate buffer for swap_cgroup. - */ -static int swap_cgroup_prepare(int type) -{ - struct page *page; - struct swap_cgroup_ctrl *ctrl; - unsigned long idx, max; - - ctrl = &swap_cgroup_ctrl[type]; - - for (idx = 0; idx < ctrl->length; idx++) { - page = alloc_page(GFP_KERNEL | __GFP_ZERO); - if (!page) - goto not_enough_page; - ctrl->map[idx] = page; - } - return 0; -not_enough_page: - max = idx; - for (idx = 0; idx < max; idx++) - __free_page(ctrl->map[idx]); - - return -ENOMEM; -} - -static struct swap_cgroup *lookup_swap_cgroup(swp_entry_t ent, - struct swap_cgroup_ctrl **ctrlp) -{ - pgoff_t offset = swp_offset(ent); - struct swap_cgroup_ctrl *ctrl; - struct page *mappage; - struct swap_cgroup *sc; - - ctrl = &swap_cgroup_ctrl[swp_type(ent)]; - if (ctrlp) - *ctrlp = ctrl; - - mappage = ctrl->map[offset / SC_PER_PAGE]; - sc = page_address(mappage); - return sc + offset % SC_PER_PAGE; -} - -/** - * swap_cgroup_cmpxchg - cmpxchg mem_cgroup's id for this swp_entry. - * @ent: swap entry to be cmpxchged - * @old: old id - * @new: new id - * - * Returns old id at success, 0 at failure. - * (There is no mem_cgroup using 0 as its id) - */ -unsigned short swap_cgroup_cmpxchg(swp_entry_t ent, - unsigned short old, unsigned short new) -{ - struct swap_cgroup_ctrl *ctrl; - struct swap_cgroup *sc; - unsigned long flags; - unsigned short retval; - - sc = lookup_swap_cgroup(ent, &ctrl); - - spin_lock_irqsave(&ctrl->lock, flags); - retval = sc->id; - if (retval == old) - sc->id = new; - else - retval = 0; - spin_unlock_irqrestore(&ctrl->lock, flags); - return retval; -} - -/** - * swap_cgroup_record - record mem_cgroup for this swp_entry. - * @ent: swap entry to be recorded into - * @id: mem_cgroup to be recorded - * - * Returns old value at success, 0 at failure. - * (Of course, old value can be 0.) 
- */ -unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id) -{ - struct swap_cgroup_ctrl *ctrl; - struct swap_cgroup *sc; - unsigned short old; - unsigned long flags; - - sc = lookup_swap_cgroup(ent, &ctrl); - - spin_lock_irqsave(&ctrl->lock, flags); - old = sc->id; - sc->id = id; - spin_unlock_irqrestore(&ctrl->lock, flags); - - return old; -} - -/** - * lookup_swap_cgroup_id - lookup mem_cgroup id tied to swap entry - * @ent: swap entry to be looked up. - * - * Returns ID of mem_cgroup at success. 0 at failure. (0 is invalid ID) - */ -unsigned short lookup_swap_cgroup_id(swp_entry_t ent) -{ - return lookup_swap_cgroup(ent, NULL)->id; -} - -int swap_cgroup_swapon(int type, unsigned long max_pages) -{ - void *array; - unsigned long array_size; - unsigned long length; - struct swap_cgroup_ctrl *ctrl; - - if (!do_swap_account) - return 0; - - length = DIV_ROUND_UP(max_pages, SC_PER_PAGE); - array_size = length * sizeof(void *); - - array = vzalloc(array_size); - if (!array) - goto nomem; - - ctrl = &swap_cgroup_ctrl[type]; - mutex_lock(&swap_cgroup_mutex); - ctrl->length = length; - ctrl->map = array; - spin_lock_init(&ctrl->lock); - if (swap_cgroup_prepare(type)) { - /* memory shortage */ - ctrl->map = NULL; - ctrl->length = 0; - mutex_unlock(&swap_cgroup_mutex); - vfree(array); - goto nomem; - } - mutex_unlock(&swap_cgroup_mutex); - - return 0; -nomem: - printk(KERN_INFO "couldn't allocate enough memory for swap_cgroup.\n"); - printk(KERN_INFO - "swap_cgroup can be disabled by swapaccount=0 boot option\n"); - return -ENOMEM; -} - -void swap_cgroup_swapoff(int type) -{ - struct page **map; - unsigned long i, length; - struct swap_cgroup_ctrl *ctrl; - - if (!do_swap_account) - return; - - mutex_lock(&swap_cgroup_mutex); - ctrl = &swap_cgroup_ctrl[type]; - map = ctrl->map; - length = ctrl->length; - ctrl->map = NULL; - ctrl->length = 0; - mutex_unlock(&swap_cgroup_mutex); - - if (map) { - for (i = 0; i < length; i++) { - struct page *page = map[i]; - if (page) - __free_page(page); - } - vfree(map); - } -} - -#endif diff --git a/mm/swap_cgroup.c b/mm/swap_cgroup.c new file mode 100644 index 000000000000..b5f7f24b8dd1 --- /dev/null +++ b/mm/swap_cgroup.c @@ -0,0 +1,208 @@ +#include +#include +#include + +#include /* depends on mm.h include */ + +static DEFINE_MUTEX(swap_cgroup_mutex); +struct swap_cgroup_ctrl { + struct page **map; + unsigned long length; + spinlock_t lock; +}; + +static struct swap_cgroup_ctrl swap_cgroup_ctrl[MAX_SWAPFILES]; + +struct swap_cgroup { + unsigned short id; +}; +#define SC_PER_PAGE (PAGE_SIZE/sizeof(struct swap_cgroup)) + +/* + * SwapCgroup implements "lookup" and "exchange" operations. + * In typical usage, this swap_cgroup is accessed via memcg's charge/uncharge + * against SwapCache. At swap_free(), this is accessed directly from swap. + * + * This means, + * - we have no race in "exchange" when we're accessed via SwapCache because + * SwapCache(and its swp_entry) is under lock. + * - When called via swap_free(), there is no user of this entry and no race. + * Then, we don't need lock around "exchange". + * + * TODO: we can push these buffers out to HIGHMEM. + */ + +/* + * allocate buffer for swap_cgroup. 
+ */ +static int swap_cgroup_prepare(int type) +{ + struct page *page; + struct swap_cgroup_ctrl *ctrl; + unsigned long idx, max; + + ctrl = &swap_cgroup_ctrl[type]; + + for (idx = 0; idx < ctrl->length; idx++) { + page = alloc_page(GFP_KERNEL | __GFP_ZERO); + if (!page) + goto not_enough_page; + ctrl->map[idx] = page; + } + return 0; +not_enough_page: + max = idx; + for (idx = 0; idx < max; idx++) + __free_page(ctrl->map[idx]); + + return -ENOMEM; +} + +static struct swap_cgroup *lookup_swap_cgroup(swp_entry_t ent, + struct swap_cgroup_ctrl **ctrlp) +{ + pgoff_t offset = swp_offset(ent); + struct swap_cgroup_ctrl *ctrl; + struct page *mappage; + struct swap_cgroup *sc; + + ctrl = &swap_cgroup_ctrl[swp_type(ent)]; + if (ctrlp) + *ctrlp = ctrl; + + mappage = ctrl->map[offset / SC_PER_PAGE]; + sc = page_address(mappage); + return sc + offset % SC_PER_PAGE; +} + +/** + * swap_cgroup_cmpxchg - cmpxchg mem_cgroup's id for this swp_entry. + * @ent: swap entry to be cmpxchged + * @old: old id + * @new: new id + * + * Returns old id at success, 0 at failure. + * (There is no mem_cgroup using 0 as its id) + */ +unsigned short swap_cgroup_cmpxchg(swp_entry_t ent, + unsigned short old, unsigned short new) +{ + struct swap_cgroup_ctrl *ctrl; + struct swap_cgroup *sc; + unsigned long flags; + unsigned short retval; + + sc = lookup_swap_cgroup(ent, &ctrl); + + spin_lock_irqsave(&ctrl->lock, flags); + retval = sc->id; + if (retval == old) + sc->id = new; + else + retval = 0; + spin_unlock_irqrestore(&ctrl->lock, flags); + return retval; +} + +/** + * swap_cgroup_record - record mem_cgroup for this swp_entry. + * @ent: swap entry to be recorded into + * @id: mem_cgroup to be recorded + * + * Returns old value at success, 0 at failure. + * (Of course, old value can be 0.) + */ +unsigned short swap_cgroup_record(swp_entry_t ent, unsigned short id) +{ + struct swap_cgroup_ctrl *ctrl; + struct swap_cgroup *sc; + unsigned short old; + unsigned long flags; + + sc = lookup_swap_cgroup(ent, &ctrl); + + spin_lock_irqsave(&ctrl->lock, flags); + old = sc->id; + sc->id = id; + spin_unlock_irqrestore(&ctrl->lock, flags); + + return old; +} + +/** + * lookup_swap_cgroup_id - lookup mem_cgroup id tied to swap entry + * @ent: swap entry to be looked up. + * + * Returns ID of mem_cgroup at success. 0 at failure. 
(0 is invalid ID) + */ +unsigned short lookup_swap_cgroup_id(swp_entry_t ent) +{ + return lookup_swap_cgroup(ent, NULL)->id; +} + +int swap_cgroup_swapon(int type, unsigned long max_pages) +{ + void *array; + unsigned long array_size; + unsigned long length; + struct swap_cgroup_ctrl *ctrl; + + if (!do_swap_account) + return 0; + + length = DIV_ROUND_UP(max_pages, SC_PER_PAGE); + array_size = length * sizeof(void *); + + array = vzalloc(array_size); + if (!array) + goto nomem; + + ctrl = &swap_cgroup_ctrl[type]; + mutex_lock(&swap_cgroup_mutex); + ctrl->length = length; + ctrl->map = array; + spin_lock_init(&ctrl->lock); + if (swap_cgroup_prepare(type)) { + /* memory shortage */ + ctrl->map = NULL; + ctrl->length = 0; + mutex_unlock(&swap_cgroup_mutex); + vfree(array); + goto nomem; + } + mutex_unlock(&swap_cgroup_mutex); + + return 0; +nomem: + printk(KERN_INFO "couldn't allocate enough memory for swap_cgroup.\n"); + printk(KERN_INFO + "swap_cgroup can be disabled by swapaccount=0 boot option\n"); + return -ENOMEM; +} + +void swap_cgroup_swapoff(int type) +{ + struct page **map; + unsigned long i, length; + struct swap_cgroup_ctrl *ctrl; + + if (!do_swap_account) + return; + + mutex_lock(&swap_cgroup_mutex); + ctrl = &swap_cgroup_ctrl[type]; + map = ctrl->map; + length = ctrl->length; + ctrl->map = NULL; + ctrl->length = 0; + mutex_unlock(&swap_cgroup_mutex); + + if (map) { + for (i = 0; i < length; i++) { + struct page *page = map[i]; + if (page) + __free_page(page); + } + vfree(map); + } +} diff --git a/mm/swap_state.c b/mm/swap_state.c index 154444918685..9711342987a0 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -17,7 +17,6 @@ #include #include #include -#include #include diff --git a/mm/swapfile.c b/mm/swapfile.c index 8798b2e0ac59..63f55ccb9b26 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -38,7 +38,7 @@ #include #include #include -#include +#include static bool swap_count_continued(struct swap_info_struct *, pgoff_t, unsigned char); -- cgit v1.2.3 From 9edad6ea0f1416415f6fe31cc9d1dbc3817803ed Mon Sep 17 00:00:00 2001 From: Johannes Weiner Date: Wed, 10 Dec 2014 15:44:58 -0800 Subject: mm: move page->mem_cgroup bad page handling into generic code Now that the external page_cgroup data structure and its lookup is gone, let the generic bad_page() check for page->mem_cgroup sanity. Signed-off-by: Johannes Weiner Acked-by: Michal Hocko Acked-by: Vladimir Davydov Acked-by: David S. Miller Cc: KAMEZAWA Hiroyuki Cc: "Kirill A. 
Shutemov" Cc: Tejun Heo Cc: Joonsoo Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/memcontrol.h | 17 ----------------- init/Kconfig | 12 ------------ mm/debug.c | 5 ++++- mm/memcontrol.c | 15 --------------- mm/page_alloc.c | 12 ++++++++---- 5 files changed, 12 insertions(+), 49 deletions(-) (limited to 'mm') diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index c4d080875164..6ea9f919e888 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -173,10 +173,6 @@ static inline void mem_cgroup_count_vm_event(struct mm_struct *mm, void mem_cgroup_split_huge_fixup(struct page *head); #endif -#ifdef CONFIG_DEBUG_VM -bool mem_cgroup_bad_page_check(struct page *page); -void mem_cgroup_print_bad_page(struct page *page); -#endif #else /* CONFIG_MEMCG */ struct mem_cgroup; @@ -346,19 +342,6 @@ void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx) } #endif /* CONFIG_MEMCG */ -#if !defined(CONFIG_MEMCG) || !defined(CONFIG_DEBUG_VM) -static inline bool -mem_cgroup_bad_page_check(struct page *page) -{ - return false; -} - -static inline void -mem_cgroup_print_bad_page(struct page *page) -{ -} -#endif - enum { UNDER_LIMIT, SOFT_LIMIT, diff --git a/init/Kconfig b/init/Kconfig index 46768752130d..7e9fbd48e2ab 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -983,18 +983,6 @@ config MEMCG Provides a memory resource controller that manages both anonymous memory and page cache. (See Documentation/cgroups/memory.txt) - Note that setting this option increases fixed memory overhead - associated with each page of memory in the system. By this, - 8(16)bytes/PAGE_SIZE on 32(64)bit system will be occupied by memory - usage tracking struct at boot. Total amount of this is printed out - at boot. - - Only enable when you're ok with these trade offs and really - sure you need the memory resource controller. Even when you enable - this, you can set "cgroup_disable=memory" at your boot option to - disable memory resource controller and you can avoid overheads. 
- (and lose benefits of memory resource controller) - config MEMCG_SWAP bool "Memory Resource Controller Swap Extension" depends on MEMCG && SWAP diff --git a/mm/debug.c b/mm/debug.c index 5ce45c9a29b5..0e58f3211f89 100644 --- a/mm/debug.c +++ b/mm/debug.c @@ -95,7 +95,10 @@ void dump_page_badflags(struct page *page, const char *reason, dump_flags(page->flags & badflags, pageflag_names, ARRAY_SIZE(pageflag_names)); } - mem_cgroup_print_bad_page(page); +#ifdef CONFIG_MEMCG + if (page->mem_cgroup) + pr_alert("page->mem_cgroup:%p\n", page->mem_cgroup); +#endif } void dump_page(struct page *page, const char *reason) diff --git a/mm/memcontrol.c b/mm/memcontrol.c index ab270e34ba3e..1869cb64d089 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -3157,21 +3157,6 @@ static inline int mem_cgroup_move_swap_account(swp_entry_t entry, } #endif -#ifdef CONFIG_DEBUG_VM -bool mem_cgroup_bad_page_check(struct page *page) -{ - if (mem_cgroup_disabled()) - return false; - - return page->mem_cgroup != NULL; -} - -void mem_cgroup_print_bad_page(struct page *page) -{ - pr_alert("page->mem_cgroup:%p\n", page->mem_cgroup); -} -#endif - static DEFINE_MUTEX(memcg_limit_mutex); static int mem_cgroup_resize_limit(struct mem_cgroup *memcg, diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 22cfdeffbf69..a7198c065999 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -640,8 +640,10 @@ static inline int free_pages_check(struct page *page) bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set"; bad_flags = PAGE_FLAGS_CHECK_AT_FREE; } - if (unlikely(mem_cgroup_bad_page_check(page))) - bad_reason = "cgroup check failed"; +#ifdef CONFIG_MEMCG + if (unlikely(page->mem_cgroup)) + bad_reason = "page still charged to cgroup"; +#endif if (unlikely(bad_reason)) { bad_page(page, bad_reason, bad_flags); return 1; @@ -900,8 +902,10 @@ static inline int check_new_page(struct page *page) bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag set"; bad_flags = PAGE_FLAGS_CHECK_AT_PREP; } - if (unlikely(mem_cgroup_bad_page_check(page))) - bad_reason = "cgroup check failed"; +#ifdef CONFIG_MEMCG + if (unlikely(page->mem_cgroup)) + bad_reason = "page still charged to cgroup"; +#endif if (unlikely(bad_reason)) { bad_page(page, bad_reason, bad_flags); return 1; -- cgit v1.2.3
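Taken together, the last three patches reduce the memcg/page association to a single embedded pointer, so the free-path sanity check becomes a plain field test. A standalone sketch of the resulting check (userspace C with deliberately simplified, illustrative struct definitions, not the kernel's):

	#include <stdio.h>

	struct mem_cgroup;	/* opaque for this sketch */

	struct page {
		unsigned long flags;
		struct mem_cgroup *mem_cgroup;	/* embedded directly, as in the patch */
	};

	/* Mirrors the idea of the free_pages_check() hunk above: a page being
	 * freed while still pointing at a memcg indicates a missed uncharge. */
	static const char *free_page_bad_reason(const struct page *page)
	{
		if (page->mem_cgroup)
			return "page still charged to cgroup";
		return NULL;	/* page is clean */
	}

	int main(void)
	{
		struct page leaked = { .flags = 0, .mem_cgroup = (struct mem_cgroup *)0x1 };
		struct page clean = { .flags = 0, .mem_cgroup = NULL };

		printf("leaked page: %s\n", free_page_bad_reason(&leaked));
		printf("clean page: %s\n", free_page_bad_reason(&clean) ? "bad" : "ok");
		return 0;
	}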