author     Linus Torvalds <torvalds@linux-foundation.org>  2021-01-15 15:25:45 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2021-01-15 15:25:45 -0800
commit     b45e2da6e444280f8661dca439c1e377761b2877 (patch)
tree       3f79245f00b9fdad03cb515595a4ce835a7243bc
parent     8cbe71e7e01a9e45a390b204403880c90a226039 (diff)
parent     eb351d75ce1e75b4f793d609efac08426ca50acd (diff)
Merge branch 'akpm' (patches from Andrew)
Merge misc fixes from Andrew Morton:
 "10 patches.

  Subsystems affected by this patch series: MAINTAINERS and mm (slub,
  pagealloc, memcg, kasan, vmalloc, migration, hugetlb, memory-failure,
  and process_vm_access)"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  mm/process_vm_access.c: include compat.h
  mm,hwpoison: fix printing of page flags
  MAINTAINERS: add Vlastimil as slab allocators maintainer
  mm/hugetlb: fix potential missing huge page size info
  mm: migrate: initialize err in do_migrate_pages
  mm/vmalloc.c: fix potential memory leak
  arm/kasan: fix the array size of kasan_early_shadow_pte[]
  mm/memcontrol: fix warning in mem_cgroup_page_lruvec()
  mm/page_alloc: add a missing mm_page_alloc_zone_locked() tracepoint
  mm, slub: consider rest of partial list if acquire_slab() fails
-rw-r--r--  MAINTAINERS                  1
-rw-r--r--  include/linux/kasan.h        6
-rw-r--r--  include/linux/memcontrol.h   2
-rw-r--r--  mm/hugetlb.c                 2
-rw-r--r--  mm/kasan/init.c              3
-rw-r--r--  mm/memory-failure.c          2
-rw-r--r--  mm/mempolicy.c               2
-rw-r--r--  mm/page_alloc.c             31
-rw-r--r--  mm/process_vm_access.c       1
-rw-r--r--  mm/slub.c                    2
-rw-r--r--  mm/vmalloc.c                 4
11 files changed, 33 insertions, 23 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index 79b400c97059..00836f6452f0 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -16313,6 +16313,7 @@ M: Pekka Enberg <penberg@kernel.org>
M: David Rientjes <rientjes@google.com>
M: Joonsoo Kim <iamjoonsoo.kim@lge.com>
M: Andrew Morton <akpm@linux-foundation.org>
+M: Vlastimil Babka <vbabka@suse.cz>
L: linux-mm@kvack.org
S: Maintained
F: include/linux/sl?b*.h
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 5e0655fb2a6f..fe1ae73ff8b5 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -35,8 +35,12 @@ struct kunit_kasan_expectation {
#define KASAN_SHADOW_INIT 0
#endif
+#ifndef PTE_HWTABLE_PTRS
+#define PTE_HWTABLE_PTRS 0
+#endif
+
extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
-extern pte_t kasan_early_shadow_pte[PTRS_PER_PTE];
+extern pte_t kasan_early_shadow_pte[PTRS_PER_PTE + PTE_HWTABLE_PTRS];
extern pmd_t kasan_early_shadow_pmd[PTRS_PER_PMD];
extern pud_t kasan_early_shadow_pud[PTRS_PER_PUD];
extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D];
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index d827bd7f3bfe..eeb0b52203e9 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -665,7 +665,7 @@ static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
{
struct mem_cgroup *memcg = page_memcg(page);
- VM_WARN_ON_ONCE_PAGE(!memcg, page);
+ VM_WARN_ON_ONCE_PAGE(!memcg && !mem_cgroup_disabled(), page);
return mem_cgroup_lruvec(memcg, pgdat);
}
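Background on this hunk: when memory cgroups are disabled at boot (cgroup_disable=memory), page_memcg() legitimately returns NULL for every page, so the unconditional warning fired on a perfectly healthy configuration. A minimal sketch of the invariant the new check encodes (illustrative only, not part of the patch):

#include <linux/memcontrol.h>

/*
 * Illustrative sketch, not kernel source: a NULL page_memcg() is only
 * suspicious while the memory controller is actually enabled.
 */
static bool page_memcg_looks_sane(struct page *page)
{
	struct mem_cgroup *memcg = page_memcg(page);

	return memcg || mem_cgroup_disabled();
}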
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index a2602969873d..18f6ee317900 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -4371,7 +4371,7 @@ retry:
* So we need to block hugepage fault by PG_hwpoison bit check.
*/
if (unlikely(PageHWPoison(page))) {
- ret = VM_FAULT_HWPOISON |
+ ret = VM_FAULT_HWPOISON_LARGE |
VM_FAULT_SET_HINDEX(hstate_index(h));
goto backout_unlocked;
}
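The distinction matters to the fault consumer: VM_FAULT_HWPOISON_LARGE carries an hstate index (set via VM_FAULT_SET_HINDEX) so the SIGBUS path can report the real size of the poisoned mapping, while plain VM_FAULT_HWPOISON is treated as a single base page. A simplified sketch of how a fault handler typically decodes it, modeled on the arch fault paths (not part of this patch):

#include <linux/hugetlb.h>
#include <linux/mm_types.h>

/* Sketch: the "lsb" reported with the poison signal depends on whether
 * the large-page variant of the fault code was returned. */
static unsigned long hwpoison_lsb(vm_fault_t fault)
{
	if (fault & VM_FAULT_HWPOISON_LARGE)
		return hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
	return PAGE_SHIFT;
}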
diff --git a/mm/kasan/init.c b/mm/kasan/init.c
index bc0ad208b3a7..7ca0b92d5886 100644
--- a/mm/kasan/init.c
+++ b/mm/kasan/init.c
@@ -64,7 +64,8 @@ static inline bool kasan_pmd_table(pud_t pud)
return false;
}
#endif
-pte_t kasan_early_shadow_pte[PTRS_PER_PTE] __page_aligned_bss;
+pte_t kasan_early_shadow_pte[PTRS_PER_PTE + PTE_HWTABLE_PTRS]
+ __page_aligned_bss;
static inline bool kasan_pte_table(pmd_t pmd)
{
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 5a38e9eade94..04d9f154a130 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1940,7 +1940,7 @@ retry:
goto retry;
}
} else if (ret == -EIO) {
- pr_info("%s: %#lx: unknown page type: %lx (%pGP)\n",
+ pr_info("%s: %#lx: unknown page type: %lx (%pGp)\n",
__func__, pfn, page->flags, &page->flags);
}
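The lowercase %pGp is the printk extension that decodes page flags; the uppercase variant is not a recognized %pG modifier, so the flags were printed raw instead of being pretty-printed. Usage in isolation (sketch only):

	/* Sketch: %pGp takes a pointer to the flags word and decodes it. */
	pr_info("page flags: %#lx (%pGp)\n", page->flags, &page->flags);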
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 8cf96bd21341..2c3a86502053 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1111,7 +1111,7 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
const nodemask_t *to, int flags)
{
int busy = 0;
- int err;
+ int err = 0;
nodemask_t tmp;
migrate_prep();
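The initialization closes an uninitialized-read path: if the node scan finds nothing to migrate, the loop never assigns err, yet its value is still examined afterwards. A stripped-down illustration of the hazard and the fix (not the kernel function itself; migrate_one_batch is a hypothetical helper):

extern int migrate_one_batch(int batch);	/* hypothetical helper */

/* Illustrative only: the general shape of the bug fixed above. */
int migrate_all(int nr_batches)
{
	int busy = 0;
	int err = 0;	/* without this, a zero-iteration loop leaves err undefined */

	for (int i = 0; i < nr_batches; i++) {
		err = migrate_one_batch(i);
		if (err < 0)
			break;
		busy += err;
	}

	if (err < 0)	/* read even when the loop body never ran */
		return err;
	return busy;
}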
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index bdbec4c98173..027f6481ba59 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2862,20 +2862,20 @@ __rmqueue(struct zone *zone, unsigned int order, int migratetype,
{
struct page *page;
-#ifdef CONFIG_CMA
- /*
- * Balance movable allocations between regular and CMA areas by
- * allocating from CMA when over half of the zone's free memory
- * is in the CMA area.
- */
- if (alloc_flags & ALLOC_CMA &&
- zone_page_state(zone, NR_FREE_CMA_PAGES) >
- zone_page_state(zone, NR_FREE_PAGES) / 2) {
- page = __rmqueue_cma_fallback(zone, order);
- if (page)
- return page;
+ if (IS_ENABLED(CONFIG_CMA)) {
+ /*
+ * Balance movable allocations between regular and CMA areas by
+ * allocating from CMA when over half of the zone's free memory
+ * is in the CMA area.
+ */
+ if (alloc_flags & ALLOC_CMA &&
+ zone_page_state(zone, NR_FREE_CMA_PAGES) >
+ zone_page_state(zone, NR_FREE_PAGES) / 2) {
+ page = __rmqueue_cma_fallback(zone, order);
+ if (page)
+ goto out;
+ }
}
-#endif
retry:
page = __rmqueue_smallest(zone, order, migratetype);
if (unlikely(!page)) {
@@ -2886,8 +2886,9 @@ retry:
alloc_flags))
goto retry;
}
-
- trace_mm_page_alloc_zone_locked(page, order, migratetype);
+out:
+ if (page)
+ trace_mm_page_alloc_zone_locked(page, order, migratetype);
return page;
}
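Two things change in this hunk: the CMA fast path moves from an #ifdef block to IS_ENABLED(CONFIG_CMA), so the branch is always compiled and type-checked but folded away when CMA is off, and the early "return page" becomes "goto out" so every successful allocation, CMA or not, now passes the tracepoint. A small sketch of the #ifdef-vs-IS_ENABLED pattern in isolation (CONFIG_EXAMPLE_FEATURE is a hypothetical symbol, not kernel code):

#include <linux/kconfig.h>

/*
 * Sketch only: with IS_ENABLED() the compiler still sees and checks the
 * body when CONFIG_EXAMPLE_FEATURE=n, then eliminates it as dead code;
 * an #ifdef would hide it from the compiler entirely.
 */
static int feature_quota(int base)
{
	if (IS_ENABLED(CONFIG_EXAMPLE_FEATURE))	/* hypothetical config symbol */
		return base * 2;
	return base;
}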
diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
index 4bcc11958089..f5fee9cf90f8 100644
--- a/mm/process_vm_access.c
+++ b/mm/process_vm_access.c
@@ -9,6 +9,7 @@
#include <linux/mm.h>
#include <linux/uio.h>
#include <linux/sched.h>
+#include <linux/compat.h>
#include <linux/sched/mm.h>
#include <linux/highmem.h>
#include <linux/ptrace.h>
diff --git a/mm/slub.c b/mm/slub.c
index dc5b42e700b8..d9e4e10683cc 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1973,7 +1973,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
t = acquire_slab(s, n, page, object == NULL, &objects);
if (!t)
- break;
+ continue; /* cmpxchg raced */
available += objects;
if (!object) {
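acquire_slab() claims a partial slab with a cmpxchg-style update, which can fail simply because another CPU won the race for that particular page; breaking out abandoned the rest of the partial list, while continue just moves on to the next candidate. The same retry-next-candidate pattern, sketched with C11 atomics rather than the kernel's cmpxchg_double (illustrative only):

#include <stdatomic.h>
#include <stdbool.h>

struct slot { _Atomic int claimed; };

static bool try_claim(struct slot *s)
{
	int expected = 0;
	/* Fails when another thread claimed this slot first. */
	return atomic_compare_exchange_strong(&s->claimed, &expected, 1);
}

static int claim_any(struct slot *slots, int n)
{
	for (int i = 0; i < n; i++) {
		if (!try_claim(&slots[i]))
			continue;	/* raced: try the next candidate, don't give up */
		return i;
	}
	return -1;	/* every slot was taken by other threads */
}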
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 4d88fe5a277a..e6f352bf0498 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -2420,8 +2420,10 @@ void *vmap(struct page **pages, unsigned int count,
return NULL;
}
- if (flags & VM_MAP_PUT_PAGES)
+ if (flags & VM_MAP_PUT_PAGES) {
area->pages = pages;
+ area->nr_pages = count;
+ }
return area->addr;
}
EXPORT_SYMBOL(vmap);
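vfree() on a VM_MAP_PUT_PAGES mapping releases the pages and the pages array by walking area->nr_pages, so leaving nr_pages at zero leaked every page that had been handed over. A minimal sketch of a caller relying on that ownership transfer (illustrative, with simplified error handling):

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

/* Sketch only: map n freshly allocated pages and hand ownership of both
 * the pages and the pages[] array to vmap(); a later vfree(addr) is then
 * expected to release all of them. */
static void *map_owned_pages(unsigned int n)
{
	struct page **pages;
	unsigned int i;
	void *addr;

	pages = kvmalloc_array(n, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return NULL;

	for (i = 0; i < n; i++) {
		pages[i] = alloc_page(GFP_KERNEL);
		if (!pages[i])
			goto free_pages;
	}

	addr = vmap(pages, n, VM_MAP | VM_MAP_PUT_PAGES, PAGE_KERNEL);
	if (addr)
		return addr;	/* vfree(addr) now frees pages[] and the pages */

free_pages:
	while (i--)
		__free_page(pages[i]);
	kvfree(pages);
	return NULL;
}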