Diffstat (limited to 'mm')
-rw-r--r--  mm/backing-dev.c     | 18
-rw-r--r--  mm/hwpoison-inject.c | 13
-rw-r--r--  mm/kmemleak.c        |  3
-rw-r--r--  mm/memcontrol.c      |  6
-rw-r--r--  mm/memory-failure.c  | 16
-rw-r--r--  mm/memory_hotplug.c  |  4
-rw-r--r--  mm/mempolicy.c       |  2
-rw-r--r--  mm/page-writeback.c  |  6
-rw-r--r--  mm/page_isolation.c  |  3
-rw-r--r--  mm/zsmalloc.c        |  3
10 files changed, 33 insertions, 41 deletions
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 6dc4580df2af..000e7b3b9896 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -359,23 +359,6 @@ static void bdi_wb_shutdown(struct backing_dev_info *bdi)
flush_delayed_work(&bdi->wb.dwork);
}
-/*
- * Called when the device behind @bdi has been removed or ejected.
- *
- * We can't really do much here except for reducing the dirty ratio at
- * the moment. In the future we should be able to set a flag so that
- * the filesystem can handle errors at mark_inode_dirty time instead
- * of only at writeback time.
- */
-void bdi_unregister(struct backing_dev_info *bdi)
-{
- if (WARN_ON_ONCE(!bdi->dev))
- return;
-
- bdi_set_min_ratio(bdi, 0);
-}
-EXPORT_SYMBOL(bdi_unregister);
-
static void bdi_wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi)
{
memset(wb, 0, sizeof(*wb));
@@ -443,6 +426,7 @@ void bdi_destroy(struct backing_dev_info *bdi)
int i;
bdi_wb_shutdown(bdi);
+ bdi_set_min_ratio(bdi, 0);
WARN_ON(!list_empty(&bdi->work_list));
WARN_ON(delayed_work_pending(&bdi->wb.dwork));
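The backing-dev.c change removes bdi_unregister(), whose only remaining job was resetting the device's minimum dirty ratio, and folds that reset into bdi_destroy() right after writeback is shut down. Assembled from the hunks above, the start of bdi_destroy() after this patch reads roughly as follows (a sketch of the ordering only, not the full function body):

void bdi_destroy(struct backing_dev_info *bdi)
{
        bdi_wb_shutdown(bdi);       /* stop and flush the flusher work */
        bdi_set_min_ratio(bdi, 0);  /* what bdi_unregister() used to do */

        WARN_ON(!list_empty(&bdi->work_list));
        WARN_ON(delayed_work_pending(&bdi->wb.dwork));
        /* ... remaining teardown unchanged ... */
}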
diff --git a/mm/hwpoison-inject.c b/mm/hwpoison-inject.c
index 329caf56df22..4ca5fe0042e1 100644
--- a/mm/hwpoison-inject.c
+++ b/mm/hwpoison-inject.c
@@ -34,13 +34,13 @@ static int hwpoison_inject(void *data, u64 val)
if (!hwpoison_filter_enable)
goto inject;
- if (!PageLRU(p) && !PageHuge(p))
- shake_page(p, 0);
+ if (!PageLRU(hpage) && !PageHuge(p))
+ shake_page(hpage, 0);
/*
* This implies unable to support non-LRU pages.
*/
- if (!PageLRU(p) && !PageHuge(p))
- return 0;
+ if (!PageLRU(hpage) && !PageHuge(p))
+ goto put_out;
/*
* do a racy check with elevated page count, to make sure PG_hwpoison
@@ -52,11 +52,14 @@ static int hwpoison_inject(void *data, u64 val)
err = hwpoison_filter(hpage);
unlock_page(hpage);
if (err)
- return 0;
+ goto put_out;
inject:
pr_info("Injecting memory failure at pfn %#lx\n", pfn);
return memory_failure(pfn, 18, MF_COUNT_INCREASED);
+put_out:
+ put_page(hpage);
+ return 0;
}
static int hwpoison_unpoison(void *data, u64 val)
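The hwpoison-inject.c hunks fix a page reference leak: the injector pins the page before the filter checks (hence the "racy check with elevated page count" comment), but the filtered early returns used to bail out without a put_page(). The new put_out label drops the reference on every rejected path, while the inject path still hands its reference to memory_failure() via MF_COUNT_INCREASED. A small userspace analogy of that balance (the types and helpers below are invented for illustration, not the kernel API):

#include <stdio.h>

struct obj { int refcount; };

static void get_obj(struct obj *o) { o->refcount++; }
static void put_obj(struct obj *o) { o->refcount--; }

/* Consumes the caller's reference, as memory_failure() does with
 * MF_COUNT_INCREASED. */
static int do_inject(struct obj *o)
{
        put_obj(o);
        return 0;
}

static int inject(struct obj *o, int filtered_out)
{
        get_obj(o);                 /* reference taken before the filters */
        if (filtered_out)
                goto put_out;       /* was a bare "return 0" before the fix */
        return do_inject(o);

put_out:
        put_obj(o);                 /* balance the reference on every exit */
        return 0;
}

int main(void)
{
        struct obj page = { 0 };

        inject(&page, 1);
        inject(&page, 0);
        printf("refcount after both paths: %d\n", page.refcount);  /* 0 */
        return 0;
}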
diff --git a/mm/kmemleak.c b/mm/kmemleak.c
index 5405aff5a590..f0fe4f2c1fa7 100644
--- a/mm/kmemleak.c
+++ b/mm/kmemleak.c
@@ -115,7 +115,8 @@
#define BYTES_PER_POINTER sizeof(void *)
/* GFP bitmask for kmemleak internal allocations */
-#define gfp_kmemleak_mask(gfp) (((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \
+#define gfp_kmemleak_mask(gfp) (((gfp) & (GFP_KERNEL | GFP_ATOMIC | \
+ __GFP_NOACCOUNT)) | \
__GFP_NORETRY | __GFP_NOMEMALLOC | \
__GFP_NOWARN)
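The kmemleak.c change adds __GFP_NOACCOUNT to the whitelist in gfp_kmemleak_mask(), so when kmemleak allocates its tracking metadata with the caller's gfp flags, that flag is passed through instead of being stripped. As a rough illustration of how a whitelist-plus-forced-flags mask behaves (the flag values below are made up for the example; the real ones live in gfp.h):

#include <stdio.h>

#define X_KERNEL    0x01u
#define X_ATOMIC    0x02u
#define X_NOACCOUNT 0x04u
#define X_HIGHMEM   0x08u   /* something the mask should strip */
#define X_NOWARN    0x10u   /* something the mask always adds */

/* Keep only the whitelisted caller flags, then OR in the forced ones. */
static unsigned int mask_flags(unsigned int gfp)
{
        return (gfp & (X_KERNEL | X_ATOMIC | X_NOACCOUNT)) | X_NOWARN;
}

int main(void)
{
        unsigned int in = X_KERNEL | X_NOACCOUNT | X_HIGHMEM;

        /* X_HIGHMEM is stripped, X_NOACCOUNT now survives, X_NOWARN is added. */
        printf("in=0x%02x out=0x%02x\n", in, mask_flags(in));
        return 0;
}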
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 14c2f2017e37..a04225d372ba 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2323,6 +2323,8 @@ done_restock:
css_get_many(&memcg->css, batch);
if (batch > nr_pages)
refill_stock(memcg, batch - nr_pages);
+ if (!(gfp_mask & __GFP_WAIT))
+ goto done;
/*
* If the hierarchy is above the normal consumption range,
* make the charging task trim their excess contribution.
@@ -5833,9 +5835,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
if (!mem_cgroup_is_root(memcg))
page_counter_uncharge(&memcg->memory, 1);
- /* XXX: caller holds IRQ-safe mapping->tree_lock */
- VM_BUG_ON(!irqs_disabled());
-
+ /* Caller disabled preemption with mapping->tree_lock */
mem_cgroup_charge_statistics(memcg, page, -1);
memcg_check_events(memcg, page);
}
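The first memcontrol.c hunk makes the post-charge trimming conditional: once the charge has succeeded and the stock is refilled, a caller whose gfp mask lacks __GFP_WAIT jumps straight to done and skips the high-limit trimming that follows, since such a context cannot sleep. A minimal model of that gating, not the real try_charge() (names and structure below are invented for illustration):

#include <stdbool.h>
#include <stdio.h>

#define CAN_WAIT 0x1u   /* stands in for __GFP_WAIT in this sketch */

/* The charge itself succeeds either way; trimming the overage is only
 * attempted when the caller is allowed to sleep. */
static void charge(unsigned int gfp, bool over_high, int *trims)
{
        if (!(gfp & CAN_WAIT))
                return;          /* atomic caller: skip the trim work */
        if (over_high)
                (*trims)++;      /* sleepable caller pays down the excess */
}

int main(void)
{
        int trims = 0;

        charge(0, true, &trims);         /* atomic charge above the high mark */
        charge(CAN_WAIT, true, &trims);  /* sleepable charge above the high mark */
        printf("trim attempts: %d\n", trims);  /* 1 */
        return 0;
}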
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index d9359b770cd9..501820c815b3 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1187,10 +1187,10 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
* The check (unnecessarily) ignores LRU pages being isolated and
* walked by the page reclaim code, however that's not a big loss.
*/
- if (!PageHuge(p) && !PageTransTail(p)) {
- if (!PageLRU(p))
- shake_page(p, 0);
- if (!PageLRU(p)) {
+ if (!PageHuge(p)) {
+ if (!PageLRU(hpage))
+ shake_page(hpage, 0);
+ if (!PageLRU(hpage)) {
/*
* shake_page could have turned it free.
*/
@@ -1777,12 +1777,12 @@ int soft_offline_page(struct page *page, int flags)
} else if (ret == 0) { /* for free pages */
if (PageHuge(page)) {
set_page_hwpoison_huge_page(hpage);
- dequeue_hwpoisoned_huge_page(hpage);
- atomic_long_add(1 << compound_order(hpage),
+ if (!dequeue_hwpoisoned_huge_page(hpage))
+ atomic_long_add(1 << compound_order(hpage),
&num_poisoned_pages);
} else {
- SetPageHWPoison(page);
- atomic_long_inc(&num_poisoned_pages);
+ if (!TestSetPageHWPoison(page))
+ atomic_long_inc(&num_poisoned_pages);
}
}
unset_migratetype_isolate(page, MIGRATE_MOVABLE);
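The soft_offline_page() hunk keeps num_poisoned_pages honest: the counter is now bumped only when TestSetPageHWPoison() (or the huge-page dequeue) reports an actual state change, so repeated soft-offline attempts on the same page cannot inflate it. A userspace model of that count-on-transition pattern (test_and_set below is a plain stand-in, not the kernel's atomic bitop):

#include <stdio.h>

/* Returns the old value and sets the flag, like test_and_set_bit(). */
static int test_and_set(unsigned long *flags, unsigned long bit)
{
        int old = (int)((*flags >> bit) & 1);

        *flags |= 1UL << bit;
        return old;
}

#define POISON_BIT 0

int main(void)
{
        unsigned long flags = 0;
        long poisoned = 0;

        /* Count only on the 0 -> 1 transition. */
        if (!test_and_set(&flags, POISON_BIT))
                poisoned++;
        if (!test_and_set(&flags, POISON_BIT))
                poisoned++;

        printf("poisoned pages counted: %ld\n", poisoned);  /* 1, not 2 */
        return 0;
}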
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 457bde530cbe..9e88f749aa51 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1969,8 +1969,10 @@ void try_offline_node(int nid)
* wait_table may be allocated from boot memory,
* here only free if it's allocated by vmalloc.
*/
- if (is_vmalloc_addr(zone->wait_table))
+ if (is_vmalloc_addr(zone->wait_table)) {
vfree(zone->wait_table);
+ zone->wait_table = NULL;
+ }
}
}
EXPORT_SYMBOL(try_offline_node);
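The memory_hotplug.c hunk clears zone->wait_table after the vfree, so a later pass over the node does not see a dangling pointer to an already-freed table (the is_vmalloc_addr() guard stays, because boot-allocated tables must not be vfreed). The snippet below is a userspace analogy of the free-then-clear idiom using free() in place of vfree(); the struct and helper are invented for illustration:

#include <stdio.h>
#include <stdlib.h>

struct zone_like { int *wait_table; };

/* Freeing and clearing together makes the teardown idempotent: a second
 * call sees NULL and does nothing instead of freeing stale memory. */
static void release_wait_table(struct zone_like *z)
{
        if (z->wait_table) {
                free(z->wait_table);
                z->wait_table = NULL;
        }
}

int main(void)
{
        struct zone_like z = { .wait_table = malloc(64 * sizeof(int)) };

        release_wait_table(&z);
        release_wait_table(&z);   /* safe second call */
        printf("wait_table=%p\n", (void *)z.wait_table);
        return 0;
}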
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index ede26291d4aa..747743237d9f 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2518,7 +2518,7 @@ static void __init check_numabalancing_enable(void)
if (numabalancing_override)
set_numabalancing_state(numabalancing_override == 1);
- if (nr_node_ids > 1 && !numabalancing_override) {
+ if (num_online_nodes() > 1 && !numabalancing_override) {
pr_info("%s automatic NUMA balancing. "
"Configure with numa_balancing= or the "
"kernel.numa_balancing sysctl",
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 5daf5568b9e1..eb59f7eea508 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -580,7 +580,7 @@ static long long pos_ratio_polynom(unsigned long setpoint,
long x;
x = div64_s64(((s64)setpoint - (s64)dirty) << RATELIMIT_CALC_SHIFT,
- limit - setpoint + 1);
+ (limit - setpoint) | 1);
pos_ratio = x;
pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
@@ -807,7 +807,7 @@ static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
* scale global setpoint to bdi's:
* bdi_setpoint = setpoint * bdi_thresh / thresh
*/
- x = div_u64((u64)bdi_thresh << 16, thresh + 1);
+ x = div_u64((u64)bdi_thresh << 16, thresh | 1);
bdi_setpoint = setpoint * (u64)x >> 16;
/*
* Use span=(8*write_bw) in single bdi case as indicated by
@@ -822,7 +822,7 @@ static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
if (bdi_dirty < x_intercept - span / 4) {
pos_ratio = div64_u64(pos_ratio * (x_intercept - bdi_dirty),
- x_intercept - bdi_setpoint + 1);
+ (x_intercept - bdi_setpoint) | 1);
} else
pos_ratio /= 4;
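The page-writeback.c hunks all make the same substitution in divisors: "x + 1" becomes "x | 1". Both keep the divisor nonzero in the normal case, but "+ 1" wraps to zero when the unsigned difference is already the type's maximum value, whereas "| 1" can never produce zero and differs from the original value by at most one (it only nudges even values up). A quick demonstration:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t diff = UINT64_MAX;   /* e.g. the subtraction already wrapped */

        /* "+ 1" wraps to 0 and would fault as a divisor; "| 1" stays huge. */
        printf("diff + 1 = %" PRIu64 "\n", diff + 1);   /* 0 */
        printf("diff | 1 = %" PRIu64 "\n", diff | 1);   /* UINT64_MAX */

        uint64_t even = 100, odd = 101;

        printf("even: +1 = %" PRIu64 ", |1 = %" PRIu64 "\n",
               even + 1, even | 1);                      /* 101, 101 */
        printf("odd:  +1 = %" PRIu64 ", |1 = %" PRIu64 "\n",
               odd + 1, odd | 1);                        /* 102, 101 */
        return 0;
}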
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 755a42c76eb4..303c908790ef 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -101,7 +101,8 @@ void unset_migratetype_isolate(struct page *page, unsigned migratetype)
buddy_idx = __find_buddy_index(page_idx, order);
buddy = page + (buddy_idx - page_idx);
- if (!is_migrate_isolate_page(buddy)) {
+ if (pfn_valid_within(page_to_pfn(buddy)) &&
+ !is_migrate_isolate_page(buddy)) {
__isolate_free_page(page, order);
kernel_map_pages(page, (1 << order), 1);
set_page_refcounted(page);
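The page_isolation.c hunk checks pfn_valid_within() on the buddy's pfn before reading its migratetype, so on configurations that allow holes inside a pageblock (where pfn_valid_within() is not a constant 1) the code never inspects a struct page that does not exist. A toy model of the guarded buddy lookup (the arrays and helper below are invented for illustration):

#include <stdbool.h>
#include <stdio.h>

#define NPAGES 8

/* Toy memory map: some slots are holes with no struct page behind them. */
static bool slot_valid[NPAGES]    = { 1, 1, 0, 0, 1, 1, 1, 1 };
static bool slot_isolated[NPAGES] = { 0, 0, 0, 0, 0, 0, 1, 0 };

/* Guarded buddy check: confirm the buddy index maps to a real page
 * (the pfn_valid_within() role) before reading its state. */
static bool buddy_is_isolated(int page_idx, int order)
{
        int buddy_idx = page_idx ^ (1 << order);   /* __find_buddy_index() */

        if (!slot_valid[buddy_idx])
                return false;                      /* hole inside the block */
        return slot_isolated[buddy_idx];
}

int main(void)
{
        printf("buddy of 0, order 1: %d\n", buddy_is_isolated(0, 1)); /* hole -> 0 */
        printf("buddy of 4, order 1: %d\n", buddy_is_isolated(4, 1)); /* slot 6 -> 1 */
        return 0;
}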
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 08bd7a3d464a..a8b5e749e84e 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -289,7 +289,8 @@ static int create_handle_cache(struct zs_pool *pool)
static void destroy_handle_cache(struct zs_pool *pool)
{
- kmem_cache_destroy(pool->handle_cachep);
+ if (pool->handle_cachep)
+ kmem_cache_destroy(pool->handle_cachep);
}
static unsigned long alloc_handle(struct zs_pool *pool)
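The zsmalloc.c hunk guards destroy_handle_cache() so it never hands kmem_cache_destroy() a NULL cache when pool setup failed before the handle cache was created, letting the error path call the destroy helper unconditionally. The snippet below is a userspace analogy of that guarded-teardown idiom using free() in place of kmem_cache_destroy(); the struct and helper are invented for illustration:

#include <stdio.h>
#include <stdlib.h>

struct pool { void *handle_cachep; };

/* Tolerate a pool whose cache was never created: the create function's
 * error path can then share this destroy unconditionally. */
static void destroy_handle_cache(struct pool *p)
{
        if (p->handle_cachep) {
                free(p->handle_cachep);
                p->handle_cachep = NULL;
        }
}

int main(void)
{
        struct pool ok  = { .handle_cachep = malloc(16) };
        struct pool bad = { .handle_cachep = NULL };     /* creation failed */

        destroy_handle_cache(&ok);
        destroy_handle_cache(&bad);   /* no-op instead of a bogus destroy */
        printf("done\n");
        return 0;
}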