Diffstat (limited to 'mm/vmscan.c')
 mm/vmscan.c | 53 ++++++++++++++++++++++++-----------------------------
 1 file changed, 24 insertions(+), 29 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 0fe8b7113868..744f926af442 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2418,8 +2418,6 @@ static void shrink_node_memcg(struct pglist_data *pgdat, struct mem_cgroup *memc
if (inactive_list_is_low(lruvec, false, sc))
shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
sc, LRU_ACTIVE_ANON);
-
- throttle_vm_writeout(sc->gfp_mask);
}
/* Use reclaim/compaction for costly allocs or under memory pressure */
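
For context on the deletion above: throttle_vm_writeout() lived in
mm/page-writeback.c and stalled reclaim while global writeback pages sat
above a boosted dirty threshold. A rough sketch of its core loop (an
approximation from the pre-removal code, not a verbatim copy):

	for (;;) {
		unsigned long background_thresh, dirty_thresh;

		global_dirty_limits(&background_thresh, &dirty_thresh);
		/* some headroom so page allocators aren't DoS'ed by heavy writers */
		dirty_thresh += dirty_thresh / 10;
		if (global_node_page_state(NR_UNSTABLE_NFS) +
		    global_node_page_state(NR_WRITEBACK) <= dirty_thresh)
			break;
		congestion_wait(BLK_RW_ASYNC, HZ / 10);
	}
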
@@ -2480,7 +2478,7 @@ static inline bool should_continue_reclaim(struct pglist_data *pgdat,
* If we have not reclaimed enough pages for compaction and the
* inactive lists are large enough, continue reclaiming
*/
- pages_for_compaction = (2UL << sc->order);
+ pages_for_compaction = compact_gap(sc->order);
inactive_lru_pages = node_page_state(pgdat, NR_INACTIVE_FILE);
if (get_nr_swap_pages() > 0)
inactive_lru_pages += node_page_state(pgdat, NR_INACTIVE_ANON);
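
For reference, compact_gap() is the helper these hunks switch to; a minimal
sketch of it, assuming it keeps the 2UL << order value of the literals it
replaces here and below (the real definition lives in mm/internal.h):

	/*
	 * Free-page target before compaction is attempted: twice the
	 * allocation size, so the request itself and concurrent allocators
	 * have room while the migration and free scanners do their work.
	 */
	static inline unsigned long compact_gap(unsigned int order)
	{
		return 2UL << order;
	}
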
@@ -2495,7 +2493,7 @@ static inline bool should_continue_reclaim(struct pglist_data *pgdat,
continue;
switch (compaction_suitable(zone, sc->order, 0, sc->reclaim_idx)) {
- case COMPACT_PARTIAL:
+ case COMPACT_SUCCESS:
case COMPACT_CONTINUE:
return false;
default:
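
The switch above keys on three compact_result values; an abridged sketch of
the enum (see include/linux/compaction.h), with COMPACT_SUCCESS being the
renamed COMPACT_PARTIAL:

	enum compact_result {
		/* compaction cannot proceed yet; reclaim should run first */
		COMPACT_SKIPPED,
		/* compaction can and should keep scanning pageblocks */
		COMPACT_CONTINUE,
		/* watermarks already met; allocation should succeed as-is */
		COMPACT_SUCCESS,
		/* ... remaining values omitted ... */
	};
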
@@ -2598,38 +2596,35 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc)
}
/*
- * Returns true if compaction should go ahead for a high-order request, or
- * the high-order allocation would succeed without compaction.
+ * Returns true if compaction should go ahead for a costly-order request, or
+ * the allocation would already succeed without compaction. Return false if we
+ * should reclaim first.
*/
static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
{
unsigned long watermark;
- bool watermark_ok;
+ enum compact_result suitable;
- /*
- * Compaction takes time to run and there are potentially other
- * callers using the pages just freed. Continue reclaiming until
- * there is a buffer of free pages available to give compaction
- * a reasonable chance of completing and allocating the page
- */
- watermark = high_wmark_pages(zone) + (2UL << sc->order);
- watermark_ok = zone_watermark_ok_safe(zone, 0, watermark, sc->reclaim_idx);
-
- /*
- * If compaction is deferred, reclaim up to a point where
- * compaction will have a chance of success when re-enabled
- */
- if (compaction_deferred(zone, sc->order))
- return watermark_ok;
+ suitable = compaction_suitable(zone, sc->order, 0, sc->reclaim_idx);
+ if (suitable == COMPACT_SUCCESS)
+ /* Allocation should succeed already. Don't reclaim. */
+ return true;
+ if (suitable == COMPACT_SKIPPED)
+ /* Compaction cannot yet proceed. Do reclaim. */
+ return false;
/*
- * If compaction is not ready to start and allocation is not likely
- * to succeed without it, then keep reclaiming.
+ * Compaction is already possible, but it takes time to run and there
+ * are potentially other callers using the pages just freed. So proceed
+ * with reclaim to make a buffer of free pages available to give
+ * compaction a reasonable chance of completing and allocating the page.
+ * Note that we won't actually reclaim the whole buffer in one attempt
+ * as the target watermark in should_continue_reclaim() is lower. But if
+ * we are already above the high+gap watermark, don't reclaim at all.
*/
- if (compaction_suitable(zone, sc->order, 0, sc->reclaim_idx) == COMPACT_SKIPPED)
- return false;
+ watermark = high_wmark_pages(zone) + compact_gap(sc->order);
- return watermark_ok;
+ return zone_watermark_ok_safe(zone, 0, watermark, sc->reclaim_idx);
}
/*
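
A worked example of the buffer the rewritten compaction_ready() asks for:
with 4KB pages and an order-9 request (a 2MB THP), and assuming the
compact_gap() sketch above:

	watermark = high_wmark_pages(zone) + compact_gap(9);
	/* compact_gap(9) == 2UL << 9 == 1024 pages == 4MB of headroom */

so reclaim keeps running until the zone holds the high watermark plus
roughly 4MB free, giving compaction space to migrate pages into.
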
@@ -3041,7 +3036,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
*/
nid = mem_cgroup_select_victim_node(memcg);
- zonelist = NODE_DATA(nid)->node_zonelists;
+ zonelist = &NODE_DATA(nid)->node_zonelists[ZONELIST_FALLBACK];
trace_mm_vmscan_memcg_reclaim_begin(0,
sc.may_writepage,
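
The one-liner above is about expressing intent: node_zonelists is an array,
and the old code leaned on array-to-pointer decay, which only picked the
right zonelist because ZONELIST_FALLBACK happens to be index 0. A hedged
sketch of the relevant definitions (after include/linux/mmzone.h):

	enum {
		ZONELIST_FALLBACK,	/* zonelist with fallback to other nodes */
	#ifdef CONFIG_NUMA
		ZONELIST_NOFALLBACK,	/* zonelist without fallback (__GFP_THISNODE) */
	#endif
		MAX_ZONELISTS
	};

	typedef struct pglist_data {
		struct zonelist node_zonelists[MAX_ZONELISTS];
		/* ... */
	} pg_data_t;
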
@@ -3169,7 +3164,7 @@ static bool kswapd_shrink_node(pg_data_t *pgdat,
 * excessive reclaim. Assume that a process that requested a
 * high-order allocation can direct reclaim/compact.
*/
- if (sc->order && sc->nr_reclaimed >= 2UL << sc->order)
+ if (sc->order && sc->nr_reclaimed >= compact_gap(sc->order))
sc->order = 0;
return sc->nr_scanned >= sc->nr_to_reclaim;
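
With that change, kswapd's back-off threshold scales the same way as the
compaction gap used elsewhere in this patch. Quick arithmetic, again
assuming compact_gap(order) == 2UL << order:

	/* order-3 request: 2UL << 3 == 16 pages reclaimed before backing off */
	/* order-9 request: 2UL << 9 == 1024 pages (4MB with 4KB pages) */

Once that many pages have been reclaimed on behalf of a high-order request,
sc->order drops to 0 and watermarks are rechecked at order-0 only, so a
single allocation cannot drive unbounded reclaim.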