 mm/page_alloc.c | 32 ++++++++++++++------------------
 1 file changed, 14 insertions(+), 18 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 8380011d77db..a4124936303b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2680,12 +2680,10 @@ get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
 {
 	struct zoneref *z;
 	struct zone *zone;
-	bool fair_skipped;
-	bool zonelist_rescan;
+	bool fair_skipped = false;
+	bool apply_fair = (alloc_flags & ALLOC_FAIR);
 
 zonelist_scan:
-	zonelist_rescan = false;
-
 	/*
 	 * Scan zonelist, looking for a zone with enough free.
 	 * See also __cpuset_node_allowed() comment in kernel/cpuset.c.
@@ -2705,13 +2703,16 @@ zonelist_scan:
 		 * page was allocated in should have no effect on the
 		 * time the page has in memory before being reclaimed.
 		 */
-		if (alloc_flags & ALLOC_FAIR) {
-			if (!zone_local(ac->preferred_zone, zone))
-				break;
+		if (apply_fair) {
 			if (test_bit(ZONE_FAIR_DEPLETED, &zone->flags)) {
 				fair_skipped = true;
 				continue;
 			}
+			if (!zone_local(ac->preferred_zone, zone)) {
+				if (fair_skipped)
+					goto reset_fair;
+				apply_fair = false;
+			}
 		}
 		/*
 		 * When allocating a page cache page for writing, we
@@ -2800,18 +2801,13 @@ try_this_zone:
	 * include remote zones now, before entering the slowpath and waking
 	 * kswapd: prefer spilling to a remote zone over swapping locally.
 	 */
-	if (alloc_flags & ALLOC_FAIR) {
-		alloc_flags &= ~ALLOC_FAIR;
-		if (fair_skipped) {
-			zonelist_rescan = true;
-			reset_alloc_batches(ac->preferred_zone);
-		}
-		if (nr_online_nodes > 1)
-			zonelist_rescan = true;
-	}
-
-	if (zonelist_rescan)
+	if (fair_skipped) {
+reset_fair:
+		apply_fair = false;
+		fair_skipped = false;
+		reset_alloc_batches(ac->preferred_zone);
 		goto zonelist_scan;
+	}
 
 	return NULL;
 }
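
For reference, the restructured control flow can be read in isolation. Below is a minimal, compilable sketch of the patched logic; struct fake_zone and the helpers (zone array, has_free_pages, the simplified reset_alloc_batches) are stand-ins invented for illustration, not the kernel's actual types (the real code walks zonelist iterators and uses NR_ALLOC_BATCH and the ZONE_FAIR_DEPLETED bit). The apply_fair/fair_skipped/reset_fair interplay, however, mirrors the diff above:

/*
 * Standalone model of the fair zone allocation retry introduced by this
 * patch. Everything here is a simplified stand-in for kernel structures.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_zone {
	const char *name;
	bool local;		/* on the preferred (local) node? */
	bool fair_depleted;	/* stand-in for ZONE_FAIR_DEPLETED */
	bool has_free_pages;	/* stand-in for the watermark check */
};

static void reset_alloc_batches(struct fake_zone *zones, int nr)
{
	/*
	 * The kernel version replenishes NR_ALLOC_BATCH on the local
	 * zones; here we just mark them usable again.
	 */
	for (int i = 0; i < nr; i++)
		if (zones[i].local)
			zones[i].fair_depleted = false;
}

static struct fake_zone *get_page_from_freelist(struct fake_zone *zones,
						int nr, bool alloc_fair)
{
	bool fair_skipped = false;
	bool apply_fair = alloc_fair;

zonelist_scan:
	for (int i = 0; i < nr; i++) {
		struct fake_zone *zone = &zones[i];

		if (apply_fair) {
			/* Skip zones whose fair batch is used up... */
			if (zone->fair_depleted) {
				fair_skipped = true;
				continue;
			}
			/*
			 * ...and once the scan leaves the local node,
			 * stop applying the policy: retry the local
			 * zones if any were skipped, otherwise keep
			 * scanning remote zones in this same pass.
			 */
			if (!zone->local) {
				if (fair_skipped)
					goto reset_fair;
				apply_fair = false;
			}
		}
		if (zone->has_free_pages)
			return zone;	/* "try_this_zone" succeeded */
	}

	if (fair_skipped) {
reset_fair:
		apply_fair = false;
		fair_skipped = false;
		reset_alloc_batches(zones, nr);
		goto zonelist_scan;
	}
	return NULL;
}

int main(void)
{
	struct fake_zone zones[] = {
		{ "Normal (local)",  true,  true,  true  },
		{ "DMA32 (local)",   true,  true,  true  },
		{ "Normal (remote)", false, false, false },
	};
	struct fake_zone *z = get_page_from_freelist(zones, 3, true);

	printf("allocated from: %s\n", z ? z->name : "(none)");
	return 0;
}

Read against the old code, the design point is that the zonelist_rescan flag is gone: the rescan now happens at most once and only when local zones were actually skipped as depleted. When nothing was skipped, fairness is simply dropped at the local/remote boundary and the scan continues into remote zones within the same pass, whereas the old code restarted the whole zonelist unconditionally on any NUMA machine via the nr_online_nodes > 1 check.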