Diffstat (limited to 'mm/compaction.c')
-rw-r--r--  mm/compaction.c | 87
 1 file changed, 76 insertions(+), 11 deletions(-)
diff --git a/mm/compaction.c b/mm/compaction.c
index 1067c07cb33d..26bb20ef853d 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -223,9 +223,21 @@ static void update_pageblock_skip(struct compact_control *cc,
}
#endif /* CONFIG_COMPACTION */
-static inline bool should_release_lock(spinlock_t *lock)
+static int should_release_lock(spinlock_t *lock)
{
- return need_resched() || spin_is_contended(lock);
+ /*
+ * Sched contention has higher priority here as we may potentially
+ * have to abort whole compaction ASAP. Returning with lock contention
+ * means we will try another zone, and further decisions are
+ * influenced only when all zones are lock contended. That means
+ * potentially missing a lock contention is less critical.
+ */
+ if (need_resched())
+ return COMPACT_CONTENDED_SCHED;
+ else if (spin_is_contended(lock))
+ return COMPACT_CONTENDED_LOCK;
+
+ return COMPACT_CONTENDED_NONE;
}
/*
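The COMPACT_CONTENDED_* values returned above are not defined anywhere in this file's diff; presumably they are introduced elsewhere in the series (mm/internal.h). A minimal sketch of what the rest of the patch appears to rely on, with values chosen so that only the lock-contended case survives the &= accumulation used later in try_to_compact_pages():

/*
 * Sketch only, assuming the definitions live in mm/internal.h (not part of
 * this diff): distinct values for "no contention", "need_resched()/fatal
 * signal" and "zone or lru lock contended".
 */
#define COMPACT_CONTENDED_NONE	0
#define COMPACT_CONTENDED_SCHED	1
#define COMPACT_CONTENDED_LOCK	2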
@@ -240,7 +252,9 @@ static inline bool should_release_lock(spinlock_t *lock)
static bool compact_checklock_irqsave(spinlock_t *lock, unsigned long *flags,
bool locked, struct compact_control *cc)
{
- if (should_release_lock(lock)) {
+ int contended = should_release_lock(lock);
+
+ if (contended) {
if (locked) {
spin_unlock_irqrestore(lock, *flags);
locked = false;
@@ -248,7 +262,7 @@ static bool compact_checklock_irqsave(spinlock_t *lock, unsigned long *flags,
/* async aborts if taking too long or contended */
if (cc->mode == MIGRATE_ASYNC) {
- cc->contended = true;
+ cc->contended = contended;
return false;
}
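With cc->contended now carrying one of the COMPACT_CONTENDED_* values instead of a plain true/false, the contended field of struct compact_control has to widen accordingly. That struct lives in mm/internal.h and is outside this diff; a sketch of the implied change, under that assumption:

/*
 * Sketch only: the field of struct compact_control this patch appears to
 * assume, i.e. an int holding a COMPACT_CONTENDED_* reason instead of the
 * previous bool.  The real definition is in mm/internal.h.
 */
struct compact_control {
	/* ... other members unchanged ... */
	int contended;		/* COMPACT_CONTENDED_* reason for aborting */
};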
@@ -274,7 +288,7 @@ static inline bool compact_should_abort(struct compact_control *cc)
/* async compaction aborts if contended */
if (need_resched()) {
if (cc->mode == MIGRATE_ASYNC) {
- cc->contended = true;
+ cc->contended = COMPACT_CONTENDED_SCHED;
return true;
}
@@ -1140,7 +1154,7 @@ out:
}
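Only the signature of compact_zone_order() changes in the next hunk; its body is not shown here. Presumably the new int out-parameter simply forwards cc.contended once compact_zone() returns, roughly along these lines (a sketch, not part of the diff):

	/*
	 * Hypothetical tail of compact_zone_order(): hand the abort reason
	 * recorded in cc.contended back to try_to_compact_pages() through
	 * the new int pointer.
	 */
	ret = compact_zone(zone, &cc);
	*contended = cc.contended;
	return ret;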
static unsigned long compact_zone_order(struct zone *zone, int order,
- gfp_t gfp_mask, enum migrate_mode mode, bool *contended)
+ gfp_t gfp_mask, enum migrate_mode mode, int *contended)
{
unsigned long ret;
struct compact_control cc = {
@@ -1172,14 +1186,15 @@ int sysctl_extfrag_threshold = 500;
* @gfp_mask: The GFP mask of the current allocation
* @nodemask: The allowed nodes to allocate from
* @mode: The migration mode for async, sync light, or sync migration
- * @contended: Return value that is true if compaction was aborted due to lock contention
+ * @contended: Return value that determines if compaction was aborted due to
+ * need_resched() or lock contention
* @candidate_zone: Return the zone where we think allocation should succeed
*
* This is the main entry point for direct page compaction.
*/
unsigned long try_to_compact_pages(struct zonelist *zonelist,
int order, gfp_t gfp_mask, nodemask_t *nodemask,
- enum migrate_mode mode, bool *contended,
+ enum migrate_mode mode, int *contended,
struct zone **candidate_zone)
{
enum zone_type high_zoneidx = gfp_zone(gfp_mask);
@@ -1189,6 +1204,9 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist,
struct zone *zone;
int rc = COMPACT_DEFERRED;
int alloc_flags = 0;
+ int all_zones_contended = COMPACT_CONTENDED_LOCK; /* init for &= op */
+
+ *contended = COMPACT_CONTENDED_NONE;
/* Check if the GFP flags allow compaction */
if (!order || !may_enter_fs || !may_perform_io)
@@ -1202,13 +1220,19 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist,
for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx,
nodemask) {
int status;
+ int zone_contended;
if (compaction_deferred(zone, order))
continue;
status = compact_zone_order(zone, order, gfp_mask, mode,
- contended);
+ &zone_contended);
rc = max(status, rc);
+ /*
+ * It takes at least one zone that wasn't lock contended
+ * to clear all_zones_contended.
+ */
+ all_zones_contended &= zone_contended;
/* If a normal allocation would succeed, stop compacting */
if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0,
@@ -1221,8 +1245,21 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist,
* succeeds in this zone.
*/
compaction_defer_reset(zone, order, false);
- break;
- } else if (mode != MIGRATE_ASYNC) {
+ /*
+ * It is possible that async compaction aborted due to
+ * need_resched() and the watermarks were ok thanks to
+ * somebody else freeing memory. The allocation can
+ * however still fail so we better signal the
+ * need_resched() contention anyway (this will not
+ * prevent the allocation attempt).
+ */
+ if (zone_contended == COMPACT_CONTENDED_SCHED)
+ *contended = COMPACT_CONTENDED_SCHED;
+
+ goto break_loop;
+ }
+
+ if (mode != MIGRATE_ASYNC) {
/*
* We think that allocation won't succeed in this zone
* so we defer compaction there. If it ends up
@@ -1230,8 +1267,36 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist,
*/
defer_compaction(zone, order);
}
+
+ /*
+ * We might have stopped compacting due to need_resched() in
+ * async compaction, or due to a fatal signal detected. In that
+ * case do not try further zones and signal need_resched()
+ * contention.
+ */
+ if ((zone_contended == COMPACT_CONTENDED_SCHED)
+ || fatal_signal_pending(current)) {
+ *contended = COMPACT_CONTENDED_SCHED;
+ goto break_loop;
+ }
+
+ continue;
+break_loop:
+ /*
+ * We might not have tried all the zones, so be conservative
+ * and assume they are not all lock contended.
+ */
+ all_zones_contended = 0;
+ break;
}
+ /*
+ * If at least one zone wasn't deferred or skipped, we report if all
+ * zones that were tried were lock contended.
+ */
+ if (rc > COMPACT_SKIPPED && all_zones_contended)
+ *contended = COMPACT_CONTENDED_LOCK;
+
return rc;
}
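A stand-alone illustration (not kernel code) of the all_zones_contended accumulation above, assuming the COMPACT_CONTENDED_* values sketched earlier (NONE=0, SCHED=1, LOCK=2): initializing the accumulator to COMPACT_CONTENDED_LOCK and AND-ing every tried zone's result into it leaves COMPACT_CONTENDED_LOCK only if every zone reported lock contention, which is exactly when *contended is promoted to COMPACT_CONTENDED_LOCK at the end.

#include <stdio.h>

#define COMPACT_CONTENDED_NONE	0
#define COMPACT_CONTENDED_SCHED	1
#define COMPACT_CONTENDED_LOCK	2

/* Mirrors the &= accumulation in try_to_compact_pages(). */
static int accumulate(const int *zone_results, int nr_zones)
{
	int all_zones_contended = COMPACT_CONTENDED_LOCK; /* init for &= op */
	int i;

	for (i = 0; i < nr_zones; i++)
		all_zones_contended &= zone_results[i];

	return all_zones_contended;
}

int main(void)
{
	int all_locked[] = { COMPACT_CONTENDED_LOCK, COMPACT_CONTENDED_LOCK };
	int one_free[]   = { COMPACT_CONTENDED_LOCK, COMPACT_CONTENDED_NONE };

	printf("%d\n", accumulate(all_locked, 2));	/* 2: all zones lock contended */
	printf("%d\n", accumulate(one_free, 2));	/* 0: one uncontended zone clears it */
	return 0;
}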