Diffstat (limited to 'mm')
-rw-r--r--    mm/compaction.c | 61 +++++++++++++++++++++++--------------------------------------
1 file changed, 23 insertions(+), 38 deletions(-)
diff --git a/mm/compaction.c b/mm/compaction.c
index 78ae182aaf34..68e3c214bcbd 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -405,6 +405,21 @@ static bool compact_lock_irqsave(spinlock_t *lock, unsigned long *flags,
 }
 
 /*
+ * Aside from avoiding lock contention, compaction also periodically checks
+ * need_resched() and records async compaction as contended if necessary.
+ */
+static inline void compact_check_resched(struct compact_control *cc)
+{
+	/* async compaction aborts if contended */
+	if (need_resched()) {
+		if (cc->mode == MIGRATE_ASYNC)
+			cc->contended = true;
+
+		cond_resched();
+	}
+}
+
+/*
  * Compaction requires the taking of some coarse locks that are potentially
  * very heavily contended. The lock should be periodically unlocked to avoid
  * having disabled IRQs for a long time, even when there is nobody waiting on
@@ -432,33 +447,7 @@ static bool compact_unlock_should_abort(spinlock_t *lock,
 		return true;
 	}
 
-	if (need_resched()) {
-		if (cc->mode == MIGRATE_ASYNC)
-			cc->contended = true;
-		cond_resched();
-	}
-
-	return false;
-}
-
-/*
- * Aside from avoiding lock contention, compaction also periodically checks
- * need_resched() and either schedules in sync compaction or aborts async
- * compaction. This is similar to what compact_unlock_should_abort() does, but
- * is used where no lock is concerned.
- *
- * Returns false when no scheduling was needed, or sync compaction scheduled.
- * Returns true when async compaction should abort.
- */
-static inline bool compact_should_abort(struct compact_control *cc)
-{
-	/* async compaction aborts if contended */
-	if (need_resched()) {
-		if (cc->mode == MIGRATE_ASYNC)
-			cc->contended = true;
-
-		cond_resched();
-	}
+	compact_check_resched(cc);
 
 	return false;
 }
@@ -747,8 +736,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 		return 0;
 	}
 
-	if (compact_should_abort(cc))
-		return 0;
+	compact_check_resched(cc);
 
 	if (cc->direct_compaction && (cc->mode == MIGRATE_ASYNC)) {
 		skip_on_failure = true;
@@ -1379,12 +1367,10 @@ static void isolate_freepages(struct compact_control *cc)
 				isolate_start_pfn = block_start_pfn) {
 		/*
 		 * This can iterate a massively long zone without finding any
-		 * suitable migration targets, so periodically check if we need
-		 * to schedule, or even abort async compaction.
+		 * suitable migration targets, so periodically check resched.
 		 */
-		if (!(block_start_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
-						&& compact_should_abort(cc))
-			break;
+		if (!(block_start_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages)))
+			compact_check_resched(cc);
 
 		page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
 									zone);
@@ -1677,11 +1663,10 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
 		/*
 		 * This can potentially iterate a massively long zone with
 		 * many pageblocks unsuitable, so periodically check if we
-		 * need to schedule, or even abort async compaction.
+		 * need to schedule.
 		 */
-		if (!(low_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
-						&& compact_should_abort(cc))
-			break;
+		if (!(low_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages)))
+			compact_check_resched(cc);
 
 		page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
 									zone);
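
Note: the following is a minimal standalone sketch of the control flow of the compact_check_resched() helper introduced by this patch. The helper body is taken from the diff above; the surrounding kernel primitives (need_resched(), cond_resched(), struct compact_control, the MIGRATE_* modes) are replaced with simplified userspace stand-ins purely for illustration, so it can be compiled and run outside the kernel.

#include <stdbool.h>
#include <stdio.h>

/* stand-in for the kernel's enum migrate_mode */
enum migrate_mode { MIGRATE_ASYNC, MIGRATE_SYNC_LIGHT, MIGRATE_SYNC };

/* stand-in for the kernel's struct compact_control (only the
 * fields the helper touches) */
struct compact_control {
	enum migrate_mode mode;
	bool contended;
};

/* stand-ins for the kernel scheduling primitives; here we assume a
 * pending reschedule so the interesting branch is exercised */
static bool need_resched(void) { return true; }
static void cond_resched(void) { /* would yield the CPU in the kernel */ }

/* mirrors the helper added by the patch: record contention for async
 * compaction and yield, but never tell the caller to abort the scan */
static inline void compact_check_resched(struct compact_control *cc)
{
	/* async compaction aborts if contended */
	if (need_resched()) {
		if (cc->mode == MIGRATE_ASYNC)
			cc->contended = true;

		cond_resched();
	}
}

int main(void)
{
	struct compact_control cc = { .mode = MIGRATE_ASYNC, .contended = false };

	compact_check_resched(&cc);
	printf("contended after check: %s\n", cc.contended ? "true" : "false");
	return 0;
}

The key behavioral change the sketch highlights: unlike the removed compact_should_abort(), which returned true to make its callers bail out, compact_check_resched() returns nothing, it only records contention and calls cond_resched(). That is why the break paths guarded by the pfn-modulus checks in isolate_freepages() and isolate_migratepages() become plain helper calls in the hunks above.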