Commit cb810ad2 authored by Mel Gorman, committed by Linus Torvalds

mm, compaction: rework compact_should_abort as compact_check_resched

With incremental changes, compact_should_abort no longer makes any
documented sense.  Rename to compact_check_resched and update the
associated comments.  There is no benefit other than reducing redundant
code and making the intent slightly clearer.  It could potentially be
merged with earlier patches, but that would make the review slightly
harder.

Link: http://lkml.kernel.org/r/20190118175136.31341-17-mgorman@techsingularity.net
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Dan Carpenter <dan.carpenter@oracle.com>
Cc: David Rientjes <rientjes@google.com>
Cc: YueHaibing <yuehaibing@huawei.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent 8854c55f
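For illustration only, the behavioral point of the rename can be sketched as a small standalone C program. The stubs below (need_resched(), cond_resched(), struct compact_control, MIGRATE_ASYNC) are simplified stand-ins for the kernel primitives, not the real definitions: the old compact_should_abort() carried a bool return that, by this point in the series, was always false, while the new compact_check_resched() only reschedules and records async contention, so callers no longer pretend to handle an abort.

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for kernel primitives; assumptions for illustration only. */
enum migrate_mode { MIGRATE_ASYNC, MIGRATE_SYNC_LIGHT, MIGRATE_SYNC };
struct compact_control { enum migrate_mode mode; bool contended; };

static bool need_resched(void) { return true; }  /* pretend a reschedule is pending */
static void cond_resched(void) { }               /* would yield the CPU in the kernel */

/* Old helper: returned a bool, but after earlier patches it was always false. */
static inline bool compact_should_abort(struct compact_control *cc)
{
    if (need_resched()) {
        if (cc->mode == MIGRATE_ASYNC)
            cc->contended = true;
        cond_resched();
    }
    return false;
}

/* New helper: same body, no return value to misread as an abort signal. */
static inline void compact_check_resched(struct compact_control *cc)
{
    if (need_resched()) {
        if (cc->mode == MIGRATE_ASYNC)
            cc->contended = true;
        cond_resched();
    }
}

int main(void)
{
    struct compact_control cc = { .mode = MIGRATE_ASYNC, .contended = false };

    /* Old call sites had to branch on a value that was always false. */
    if (compact_should_abort(&cc))
        return 0;

    /* New call sites just check for a pending reschedule and continue. */
    compact_check_resched(&cc);

    printf("contended = %d\n", cc.contended);
    return 0;
}

Compiled as an ordinary user-space program (e.g. gcc -std=c99), this sketch prints contended = 1 for async mode, mirroring how the real helper records contention without offering an abort path.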
@@ -404,6 +404,21 @@ static bool compact_lock_irqsave(spinlock_t *lock, unsigned long *flags,
 	return true;
 }
 
+/*
+ * Aside from avoiding lock contention, compaction also periodically checks
+ * need_resched() and records async compaction as contended if necessary.
+ */
+static inline void compact_check_resched(struct compact_control *cc)
+{
+	/* async compaction aborts if contended */
+	if (need_resched()) {
+		if (cc->mode == MIGRATE_ASYNC)
+			cc->contended = true;
+
+		cond_resched();
+	}
+}
+
 /*
  * Compaction requires the taking of some coarse locks that are potentially
  * very heavily contended. The lock should be periodically unlocked to avoid
@@ -432,33 +447,7 @@ static bool compact_unlock_should_abort(spinlock_t *lock,
 		return true;
 	}
 
-	if (need_resched()) {
-		if (cc->mode == MIGRATE_ASYNC)
-			cc->contended = true;
-		cond_resched();
-	}
-
-	return false;
-}
-
-/*
- * Aside from avoiding lock contention, compaction also periodically checks
- * need_resched() and either schedules in sync compaction or aborts async
- * compaction. This is similar to what compact_unlock_should_abort() does, but
- * is used where no lock is concerned.
- *
- * Returns false when no scheduling was needed, or sync compaction scheduled.
- * Returns true when async compaction should abort.
- */
-static inline bool compact_should_abort(struct compact_control *cc)
-{
-	/* async compaction aborts if contended */
-	if (need_resched()) {
-		if (cc->mode == MIGRATE_ASYNC)
-			cc->contended = true;
-
-		cond_resched();
-	}
+	compact_check_resched(cc);
 
 	return false;
 }
@@ -747,8 +736,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 		return 0;
 	}
 
-	if (compact_should_abort(cc))
-		return 0;
+	compact_check_resched(cc);
 
 	if (cc->direct_compaction && (cc->mode == MIGRATE_ASYNC)) {
 		skip_on_failure = true;
@@ -1379,12 +1367,10 @@ static void isolate_freepages(struct compact_control *cc)
 				isolate_start_pfn = block_start_pfn) {
 		/*
 		 * This can iterate a massively long zone without finding any
-		 * suitable migration targets, so periodically check if we need
-		 * to schedule, or even abort async compaction.
+		 * suitable migration targets, so periodically check resched.
 		 */
-		if (!(block_start_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
-						&& compact_should_abort(cc))
-			break;
+		if (!(block_start_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages)))
+			compact_check_resched(cc);
 
 		page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
 									zone);
@@ -1677,11 +1663,10 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
 		/*
 		 * This can potentially iterate a massively long zone with
 		 * many pageblocks unsuitable, so periodically check if we
-		 * need to schedule, or even abort async compaction.
+		 * need to schedule.
 		 */
-		if (!(low_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
-						&& compact_should_abort(cc))
-			break;
+		if (!(low_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages)))
+			compact_check_resched(cc);
 
 		page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
 									zone);