about summary refs log tree commit diff stats
diff options
context:
space:
mode:
Diffstat (limited to '')
-rw-r--r--mm/compaction.c23
1 files changed, 4 insertions, 19 deletions
diff --git a/mm/compaction.c b/mm/compaction.c
index 68e3c214bcbd..9c7d43fd4655 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -405,21 +405,6 @@ static bool compact_lock_irqsave(spinlock_t *lock, unsigned long *flags,
}
/*
- * Aside from avoiding lock contention, compaction also periodically checks
- * need_resched() and records async compaction as contended if necessary.
- */
-static inline void compact_check_resched(struct compact_control *cc)
-{
- /* async compaction aborts if contended */
- if (need_resched()) {
- if (cc->mode == MIGRATE_ASYNC)
- cc->contended = true;
-
- cond_resched();
- }
-}
-
-/*
* Compaction requires the taking of some coarse locks that are potentially
* very heavily contended. The lock should be periodically unlocked to avoid
* having disabled IRQs for a long time, even when there is nobody waiting on
@@ -447,7 +432,7 @@ static bool compact_unlock_should_abort(spinlock_t *lock,
return true;
}
- compact_check_resched(cc);
+ cond_resched();
return false;
}
@@ -736,7 +721,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
return 0;
}
- compact_check_resched(cc);
+ cond_resched();
if (cc->direct_compaction && (cc->mode == MIGRATE_ASYNC)) {
skip_on_failure = true;
@@ -1370,7 +1355,7 @@ static void isolate_freepages(struct compact_control *cc)
* suitable migration targets, so periodically check resched.
*/
if (!(block_start_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages)))
- compact_check_resched(cc);
+ cond_resched();
page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
zone);
@@ -1666,7 +1651,7 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
* need to schedule.
*/
if (!(low_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages)))
- compact_check_resched(cc);
+ cond_resched();
page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
zone);