[15/25] mm, compaction: Finish pageblock scanning on contention
diff mbox series

Message ID 20190104125011.16071-16-mgorman@techsingularity.net
State In Next
Commit 94d5992baaa56d65c9c837b82918fce77f850099
Headers show
Series
  • Increase success rates and reduce latency of compaction v2
Related show

Commit Message

Mel Gorman Jan. 4, 2019, 12:50 p.m. UTC
Async migration aborts on spinlock contention but contention can be high
when there are multiple compaction attempts and kswapd is active. The
consequence is that the migration scanners move forward uselessly while
still contending on locks for longer while leaving suitable migration
sources behind.

This patch will acquire the lock but track when contention occurs. When
it does, the current pageblock will finish as compaction may succeed for
that block and then abort. This will have a variable impact on latency as
in some cases useless scanning is avoided (reduces latency) but a lock
will be contended (increase latency) or a single contended pageblock is
scanned that would otherwise have been skipped (increase latency).

                                        4.20.0                 4.20.0
                                norescan-v2r15    finishcontend-v2r15
Amean     fault-both-1         0.00 (   0.00%)        0.00 *   0.00%*
Amean     fault-both-3      2872.13 (   0.00%)     2973.08 (  -3.51%)
Amean     fault-both-5      4330.56 (   0.00%)     3870.19 (  10.63%)
Amean     fault-both-7      6496.63 (   0.00%)     6580.50 (  -1.29%)
Amean     fault-both-12    10280.59 (   0.00%)     9527.40 (   7.33%)
Amean     fault-both-18    11079.19 (   0.00%)    13395.86 * -20.91%*
Amean     fault-both-24    17207.80 (   0.00%)    14936.94 *  13.20%*
Amean     fault-both-30    17736.13 (   0.00%)    16748.46 (   5.57%)
Amean     fault-both-32    18509.41 (   0.00%)    18521.30 (  -0.06%)

                                   4.20.0                 4.20.0
                           norescan-v2r15    finishcontend-v2r15
Percentage huge-1         0.00 (   0.00%)        0.00 (   0.00%)
Percentage huge-3        96.87 (   0.00%)       97.57 (   0.72%)
Percentage huge-5        94.63 (   0.00%)       96.88 (   2.39%)
Percentage huge-7        93.83 (   0.00%)       95.47 (   1.74%)
Percentage huge-12       92.65 (   0.00%)       98.64 (   6.47%)
Percentage huge-18       93.66 (   0.00%)       98.33 (   4.98%)
Percentage huge-24       93.15 (   0.00%)       98.88 (   6.15%)
Percentage huge-30       93.16 (   0.00%)       97.09 (   4.21%)
Percentage huge-32       92.58 (   0.00%)       96.20 (   3.92%)

As expected, a variable impact on latency while allocation success
rates are slightly higher. System CPU usage is reduced by about 10%
but scan rate impact is mixed.

Compaction migrate scanned    31772603    19980216
Compaction free scanned       63267928   120381828

Migration scan rates are reduced 37% which is expected as a pageblock
is used by the async scanner instead of skipped but the free scanning is
increased. This can be partially accounted for by the increased success
rate but also by the fact that the scanners do not meet for longer when
pageblocks are actually used. Overall this is justified and completing
a pageblock scan is very important for later patches.

Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
---
 mm/compaction.c | 95 +++++++++++++++++++++++----------------------------------
 1 file changed, 39 insertions(+), 56 deletions(-)

Comments

Vlastimil Babka Jan. 17, 2019, 4:38 p.m. UTC | #1
On 1/4/19 1:50 PM, Mel Gorman wrote:
> Async migration aborts on spinlock contention but contention can be high
> when there are multiple compaction attempts and kswapd is active. The
> consequence is that the migration scanners move forward uselessly while
> still contending on locks for longer while leaving suitable migration
> sources behind.
> 
> This patch will acquire the lock but track when contention occurs. When
> it does, the current pageblock will finish as compaction may succeed for
> that block and then abort. This will have a variable impact on latency as
> in some cases useless scanning is avoided (reduces latency) but a lock
> will be contended (increase latency) or a single contended pageblock is
> scanned that would otherwise have been skipped (increase latency).
> 
>                                         4.20.0                 4.20.0
>                                 norescan-v2r15    finishcontend-v2r15
> Amean     fault-both-1         0.00 (   0.00%)        0.00 *   0.00%*
> Amean     fault-both-3      2872.13 (   0.00%)     2973.08 (  -3.51%)
> Amean     fault-both-5      4330.56 (   0.00%)     3870.19 (  10.63%)
> Amean     fault-both-7      6496.63 (   0.00%)     6580.50 (  -1.29%)
> Amean     fault-both-12    10280.59 (   0.00%)     9527.40 (   7.33%)
> Amean     fault-both-18    11079.19 (   0.00%)    13395.86 * -20.91%*
> Amean     fault-both-24    17207.80 (   0.00%)    14936.94 *  13.20%*
> Amean     fault-both-30    17736.13 (   0.00%)    16748.46 (   5.57%)
> Amean     fault-both-32    18509.41 (   0.00%)    18521.30 (  -0.06%)
> 
>                                    4.20.0                 4.20.0
>                            norescan-v2r15    finishcontend-v2r15
> Percentage huge-1         0.00 (   0.00%)        0.00 (   0.00%)
> Percentage huge-3        96.87 (   0.00%)       97.57 (   0.72%)
> Percentage huge-5        94.63 (   0.00%)       96.88 (   2.39%)
> Percentage huge-7        93.83 (   0.00%)       95.47 (   1.74%)
> Percentage huge-12       92.65 (   0.00%)       98.64 (   6.47%)
> Percentage huge-18       93.66 (   0.00%)       98.33 (   4.98%)
> Percentage huge-24       93.15 (   0.00%)       98.88 (   6.15%)
> Percentage huge-30       93.16 (   0.00%)       97.09 (   4.21%)
> Percentage huge-32       92.58 (   0.00%)       96.20 (   3.92%)
> 
> As expected, a variable impact on latency while allocation success
> rates are slightly higher. System CPU usage is reduced by about 10%
> but scan rate impact is mixed
> 
> Compaction migrate scanned    31772603    19980216
> Compaction free scanned       63267928   120381828
> 
> Migration scan rates are reduced 37% which is expected as a pageblock
> is used by the async scanner instead of skipped but the free scanning is
> increased. This can be partially accounted for by the increased success
> rate but also by the fact that the scanners do not meet for longer when
> pageblocks are actually used. Overall this is justified and completing
> a pageblock scan is very important for later patches.
> 
> Signed-off-by: Mel Gorman <mgorman@techsingularity.net>

Acked-by: Vlastimil Babka <vbabka@suse.cz>

Some comments below.

> @@ -538,18 +535,8 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
>  		 * recheck as well.
>  		 */
>  		if (!locked) {
> -			/*
> -			 * The zone lock must be held to isolate freepages.
> -			 * Unfortunately this is a very coarse lock and can be
> -			 * heavily contended if there are parallel allocations
> -			 * or parallel compactions. For async compaction do not
> -			 * spin on the lock and we acquire the lock as late as
> -			 * possible.
> -			 */
> -			locked = compact_trylock_irqsave(&cc->zone->lock,
> +			locked = compact_lock_irqsave(&cc->zone->lock,
>  								&flags, cc);
> -			if (!locked)
> -				break;

Seems a bit dangerous for compact_lock_irqsave() to continue returning a bool
that now always returns true, while removing the safety checks that test the
result. Easy for somebody in the future to reintroduce some 'return false'
condition (even though the name now says lock and not trylock) and start
crashing. I would either change it to return void, or leave the checks in place.

>  
>  			/* Recheck this is a buddy page under lock */
>  			if (!PageBuddy(page))
> @@ -910,15 +897,9 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
>  
>  		/* If we already hold the lock, we can skip some rechecking */
>  		if (!locked) {
> -			locked = compact_trylock_irqsave(zone_lru_lock(zone),
> +			locked = compact_lock_irqsave(zone_lru_lock(zone),
>  								&flags, cc);
>  
> -			/* Allow future scanning if the lock is contended */
> -			if (!locked) {
> -				clear_pageblock_skip(page);
> -				break;
> -			}

Ditto.

> -
>  			/* Try get exclusive access under lock */
>  			if (!skip_updated) {
>  				skip_updated = true;
> @@ -961,9 +942,12 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
>  
>  		/*
>  		 * Avoid isolating too much unless this block is being
> -		 * rescanned (e.g. dirty/writeback pages, parallel allocation).
> +		 * rescanned (e.g. dirty/writeback pages, parallel allocation)
> +		 * or a lock is contended. For contention, isolate quickly to
> +		 * potentially remove one source of contention.
>  		 */
> -		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX && !cc->rescan) {
> +		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX &&
> +		    !cc->rescan && !cc->contended) {
>  			++low_pfn;
>  			break;
>  		}
> @@ -1411,12 +1395,8 @@ static void isolate_freepages(struct compact_control *cc)
>  		isolate_freepages_block(cc, &isolate_start_pfn, block_end_pfn,
>  					freelist, false);
>  
> -		/*
> -		 * If we isolated enough freepages, or aborted due to lock
> -		 * contention, terminate.
> -		 */
> -		if ((cc->nr_freepages >= cc->nr_migratepages)
> -							|| cc->contended) {

Does it really make sense to continue in the case of free scanner, when we know
we will just return back the extra pages in the end? release_freepages() will
update the cached pfns, but the pageblock skip bit will stay, so we just leave
those pages behind. Unless finishing the block is important for the later
patches (as changelog mentions) even in the case of free scanner, but then we
can just skip the rest of it, as truly scanning it can't really help anything?

> +		/* Are enough freepages isolated? */
> +		if (cc->nr_freepages >= cc->nr_migratepages) {
>  			if (isolate_start_pfn >= block_end_pfn) {
>  				/*
>  				 * Restart at previous pageblock if more
Mel Gorman Jan. 17, 2019, 5:11 p.m. UTC | #2
On Thu, Jan 17, 2019 at 05:38:36PM +0100, Vlastimil Babka wrote:
> > rate but also by the fact that the scanners do not meet for longer when
> > pageblocks are actually used. Overall this is justified and completing
> > a pageblock scan is very important for later patches.
> > 
> > Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
> 
> Acked-by: Vlastimil Babka <vbabka@suse.cz>
> 
> Some comments below.
> 

Thanks

> > @@ -538,18 +535,8 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
> >  		 * recheck as well.
> >  		 */
> >  		if (!locked) {
> > -			/*
> > -			 * The zone lock must be held to isolate freepages.
> > -			 * Unfortunately this is a very coarse lock and can be
> > -			 * heavily contended if there are parallel allocations
> > -			 * or parallel compactions. For async compaction do not
> > -			 * spin on the lock and we acquire the lock as late as
> > -			 * possible.
> > -			 */
> > -			locked = compact_trylock_irqsave(&cc->zone->lock,
> > +			locked = compact_lock_irqsave(&cc->zone->lock,
> >  								&flags, cc);
> > -			if (!locked)
> > -				break;
> 
> Seems a bit dangerous to continue compact_lock_irqsave() to return bool that
> however now always returns true, and remove the safety checks that test the
> result. Easy for somebody in the future to reintroduce some 'return false'
> condition (even though the name now says lock and not trylock) and start
> crashing. I would either change it to return void, or leave the checks in place.
> 

I considered changing it from bool at the same time as "Rework
compact_should_abort as compact_check_resched". It turned out to be a
bit clumsy because the locked state must be explicitly updated in the
caller then. e.g.

locked = compact_lock_irqsave(...)

becomes

compact_lock_irqsave(...)
locked = true

I didn't think the result looked that great to be honest but maybe it's
worth revisiting as a cleanup patch like "Rework compact_should_abort as
compact_check_resched" on top.

> > 
> > @@ -1411,12 +1395,8 @@ static void isolate_freepages(struct compact_control *cc)
> >  		isolate_freepages_block(cc, &isolate_start_pfn, block_end_pfn,
> >  					freelist, false);
> >  
> > -		/*
> > -		 * If we isolated enough freepages, or aborted due to lock
> > -		 * contention, terminate.
> > -		 */
> > -		if ((cc->nr_freepages >= cc->nr_migratepages)
> > -							|| cc->contended) {
> 
> Does it really make sense to continue in the case of free scanner, when we know
> we will just return back the extra pages in the end? release_freepages() will
> update the cached pfns, but the pageblock skip bit will stay, so we just leave
> those pages behind. Unless finishing the block is important for the later
> patches (as changelog mentions) even in the case of free scanner, but then we
> can just skip the rest of it, as truly scanning it can't really help anything?
> 

Finishing is important for later patches is one factor but not the only
factor. While we eventually return all pages, we do not know at this
point in time how many free pages are needed. Remember the migration
source isolates COMPACT_CLUSTER_MAX pages and then looks for migration
targets.  If the source isolates 32 pages, free might isolate more from
one pageblock but that's ok as the migration source may need more free
pages in the immediate future. It's less wasteful than it looks at first
glance (or second or even third glance).

However, if we isolated exactly enough targets, and the pageblock gets
marked skipped, then each COMPACT_CLUSTER_MAX isolation from the target
could potentially mark one new pageblock unnecessarily and increase
scanning+resets overall. That would be bad.

There still can be waste because we do not know in advance exactly how
many migration sources there will be -- sure, we could calculate it but
that involves scanning the source pageblock twice which is wasteful.
I did try estimating it based on the remaining number of pages in the
pageblock but the additional complexity did not appear to help.

Does that make sense?
Vlastimil Babka Jan. 18, 2019, 8:57 a.m. UTC | #3
On 1/17/19 6:11 PM, Mel Gorman wrote:
> On Thu, Jan 17, 2019 at 05:38:36PM +0100, Vlastimil Babka wrote:
>> > rate but also by the fact that the scanners do not meet for longer when
>> > pageblocks are actually used. Overall this is justified and completing
>> > a pageblock scan is very important for later patches.
>> > 
>> > Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
>> 
>> Acked-by: Vlastimil Babka <vbabka@suse.cz>
>> 
>> Some comments below.
>> 
> 
> Thanks
> 
>> > @@ -538,18 +535,8 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
>> >  		 * recheck as well.
>> >  		 */
>> >  		if (!locked) {
>> > -			/*
>> > -			 * The zone lock must be held to isolate freepages.
>> > -			 * Unfortunately this is a very coarse lock and can be
>> > -			 * heavily contended if there are parallel allocations
>> > -			 * or parallel compactions. For async compaction do not
>> > -			 * spin on the lock and we acquire the lock as late as
>> > -			 * possible.
>> > -			 */
>> > -			locked = compact_trylock_irqsave(&cc->zone->lock,
>> > +			locked = compact_lock_irqsave(&cc->zone->lock,
>> >  								&flags, cc);
>> > -			if (!locked)
>> > -				break;
>> 
>> Seems a bit dangerous to continue compact_lock_irqsave() to return bool that
>> however now always returns true, and remove the safety checks that test the
>> result. Easy for somebody in the future to reintroduce some 'return false'
>> condition (even though the name now says lock and not trylock) and start
>> crashing. I would either change it to return void, or leave the checks in place.
>> 
> 
> I considered changing it from bool at the same time as "Rework
> compact_should_abort as compact_check_resched". It turned out to be a
> bit clumsy because the locked state must be explicitly updated in the
> caller then. e.g.
> 
> locked = compact_lock_irqsave(...)
> 
> becomes
> 
> compact_lock_irqsave(...)
> locked = true
> 
> I didn't think the result looked that great to be honest but maybe it's
> worth revisiting as a cleanup patch like "Rework compact_should_abort as
> compact_check_resched" on top.
> 
>> > 
>> > @@ -1411,12 +1395,8 @@ static void isolate_freepages(struct compact_control *cc)
>> >  		isolate_freepages_block(cc, &isolate_start_pfn, block_end_pfn,
>> >  					freelist, false);
>> >  
>> > -		/*
>> > -		 * If we isolated enough freepages, or aborted due to lock
>> > -		 * contention, terminate.
>> > -		 */
>> > -		if ((cc->nr_freepages >= cc->nr_migratepages)
>> > -							|| cc->contended) {
>> 
>> Does it really make sense to continue in the case of free scanner, when we know
>> we will just return back the extra pages in the end? release_freepages() will
>> update the cached pfns, but the pageblock skip bit will stay, so we just leave
>> those pages behind. Unless finishing the block is important for the later
>> patches (as changelog mentions) even in the case of free scanner, but then we
>> can just skip the rest of it, as truly scanning it can't really help anything?
>> 
> 
> Finishing is important for later patches is one factor but not the only
> factor. While we eventually return all pages, we do not know at this
> point in time how many free pages are needed. Remember the migration
> source isolates COMPACT_CLUSTER_MAX pages and then looks for migration
> targets.  If the source isolates 32 pages, free might isolate more from
> one pageblock but that's ok as the migration source may need more free
> pages in the immediate future. It's less wasteful than it looks at first
> glance (or second or even third glance).
> 
> However, if we isolated exactly enough targets, and the pageblock gets
> marked skipped, then each COMPACT_CLUSTER_MAX isolation from the target
> could potentially marge one new pageblock unnecessarily and increase
> scanning+resets overall. That would be bad.
> 
> There still can be waste because we do not know in advance exactly how
> many migration sources there will be -- sure, we could calculate it but
> that involves scanning the source pageblock twice which is wasteful.
> I did try estimating it based on the remaining number of pages in the
> pageblock but the additional complexity did not appear to help.
> 
> Does that make sense?

OK, thanks.

Patch
diff mbox series

diff --git a/mm/compaction.c b/mm/compaction.c
index 9c2cc7955446..608d274f9880 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -376,24 +376,25 @@  static bool test_and_set_skip(struct compact_control *cc, struct page *page,
 
 /*
  * Compaction requires the taking of some coarse locks that are potentially
- * very heavily contended. For async compaction, back out if the lock cannot
- * be taken immediately. For sync compaction, spin on the lock if needed.
+ * very heavily contended. For async compaction, trylock and record if the
+ * lock is contended. The lock will still be acquired but compaction will
+ * abort when the current block is finished regardless of success rate.
+ * Sync compaction acquires the lock.
  *
- * Returns true if the lock is held
- * Returns false if the lock is not held and compaction should abort
+ * Always returns true which makes it easier to track lock state in callers.
  */
-static bool compact_trylock_irqsave(spinlock_t *lock, unsigned long *flags,
+static bool compact_lock_irqsave(spinlock_t *lock, unsigned long *flags,
 						struct compact_control *cc)
 {
-	if (cc->mode == MIGRATE_ASYNC) {
-		if (!spin_trylock_irqsave(lock, *flags)) {
-			cc->contended = true;
-			return false;
-		}
-	} else {
-		spin_lock_irqsave(lock, *flags);
+	/* Track if the lock is contended in async mode */
+	if (cc->mode == MIGRATE_ASYNC && !cc->contended) {
+		if (spin_trylock_irqsave(lock, *flags))
+			return true;
+
+		cc->contended = true;
 	}
 
+	spin_lock_irqsave(lock, *flags);
 	return true;
 }
 
@@ -426,10 +427,8 @@  static bool compact_unlock_should_abort(spinlock_t *lock,
 	}
 
 	if (need_resched()) {
-		if (cc->mode == MIGRATE_ASYNC) {
+		if (cc->mode == MIGRATE_ASYNC)
 			cc->contended = true;
-			return true;
-		}
 		cond_resched();
 	}
 
@@ -449,10 +448,8 @@  static inline bool compact_should_abort(struct compact_control *cc)
 {
 	/* async compaction aborts if contended */
 	if (need_resched()) {
-		if (cc->mode == MIGRATE_ASYNC) {
+		if (cc->mode == MIGRATE_ASYNC)
 			cc->contended = true;
-			return true;
-		}
 
 		cond_resched();
 	}
@@ -538,18 +535,8 @@  static unsigned long isolate_freepages_block(struct compact_control *cc,
 		 * recheck as well.
 		 */
 		if (!locked) {
-			/*
-			 * The zone lock must be held to isolate freepages.
-			 * Unfortunately this is a very coarse lock and can be
-			 * heavily contended if there are parallel allocations
-			 * or parallel compactions. For async compaction do not
-			 * spin on the lock and we acquire the lock as late as
-			 * possible.
-			 */
-			locked = compact_trylock_irqsave(&cc->zone->lock,
+			locked = compact_lock_irqsave(&cc->zone->lock,
 								&flags, cc);
-			if (!locked)
-				break;
 
 			/* Recheck this is a buddy page under lock */
 			if (!PageBuddy(page))
@@ -910,15 +897,9 @@  isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 
 		/* If we already hold the lock, we can skip some rechecking */
 		if (!locked) {
-			locked = compact_trylock_irqsave(zone_lru_lock(zone),
+			locked = compact_lock_irqsave(zone_lru_lock(zone),
 								&flags, cc);
 
-			/* Allow future scanning if the lock is contended */
-			if (!locked) {
-				clear_pageblock_skip(page);
-				break;
-			}
-
 			/* Try get exclusive access under lock */
 			if (!skip_updated) {
 				skip_updated = true;
@@ -961,9 +942,12 @@  isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 
 		/*
 		 * Avoid isolating too much unless this block is being
-		 * rescanned (e.g. dirty/writeback pages, parallel allocation).
+		 * rescanned (e.g. dirty/writeback pages, parallel allocation)
+		 * or a lock is contended. For contention, isolate quickly to
+		 * potentially remove one source of contention.
 		 */
-		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX && !cc->rescan) {
+		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX &&
+		    !cc->rescan && !cc->contended) {
 			++low_pfn;
 			break;
 		}
@@ -1411,12 +1395,8 @@  static void isolate_freepages(struct compact_control *cc)
 		isolate_freepages_block(cc, &isolate_start_pfn, block_end_pfn,
 					freelist, false);
 
-		/*
-		 * If we isolated enough freepages, or aborted due to lock
-		 * contention, terminate.
-		 */
-		if ((cc->nr_freepages >= cc->nr_migratepages)
-							|| cc->contended) {
+		/* Are enough freepages isolated? */
+		if (cc->nr_freepages >= cc->nr_migratepages) {
 			if (isolate_start_pfn >= block_end_pfn) {
 				/*
 				 * Restart at previous pageblock if more
@@ -1458,13 +1438,8 @@  static struct page *compaction_alloc(struct page *migratepage,
 	struct compact_control *cc = (struct compact_control *)data;
 	struct page *freepage;
 
-	/*
-	 * Isolate free pages if necessary, and if we are not aborting due to
-	 * contention.
-	 */
 	if (list_empty(&cc->freepages)) {
-		if (!cc->contended)
-			isolate_freepages(cc);
+		isolate_freepages(cc);
 
 		if (list_empty(&cc->freepages))
 			return NULL;
@@ -1729,7 +1704,7 @@  static isolate_migrate_t isolate_migratepages(struct zone *zone,
 		low_pfn = isolate_migratepages_block(cc, low_pfn,
 						block_end_pfn, isolate_mode);
 
-		if (!low_pfn || cc->contended)
+		if (!low_pfn)
 			return ISOLATE_ABORT;
 
 		/*
@@ -1759,9 +1734,7 @@  static enum compact_result __compact_finished(struct compact_control *cc)
 {
 	unsigned int order;
 	const int migratetype = cc->migratetype;
-
-	if (cc->contended || fatal_signal_pending(current))
-		return COMPACT_CONTENDED;
+	int ret;
 
 	/* Compaction run completes if the migrate and free scanner meet */
 	if (compact_scanners_met(cc)) {
@@ -1796,6 +1769,7 @@  static enum compact_result __compact_finished(struct compact_control *cc)
 		return COMPACT_CONTINUE;
 
 	/* Direct compactor: Is a suitable page free? */
+	ret = COMPACT_NO_SUITABLE_PAGE;
 	for (order = cc->order; order < MAX_ORDER; order++) {
 		struct free_area *area = &cc->zone->free_area[order];
 		bool can_steal;
@@ -1835,11 +1809,15 @@  static enum compact_result __compact_finished(struct compact_control *cc)
 				return COMPACT_SUCCESS;
 			}
 
-			return COMPACT_CONTINUE;
+			ret = COMPACT_CONTINUE;
+			break;
 		}
 	}
 
-	return COMPACT_NO_SUITABLE_PAGE;
+	if (cc->contended || fatal_signal_pending(current))
+		ret = COMPACT_CONTENDED;
+
+	return ret;
 }
 
 static enum compact_result compact_finished(struct compact_control *cc)
@@ -1981,6 +1959,7 @@  static enum compact_result compact_zone(struct compact_control *cc)
 	unsigned long end_pfn = zone_end_pfn(cc->zone);
 	unsigned long last_migrated_pfn;
 	const bool sync = cc->mode != MIGRATE_ASYNC;
+	unsigned long a, b, c;
 
 	cc->migratetype = gfpflags_to_migratetype(cc->gfp_mask);
 	ret = compaction_suitable(cc->zone, cc->order, cc->alloc_flags,
@@ -2026,6 +2005,10 @@  static enum compact_result compact_zone(struct compact_control *cc)
 			cc->whole_zone = true;
 	}
 
+	a = cc->migrate_pfn;
+	b = cc->free_pfn;
+	c = (cc->free_pfn - cc->migrate_pfn) / pageblock_nr_pages;
+
 	last_migrated_pfn = 0;
 
 	trace_mm_compaction_begin(start_pfn, cc->migrate_pfn,