// SPDX-License-Identifier: GPL-2.0
/*
 * linux/mm/compaction.c
 *
 * Memory compaction for the reduction of external fragmentation. Note that
 * this heavily depends upon page migration to do all the real heavy
 * lifting
 *
 * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
 */
#include <linux/cpu.h>
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/compaction.h>
#include <linux/mm_inline.h>
#include <linux/sched/signal.h>
#include <linux/backing-dev.h>
#include <linux/sysctl.h>
#include <linux/sysfs.h>
#include <linux/page-isolation.h>
#include <linux/kasan.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <linux/page_owner.h>
#include <linux/psi.h>
#include "internal.h"

#ifdef CONFIG_COMPACTION
static inline void count_compact_event(enum vm_event_item item)
{
	count_vm_event(item);
}

static inline void count_compact_events(enum vm_event_item item, long delta)
{
	count_vm_events(item, delta);
}
#else
#define count_compact_event(item) do { } while (0)
#define count_compact_events(item, delta) do { } while (0)
#endif

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

#define CREATE_TRACE_POINTS
#include <trace/events/compaction.h>

#define block_start_pfn(pfn, order)	round_down(pfn, 1UL << (order))
#define block_end_pfn(pfn, order)	ALIGN((pfn) + 1, 1UL << (order))
#define pageblock_start_pfn(pfn)	block_start_pfn(pfn, pageblock_order)
#define pageblock_end_pfn(pfn)		block_end_pfn(pfn, pageblock_order)
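
/*
 * Illustrative example (actual values depend on configuration): with 4K pages
 * and pageblock_order == 9 a pageblock covers 512 pages (2MB), so for pfn
 * 1000, block_start_pfn(1000, 9) == 512 and block_end_pfn(1000, 9) == 1024,
 * i.e. the half-open pageblock range is [512, 1024).
 */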

static unsigned long release_freepages(struct list_head *freelist)
{
	struct page *page, *next;
	unsigned long high_pfn = 0;

	list_for_each_entry_safe(page, next, freelist, lru) {
		unsigned long pfn = page_to_pfn(page);
		list_del(&page->lru);
		__free_page(page);
		if (pfn > high_pfn)
			high_pfn = pfn;
	}

	return high_pfn;
}

static void split_map_pages(struct list_head *list)
{
	unsigned int i, order, nr_pages;
	struct page *page, *next;
	LIST_HEAD(tmp_list);

	list_for_each_entry_safe(page, next, list, lru) {
		list_del(&page->lru);

		order = page_private(page);
		nr_pages = 1 << order;

		post_alloc_hook(page, order, __GFP_MOVABLE);
		if (order)
			split_page(page, order);

		for (i = 0; i < nr_pages; i++) {
			list_add(&page->lru, &tmp_list);
			page++;
		}
	}

	list_splice(&tmp_list, list);
}
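
/*
 * Illustrative walk-through of split_map_pages(): a free page isolated at
 * order 2 has that order stashed in page_private() by
 * isolate_freepages_block(); it is run through post_alloc_hook(), split into
 * four order-0 pages, and all four end up back on the caller's list ready to
 * be handed out one at a time by compaction_alloc().
 */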

#ifdef CONFIG_COMPACTION

int PageMovable(struct page *page)
{
	struct address_space *mapping;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	if (!__PageMovable(page))
		return 0;

	mapping = page_mapping(page);
	if (mapping && mapping->a_ops && mapping->a_ops->isolate_page)
		return 1;

	return 0;
}
EXPORT_SYMBOL(PageMovable);

void __SetPageMovable(struct page *page, struct address_space *mapping)
{
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE((unsigned long)mapping & PAGE_MAPPING_MOVABLE, page);
	page->mapping = (void *)((unsigned long)mapping | PAGE_MAPPING_MOVABLE);
}
EXPORT_SYMBOL(__SetPageMovable);

void __ClearPageMovable(struct page *page)
{
	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageMovable(page), page);
	/*
	 * Clear the registered address_space value while keeping the
	 * PAGE_MAPPING_MOVABLE flag so that the VM can tell the page was
	 * released by the driver after isolation and does not try to put it
	 * back during migration.
	 */
	page->mapping = (void *)((unsigned long)page->mapping &
				PAGE_MAPPING_MOVABLE);
}
EXPORT_SYMBOL(__ClearPageMovable);

/* Do not skip compaction more than 64 times */
#define COMPACT_MAX_DEFER_SHIFT 6

/*
 * Compaction is deferred when compaction fails to result in a page
 * allocation success. 1 << compact_defer_shift compactions are skipped up
 * to a limit of 1 << COMPACT_MAX_DEFER_SHIFT
 */
void defer_compaction(struct zone *zone, int order)
{
	zone->compact_considered = 0;
	zone->compact_defer_shift++;

	if (order < zone->compact_order_failed)
		zone->compact_order_failed = order;

	if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
		zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;

	trace_mm_compaction_defer_compaction(zone, order);
}

/* Returns true if compaction should be skipped this time */
bool compaction_deferred(struct zone *zone, int order)
{
	unsigned long defer_limit = 1UL << zone->compact_defer_shift;

	if (order < zone->compact_order_failed)
		return false;

	/* Avoid possible overflow */
	if (++zone->compact_considered > defer_limit)
		zone->compact_considered = defer_limit;

	if (zone->compact_considered >= defer_limit)
		return false;

	trace_mm_compaction_deferred(zone, order);

	return true;
}
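
/*
 * Worked example of the back-off (illustrative): after the first failure
 * defer_compaction() sets compact_defer_shift to 1, so compaction_deferred()
 * keeps returning true until compact_considered reaches 1 << 1 == 2, i.e.
 * one attempt is skipped before compaction is retried. Each further failure
 * doubles the window, capped at 1 << COMPACT_MAX_DEFER_SHIFT == 64.
 */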

/*
 * Update defer tracking counters after successful compaction of given order,
 * which means an allocation either succeeded (alloc_success == true) or is
 * expected to succeed.
 */
void compaction_defer_reset(struct zone *zone, int order,
		bool alloc_success)
{
	if (alloc_success) {
		zone->compact_considered = 0;
		zone->compact_defer_shift = 0;
	}
	if (order >= zone->compact_order_failed)
		zone->compact_order_failed = order + 1;

	trace_mm_compaction_defer_reset(zone, order);
}

/* Returns true if restarting compaction after many failures */
bool compaction_restarting(struct zone *zone, int order)
{
	if (order < zone->compact_order_failed)
		return false;

	return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT &&
		zone->compact_considered >= 1UL << zone->compact_defer_shift;
}

/* Returns true if the pageblock should be scanned for pages to isolate. */
static inline bool isolation_suitable(struct compact_control *cc,
					struct page *page)
{
	if (cc->ignore_skip_hint)
		return true;

	return !get_pageblock_skip(page);
}

static void reset_cached_positions(struct zone *zone)
{
	zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;
	zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;
	zone->compact_cached_free_pfn =
				pageblock_start_pfn(zone_end_pfn(zone) - 1);
}

/*
 * Compound pages of >= pageblock_order should consistently be skipped until
 * released. It is always pointless to compact pages of such order (if they are
 * migratable), and the pageblocks they occupy cannot contain any free pages.
 */
static bool pageblock_skip_persistent(struct page *page)
{
	if (!PageCompound(page))
		return false;

	page = compound_head(page);

	if (compound_order(page) >= pageblock_order)
		return true;

	return false;
}

static bool
__reset_isolation_pfn(struct zone *zone, unsigned long pfn, bool check_source,
							bool check_target)
{
	struct page *page = pfn_to_online_page(pfn);
	struct page *block_page;
	struct page *end_page;
	unsigned long block_pfn;

	if (!page)
		return false;
	if (zone != page_zone(page))
		return false;
	if (pageblock_skip_persistent(page))
		return false;

	/*
	 * If skip is already cleared do no further checking once the
	 * restart points have been set.
	 */
	if (check_source && check_target && !get_pageblock_skip(page))
		return true;

	/*
	 * If clearing skip for the target scanner, do not select a
	 * non-movable pageblock as the starting point.
	 */
	if (!check_source && check_target &&
	    get_pageblock_migratetype(page) != MIGRATE_MOVABLE)
		return false;

	/* Ensure the start of the pageblock or zone is online and valid */
	block_pfn = pageblock_start_pfn(pfn);
	block_pfn = max(block_pfn, zone->zone_start_pfn);
	block_page = pfn_to_online_page(block_pfn);
	if (block_page) {
		page = block_page;
		pfn = block_pfn;
	}

	/* Ensure the end of the pageblock or zone is online and valid */
	block_pfn = pageblock_end_pfn(pfn) - 1;
	block_pfn = min(block_pfn, zone_end_pfn(zone) - 1);
	end_page = pfn_to_online_page(block_pfn);
	if (!end_page)
		return false;

	/*
	 * Only clear the hint if a sample indicates there is either a
	 * free page or an LRU page in the block. One or other condition
	 * is necessary for the block to be a migration source/target.
	 */
	do {
		if (pfn_valid_within(pfn)) {
			if (check_source && PageLRU(page)) {
				clear_pageblock_skip(page);
				return true;
			}

			if (check_target && PageBuddy(page)) {
				clear_pageblock_skip(page);
				return true;
			}
		}

		page += (1 << PAGE_ALLOC_COSTLY_ORDER);
		pfn += (1 << PAGE_ALLOC_COSTLY_ORDER);
	} while (page <= end_page);

	return false;
}

/*
 * This function is called to clear all cached information on pageblocks that
 * should be skipped for page isolation when the migrate and free page scanner
 * meet.
 */
static void __reset_isolation_suitable(struct zone *zone)
{
	unsigned long migrate_pfn = zone->zone_start_pfn;
	unsigned long free_pfn = zone_end_pfn(zone) - 1;
	unsigned long reset_migrate = free_pfn;
	unsigned long reset_free = migrate_pfn;
	bool source_set = false;
	bool free_set = false;

	if (!zone->compact_blockskip_flush)
		return;

	zone->compact_blockskip_flush = false;

	/*
	 * Walk the zone and update pageblock skip information. Source looks
	 * for PageLRU while target looks for PageBuddy. When the scanner
	 * is found, both PageBuddy and PageLRU are checked as the pageblock
	 * is suitable as both source and target.
	 */
	for (; migrate_pfn < free_pfn; migrate_pfn += pageblock_nr_pages,
					free_pfn -= pageblock_nr_pages) {
		cond_resched();

		/* Update the migrate PFN */
		if (__reset_isolation_pfn(zone, migrate_pfn, true, source_set) &&
		    migrate_pfn < reset_migrate) {
			source_set = true;
			reset_migrate = migrate_pfn;
			zone->compact_init_migrate_pfn = reset_migrate;
			zone->compact_cached_migrate_pfn[0] = reset_migrate;
			zone->compact_cached_migrate_pfn[1] = reset_migrate;
		}

		/* Update the free PFN */
		if (__reset_isolation_pfn(zone, free_pfn, free_set, true) &&
		    free_pfn > reset_free) {
			free_set = true;
			reset_free = free_pfn;
			zone->compact_init_free_pfn = reset_free;
			zone->compact_cached_free_pfn = reset_free;
		}
	}

	/* Leave no distance if no suitable block was reset */
	if (reset_migrate >= reset_free) {
		zone->compact_cached_migrate_pfn[0] = migrate_pfn;
		zone->compact_cached_migrate_pfn[1] = migrate_pfn;
		zone->compact_cached_free_pfn = free_pfn;
	}
}

void reset_isolation_suitable(pg_data_t *pgdat)
{
	int zoneid;

	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
		struct zone *zone = &pgdat->node_zones[zoneid];
		if (!populated_zone(zone))
			continue;

		/* Only flush if a full compaction finished recently */
		if (zone->compact_blockskip_flush)
			__reset_isolation_suitable(zone);
	}
}

/*
 * Sets the pageblock skip bit if it was clear. Note that this is a hint as
 * locks are not required for read/writers. Returns true if it was already set.
 */
static bool test_and_set_skip(struct compact_control *cc, struct page *page,
							unsigned long pfn)
{
	bool skip;

	/* Do not update if the skip hint is being ignored */
	if (cc->ignore_skip_hint)
		return false;

	if (!IS_ALIGNED(pfn, pageblock_nr_pages))
		return false;

	skip = get_pageblock_skip(page);
	if (!skip && !cc->no_set_skip_hint)
		set_pageblock_skip(page);

	return skip;
}

static void update_cached_migrate(struct compact_control *cc, unsigned long pfn)
{
	struct zone *zone = cc->zone;

	pfn = pageblock_end_pfn(pfn);

	/* Set for isolation rather than compaction */
	if (cc->no_set_skip_hint)
		return;

	if (pfn > zone->compact_cached_migrate_pfn[0])
		zone->compact_cached_migrate_pfn[0] = pfn;
	if (cc->mode != MIGRATE_ASYNC &&
	    pfn > zone->compact_cached_migrate_pfn[1])
		zone->compact_cached_migrate_pfn[1] = pfn;
}

/*
 * If no pages were isolated then mark this pageblock to be skipped in the
 * future. The information is later cleared by __reset_isolation_suitable().
 */
static void update_pageblock_skip(struct compact_control *cc,
			struct page *page, unsigned long pfn)
{
	struct zone *zone = cc->zone;

	if (cc->no_set_skip_hint)
		return;

	if (!page)
		return;

	set_pageblock_skip(page);

	/* Update where async and sync compaction should restart */
	if (pfn < zone->compact_cached_free_pfn)
		zone->compact_cached_free_pfn = pfn;
}
#else
static inline bool isolation_suitable(struct compact_control *cc,
					struct page *page)
{
	return true;
}

static inline bool pageblock_skip_persistent(struct page *page)
{
	return false;
}

static inline void update_pageblock_skip(struct compact_control *cc,
			struct page *page, unsigned long pfn)
{
}

static void update_cached_migrate(struct compact_control *cc, unsigned long pfn)
{
}

static bool test_and_set_skip(struct compact_control *cc, struct page *page,
							unsigned long pfn)
{
	return false;
}
#endif /* CONFIG_COMPACTION */

/*
 * Compaction requires the taking of some coarse locks that are potentially
 * very heavily contended. For async compaction, trylock and record if the
 * lock is contended. The lock will still be acquired but compaction will
 * abort when the current block is finished regardless of success rate.
 * Sync compaction acquires the lock.
 *
 * Always returns true which makes it easier to track lock state in callers.
 */
static bool compact_lock_irqsave(spinlock_t *lock, unsigned long *flags,
						struct compact_control *cc)
	__acquires(lock)
{
	/* Track if the lock is contended in async mode */
	if (cc->mode == MIGRATE_ASYNC && !cc->contended) {
		if (spin_trylock_irqsave(lock, *flags))
			return true;

		cc->contended = true;
	}

	spin_lock_irqsave(lock, *flags);
	return true;
}

/*
 * Compaction requires the taking of some coarse locks that are potentially
 * very heavily contended. The lock should be periodically unlocked to avoid
 * having disabled IRQs for a long time, even when there is nobody waiting on
 * the lock. It might also be that allowing the IRQs will result in
 * need_resched() becoming true. If scheduling is needed, async compaction
 * aborts. Sync compaction schedules.
 * Either compaction type will also abort if a fatal signal is pending.
 * In either case if the lock was locked, it is dropped and not regained.
 *
 * Returns true if compaction should abort due to fatal signal pending, or
 *		async compaction due to need_resched()
 * Returns false when compaction can continue (sync compaction might have
 *		scheduled)
 */
static bool compact_unlock_should_abort(spinlock_t *lock,
		unsigned long flags, bool *locked, struct compact_control *cc)
{
	if (*locked) {
		spin_unlock_irqrestore(lock, flags);
		*locked = false;
	}

	if (fatal_signal_pending(current)) {
		cc->contended = true;
		return true;
	}

	cond_resched();

	return false;
}

/*
 * Isolate free pages onto a private freelist. If @strict is true, will abort
 * returning 0 on any invalid PFNs or non-free pages inside of the pageblock
 * (even though it may still end up isolating some pages).
 */
static unsigned long isolate_freepages_block(struct compact_control *cc,
				unsigned long *start_pfn,
				unsigned long end_pfn,
				struct list_head *freelist,
				unsigned int stride,
				bool strict)
{
	int nr_scanned = 0, total_isolated = 0;
	struct page *cursor;
	unsigned long flags = 0;
	bool locked = false;
	unsigned long blockpfn = *start_pfn;
	unsigned int order;

	/* Strict mode is for isolation, speed is secondary */
	if (strict)
		stride = 1;

	cursor = pfn_to_page(blockpfn);

	/* Isolate free pages. */
	for (; blockpfn < end_pfn; blockpfn += stride, cursor += stride) {
		int isolated;
		struct page *page = cursor;

		/*
		 * Periodically drop the lock (if held) regardless of its
		 * contention, to give chance to IRQs. Abort if fatal signal
		 * pending or async compaction detects need_resched()
		 */
		if (!(blockpfn % SWAP_CLUSTER_MAX)
		    && compact_unlock_should_abort(&cc->zone->lock, flags,
								&locked, cc))
			break;

		nr_scanned++;
		if (!pfn_valid_within(blockpfn))
			goto isolate_fail;

		/*
		 * For compound pages such as THP and hugetlbfs, we can save
		 * potentially a lot of iterations if we skip them at once.
		 * The check is racy, but we can consider only valid values
		 * and the only danger is skipping too much.
		 */
		if (PageCompound(page)) {
			const unsigned int order = compound_order(page);

			if (likely(order < MAX_ORDER)) {
				blockpfn += (1UL << order) - 1;
				cursor += (1UL << order) - 1;
			}
			goto isolate_fail;
		}

		if (!PageBuddy(page))
			goto isolate_fail;

		/*
		 * If we already hold the lock, we can skip some rechecking.
		 * Note that if we hold the lock now, checked_pageblock was
		 * already set in some previous iteration (or strict is true),
		 * so it is correct to skip the suitable migration target
		 * recheck as well.
		 */
		if (!locked) {
			locked = compact_lock_irqsave(&cc->zone->lock,
								&flags, cc);

			/* Recheck this is a buddy page under lock */
			if (!PageBuddy(page))
				goto isolate_fail;
		}

		/* Found a free page, will break it into order-0 pages */
		order = page_order(page);
		isolated = __isolate_free_page(page, order);
		if (!isolated)
			break;
		set_page_private(page, order);

		total_isolated += isolated;
		cc->nr_freepages += isolated;
		list_add_tail(&page->lru, freelist);

		if (!strict && cc->nr_migratepages <= cc->nr_freepages) {
			blockpfn += isolated;
			break;
		}
		/* Advance to the end of split page */
		blockpfn += isolated - 1;
		cursor += isolated - 1;
		continue;

isolate_fail:
		if (strict)
			break;
		else
			continue;

	}

	if (locked)
		spin_unlock_irqrestore(&cc->zone->lock, flags);

	/*
	 * There is a tiny chance that we have read bogus compound_order(),
	 * so be careful to not go outside of the pageblock.
	 */
	if (unlikely(blockpfn > end_pfn))
		blockpfn = end_pfn;

	trace_mm_compaction_isolate_freepages(*start_pfn, blockpfn,
					nr_scanned, total_isolated);

	/* Record how far we have got within the block */
	*start_pfn = blockpfn;

	/*
	 * If strict isolation is requested by CMA then check that all the
	 * pages requested were isolated. If there were any failures, 0 is
	 * returned and CMA will fail.
	 */
	if (strict && blockpfn < end_pfn)
		total_isolated = 0;

	cc->total_free_scanned += nr_scanned;
	if (total_isolated)
		count_compact_events(COMPACTISOLATED, total_isolated);
	return total_isolated;
}

/**
 * isolate_freepages_range() - isolate free pages.
 * @cc:        Compaction control structure.
 * @start_pfn: The first PFN to start isolating.
 * @end_pfn:   The one-past-last PFN.
 *
 * Non-free pages, invalid PFNs, or zone boundaries within the
 * [start_pfn, end_pfn) range are considered errors and cause the function to
 * undo its actions and return zero.
 *
 * Otherwise, the function returns the one-past-the-last PFN of the isolated
 * page (which may be greater than end_pfn if the end fell in the middle of
 * a free page).
 */
unsigned long
isolate_freepages_range(struct compact_control *cc,
			unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long isolated, pfn, block_start_pfn, block_end_pfn;
	LIST_HEAD(freelist);

	pfn = start_pfn;
	block_start_pfn = pageblock_start_pfn(pfn);
	if (block_start_pfn < cc->zone->zone_start_pfn)
		block_start_pfn = cc->zone->zone_start_pfn;
	block_end_pfn = pageblock_end_pfn(pfn);

	for (; pfn < end_pfn; pfn += isolated,
				block_start_pfn = block_end_pfn,
				block_end_pfn += pageblock_nr_pages) {
		/* Protect pfn from changing by isolate_freepages_block */
		unsigned long isolate_start_pfn = pfn;

		block_end_pfn = min(block_end_pfn, end_pfn);

		/*
		 * pfn could pass block_end_pfn if the isolated free page is
		 * larger than pageblock order. In this case, adjust the
		 * scanning range to the correct block.
		 */
		if (pfn >= block_end_pfn) {
			block_start_pfn = pageblock_start_pfn(pfn);
			block_end_pfn = pageblock_end_pfn(pfn);
			block_end_pfn = min(block_end_pfn, end_pfn);
		}

		if (!pageblock_pfn_to_page(block_start_pfn,
					block_end_pfn, cc->zone))
			break;

		isolated = isolate_freepages_block(cc, &isolate_start_pfn,
					block_end_pfn, &freelist, 0, true);

		/*
		 * In strict mode, isolate_freepages_block() returns 0 if
		 * there are any holes in the block (ie. invalid PFNs or
		 * non-free pages).
		 */
		if (!isolated)
			break;

		/*
		 * If we managed to isolate pages, it is always (1 << n) *
		 * pageblock_nr_pages for some non-negative n.  (Max order
		 * page may span two pageblocks).
		 */
	}

	/* __isolate_free_page() does not map the pages */
	split_map_pages(&freelist);

	if (pfn < end_pfn) {
		/* Loop terminated early, cleanup. */
		release_freepages(&freelist);
		return 0;
	}

	/* We don't use freelists for anything. */
	return pfn;
}

/* Similar to reclaim, but different enough that they don't share logic */
static bool too_many_isolated(struct zone *zone)
{
	unsigned long active, inactive, isolated;

	inactive = node_page_state(zone->zone_pgdat, NR_INACTIVE_FILE) +
			node_page_state(zone->zone_pgdat, NR_INACTIVE_ANON);
	active = node_page_state(zone->zone_pgdat, NR_ACTIVE_FILE) +
			node_page_state(zone->zone_pgdat, NR_ACTIVE_ANON);
	isolated = node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE) +
			node_page_state(zone->zone_pgdat, NR_ISOLATED_ANON);

	return isolated > (inactive + active) / 2;
}
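
/*
 * Illustrative threshold: with 800 inactive and 200 active pages on the
 * node's LRU lists, too_many_isolated() makes compaction wait once more
 * than (800 + 200) / 2 == 500 pages sit on the NR_ISOLATED counters,
 * mirroring the back-off used by reclaim.
 */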

/**
 * isolate_migratepages_block() - isolate all migrate-able pages within
 *				  a single pageblock
 * @cc:		Compaction control structure.
 * @low_pfn:	The first PFN to isolate
 * @end_pfn:	The one-past-the-last PFN to isolate, within same pageblock
 * @isolate_mode: Isolation mode to be used.
 *
 * Isolate all pages that can be migrated from the range specified by
 * [low_pfn, end_pfn). The range is expected to be within same pageblock.
 * Returns zero if there is a fatal signal pending, otherwise PFN of the
 * first page that was not scanned (which may be less than, equal to, or more
 * than end_pfn).
 *
 * The pages are isolated on cc->migratepages list (not required to be empty),
 * and cc->nr_migratepages is updated accordingly. The cc->migrate_pfn field
 * is neither read nor updated.
 */
static unsigned long
isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
			unsigned long end_pfn, isolate_mode_t isolate_mode)
{
	struct zone *zone = cc->zone;
	unsigned long nr_scanned = 0, nr_isolated = 0;
	struct lruvec *lruvec;
	unsigned long flags = 0;
	bool locked = false;
	struct page *page = NULL, *valid_page = NULL;
	unsigned long start_pfn = low_pfn;
	bool skip_on_failure = false;
	unsigned long next_skip_pfn = 0;
	bool skip_updated = false;

	/*
	 * Ensure that there are not too many pages isolated from the LRU
	 * list by either parallel reclaimers or compaction. If there are,
	 * delay for some time until fewer pages are isolated
	 */
	while (unlikely(too_many_isolated(zone))) {
		/* async migration should just abort */
		if (cc->mode == MIGRATE_ASYNC)
			return 0;

		congestion_wait(BLK_RW_ASYNC, HZ/10);

		if (fatal_signal_pending(current))
			return 0;
	}

	cond_resched();

	if (cc->direct_compaction && (cc->mode == MIGRATE_ASYNC)) {
		skip_on_failure = true;
		next_skip_pfn = block_end_pfn(low_pfn, cc->order);
	}

	/* Time to isolate some pages for migration */
	for (; low_pfn < end_pfn; low_pfn++) {

		if (skip_on_failure && low_pfn >= next_skip_pfn) {
			/*
			 * We have isolated all migration candidates in the
			 * previous order-aligned block, and did not skip it due
			 * to failure. We should migrate the pages now and
			 * hopefully succeed compaction.
			 */
			if (nr_isolated)
				break;

			/*
			 * We failed to isolate in the previous order-aligned
			 * block. Set the new boundary to the end of the
			 * current block. Note we can't simply increase
			 * next_skip_pfn by 1 << order, as low_pfn might have
			 * been incremented by a higher number due to skipping
			 * a compound or a high-order buddy page in the
			 * previous loop iteration.
			 */
			next_skip_pfn = block_end_pfn(low_pfn, cc->order);
		}

		/*
		 * Periodically drop the lock (if held) regardless of its
		 * contention, to give chance to IRQs. Abort completely if
		 * a fatal signal is pending.
		 */
		if (!(low_pfn % SWAP_CLUSTER_MAX)
		    && compact_unlock_should_abort(zone_lru_lock(zone), flags,
								&locked, cc)) {
			low_pfn = 0;
			goto fatal_pending;
		}

		if (!pfn_valid_within(low_pfn))
			goto isolate_fail;
		nr_scanned++;

		page = pfn_to_page(low_pfn);

		/*
		 * Check if the pageblock has already been marked skipped.
		 * Only the aligned PFN is checked as the caller isolates
		 * COMPACT_CLUSTER_MAX at a time so the second call must
		 * not falsely conclude that the block should be skipped.
		 */
		if (!valid_page && IS_ALIGNED(low_pfn, pageblock_nr_pages)) {
			if (!cc->ignore_skip_hint && get_pageblock_skip(page)) {
				low_pfn = end_pfn;
				goto isolate_abort;
			}
			valid_page = page;
		}

		/*
		 * Skip if free. We read page order here without zone lock
		 * which is generally unsafe, but the race window is small and
		 * the worst thing that can happen is that we skip some
		 * potential isolation targets.
		 */
		if (PageBuddy(page)) {
			unsigned long freepage_order = page_order_unsafe(page);

			/*
			 * Without lock, we cannot be sure that what we got is
			 * a valid page order. Consider only values in the
			 * valid order range to prevent low_pfn overflow.
			 */
			if (freepage_order > 0 && freepage_order < MAX_ORDER)
				low_pfn += (1UL << freepage_order) - 1;
			continue;
		}

		/*
		 * Regardless of being on LRU, compound pages such as THP and
		 * hugetlbfs are not to be compacted. We can potentially save
		 * a lot of iterations if we skip them at once. The check is
		 * racy, but we can consider only valid values and the only
		 * danger is skipping too much.
		 */
		if (PageCompound(page)) {
			const unsigned int order = compound_order(page);

			if (likely(order < MAX_ORDER))
				low_pfn += (1UL << order) - 1;
			goto isolate_fail;
		}

		/*
		 * Check may be lockless but that's ok as we recheck later.
		 * It's possible to migrate LRU and non-lru movable pages.
		 * Skip any other type of page
		 */
		if (!PageLRU(page)) {
			/*
			 * __PageMovable can return false positive so we need
			 * to verify it under page_lock.
			 */
			if (unlikely(__PageMovable(page)) &&
					!PageIsolated(page)) {
				if (locked) {
					spin_unlock_irqrestore(zone_lru_lock(zone),
									flags);
					locked = false;
				}

				if (!isolate_movable_page(page, isolate_mode))
					goto isolate_success;
			}

			goto isolate_fail;
		}

		/*
		 * Migration will fail if an anonymous page is pinned in memory,
		 * so avoid taking lru_lock and isolating it unnecessarily in an
		 * admittedly racy check.
		 */
		if (!page_mapping(page) &&
		    page_count(page) > page_mapcount(page))
			goto isolate_fail;

		/*
		 * Only allow to migrate anonymous pages in GFP_NOFS context
		 * because those do not depend on fs locks.
		 */
		if (!(cc->gfp_mask & __GFP_FS) && page_mapping(page))
			goto isolate_fail;

		/* If we already hold the lock, we can skip some rechecking */
		if (!locked) {
			locked = compact_lock_irqsave(zone_lru_lock(zone),
								&flags, cc);

			/* Try get exclusive access under lock */
			if (!skip_updated) {
				skip_updated = true;
				if (test_and_set_skip(cc, page, low_pfn))
					goto isolate_abort;
			}

			/* Recheck PageLRU and PageCompound under lock */
			if (!PageLRU(page))
				goto isolate_fail;

			/*
			 * Page became compound since the non-locked check,
			 * and it's on LRU. It can only be a THP so the order
			 * is safe to read and it's 0 for tail pages.
			 */
			if (unlikely(PageCompound(page))) {
				low_pfn += (1UL << compound_order(page)) - 1;
				goto isolate_fail;
			}
		}

		lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat);

		/* Try isolate the page */
		if (__isolate_lru_page(page, isolate_mode) != 0)
			goto isolate_fail;

		VM_BUG_ON_PAGE(PageCompound(page), page);

		/* Successfully isolated */
		del_page_from_lru_list(page, lruvec, page_lru(page));
		inc_node_page_state(page,
				NR_ISOLATED_ANON + page_is_file_cache(page));

isolate_success:
		list_add(&page->lru, &cc->migratepages);
		cc->nr_migratepages++;
		nr_isolated++;

		/*
		 * Avoid isolating too much unless this block is being
		 * rescanned (e.g. dirty/writeback pages, parallel allocation)
		 * or a lock is contended. For contention, isolate quickly to
		 * potentially remove one source of contention.
		 */
		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX &&
		    !cc->rescan && !cc->contended) {
			++low_pfn;
			break;
		}

		continue;
isolate_fail:
		if (!skip_on_failure)
			continue;

		/*
		 * We have isolated some pages, but then failed. Release them
		 * instead of migrating, as we cannot form the cc->order buddy
		 * page anyway.
		 */
		if (nr_isolated) {
			if (locked) {
				spin_unlock_irqrestore(zone_lru_lock(zone), flags);
				locked = false;
			}
			putback_movable_pages(&cc->migratepages);
			cc->nr_migratepages = 0;
			nr_isolated = 0;
		}

		if (low_pfn < next_skip_pfn) {
			low_pfn = next_skip_pfn - 1;
			/*
			 * The check near the loop beginning would have updated
			 * next_skip_pfn too, but this is a bit simpler.
			 */
			next_skip_pfn += 1UL << cc->order;
		}
	}

	/*
	 * The PageBuddy() check could have potentially brought us outside
	 * the range to be scanned.
	 */
	if (unlikely(low_pfn > end_pfn))
		low_pfn = end_pfn;

isolate_abort:
	if (locked)
		spin_unlock_irqrestore(zone_lru_lock(zone), flags);

	/*
	 * Update the cached scanner pfn once the pageblock has been scanned.
	 * Pages will either be migrated in which case there is no point
	 * scanning in the near future or migration failed in which case the
	 * failure reason may persist. The block is marked for skipping if
	 * there were no pages isolated in the block or if the block is
	 * rescanned twice in a row.
	 */
	if (low_pfn == end_pfn && (!nr_isolated || cc->rescan)) {
		if (valid_page && !skip_updated)
			set_pageblock_skip(valid_page);
		update_cached_migrate(cc, low_pfn);
	}

	trace_mm_compaction_isolate_migratepages(start_pfn, low_pfn,
						nr_scanned, nr_isolated);

fatal_pending:
	cc->total_migrate_scanned += nr_scanned;
	if (nr_isolated)
		count_compact_events(COMPACTISOLATED, nr_isolated);

	return low_pfn;
}

/**
 * isolate_migratepages_range() - isolate migrate-able pages in a PFN range
 * @cc:        Compaction control structure.
 * @start_pfn: The first PFN to start isolating.
 * @end_pfn:   The one-past-last PFN.
 *
 * Returns zero if isolation fails fatally due to e.g. pending signal.
 * Otherwise, function returns one-past-the-last PFN of isolated page
 * (which may be greater than end_pfn if end fell in the middle of a THP page).
 */
unsigned long
isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
							unsigned long end_pfn)
{
	unsigned long pfn, block_start_pfn, block_end_pfn;

	/* Scan block by block. First and last block may be incomplete */
	pfn = start_pfn;
	block_start_pfn = pageblock_start_pfn(pfn);
	if (block_start_pfn < cc->zone->zone_start_pfn)
		block_start_pfn = cc->zone->zone_start_pfn;
	block_end_pfn = pageblock_end_pfn(pfn);

	for (; pfn < end_pfn; pfn = block_end_pfn,
				block_start_pfn = block_end_pfn,
				block_end_pfn += pageblock_nr_pages) {

		block_end_pfn = min(block_end_pfn, end_pfn);

		if (!pageblock_pfn_to_page(block_start_pfn,
					block_end_pfn, cc->zone))
			continue;

		pfn = isolate_migratepages_block(cc, pfn, block_end_pfn,
							ISOLATE_UNEVICTABLE);

		if (!pfn)
			break;

		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX)
			break;
	}

	return pfn;
}

#endif /* CONFIG_COMPACTION || CONFIG_CMA */
#ifdef CONFIG_COMPACTION

static bool suitable_migration_source(struct compact_control *cc,
							struct page *page)
{
	int block_mt;

	if (pageblock_skip_persistent(page))
		return false;

	if ((cc->mode != MIGRATE_ASYNC) || !cc->direct_compaction)
		return true;

	block_mt = get_pageblock_migratetype(page);

	if (cc->migratetype == MIGRATE_MOVABLE)
		return is_migrate_movable(block_mt);
	else
		return block_mt == cc->migratetype;
}

/* Returns true if the page is within a block suitable for migration to */
static bool suitable_migration_target(struct compact_control *cc,
							struct page *page)
{
	/* If the page is a large free page, then disallow migration */
	if (PageBuddy(page)) {
		/*
		 * We are checking page_order without zone->lock taken. But
		 * the only small danger is that we skip a potentially suitable
		 * pageblock, so it's not worth to check order for valid range.
		 */
		if (page_order_unsafe(page) >= pageblock_order)
			return false;
	}

	if (cc->ignore_block_suitable)
		return true;

	/* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
	if (is_migrate_movable(get_pageblock_migratetype(page)))
		return true;

	/* Otherwise skip the block */
	return false;
}

static inline unsigned int
freelist_scan_limit(struct compact_control *cc)
{
	unsigned short shift = BITS_PER_LONG - 1;

	return (COMPACT_CLUSTER_MAX >> min(shift, cc->fast_search_fail)) + 1;
}
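
/*
 * Illustrative values, assuming COMPACT_CLUSTER_MAX is 32: with no recent
 * fast-search failures the limit is 32 + 1 == 33 list entries; each failure
 * halves it (17, 9, 5, 3, 2) until it settles at 1 once the shift exceeds
 * the width of COMPACT_CLUSTER_MAX.
 */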

/*
 * Test whether the free scanner has reached the same or lower pageblock than
 * the migration scanner, and compaction should thus terminate.
 */
static inline bool compact_scanners_met(struct compact_control *cc)
{
	return (cc->free_pfn >> pageblock_order)
		<= (cc->migrate_pfn >> pageblock_order);
}

/*
 * Used when scanning for a suitable migration target which scans freelists
 * in reverse. Reorders the list so that the unscanned pages are scanned
 * first on the next iteration of the free scanner
 */
static void
move_freelist_head(struct list_head *freelist, struct page *freepage)
{
	LIST_HEAD(sublist);

	if (!list_is_last(freelist, &freepage->lru)) {
		list_cut_before(&sublist, freelist, &freepage->lru);
		if (!list_empty(&sublist))
			list_splice_tail(&sublist, freelist);
	}
}

/*
 * Similar to move_freelist_head except used by the migration scanner
 * when scanning forward. It's possible for these list operations to
 * move against each other if they search the free list exactly in
 * lockstep.
 */
static void
move_freelist_tail(struct list_head *freelist, struct page *freepage)
{
	LIST_HEAD(sublist);

	if (!list_is_first(freelist, &freepage->lru)) {
		list_cut_position(&sublist, freelist, &freepage->lru);
		if (!list_empty(&sublist))
			list_splice_tail(&sublist, freelist);
	}
}

static void
fast_isolate_around(struct compact_control *cc, unsigned long pfn, unsigned long nr_isolated)
{
	unsigned long start_pfn, end_pfn;
	struct page *page = pfn_to_page(pfn);

	/* Do not search around if there are enough pages already */
	if (cc->nr_freepages >= cc->nr_migratepages)
		return;

	/* Minimise scanning during async compaction */
	if (cc->direct_compaction && cc->mode == MIGRATE_ASYNC)
		return;

	/* Pageblock boundaries */
	start_pfn = pageblock_start_pfn(pfn);
	end_pfn = min(pageblock_end_pfn(pfn), zone_end_pfn(cc->zone)) - 1;

	/* Scan before */
	if (start_pfn != pfn) {
		isolate_freepages_block(cc, &start_pfn, pfn, &cc->freepages, 1, false);
		if (cc->nr_freepages >= cc->nr_migratepages)
			return;
	}

	/* Scan after */
	start_pfn = pfn + nr_isolated;
	if (start_pfn < end_pfn)
		isolate_freepages_block(cc, &start_pfn, end_pfn, &cc->freepages, 1, false);

	/* Skip this pageblock in the future as it's full or nearly full */
	if (cc->nr_freepages < cc->nr_migratepages)
		set_pageblock_skip(page);
}

/* Search orders in round-robin fashion */
static int next_search_order(struct compact_control *cc, int order)
{
	order--;
	if (order < 0)
		order = cc->order - 1;

	/* Search wrapped around? */
	if (order == cc->search_order) {
		cc->search_order--;
		if (cc->search_order < 0)
			cc->search_order = cc->order - 1;
		return -1;
	}

	return order;
}
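
/*
 * Illustrative walk: for cc->order == 4 and cc->search_order == 1, the loop
 * in fast_isolate_freepages() visits orders 1, 0, 3, 2 and then
 * next_search_order() returns -1 to stop once the search wraps back around
 * to the starting order.
 */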

static unsigned long
fast_isolate_freepages(struct compact_control *cc)
{
	unsigned int limit = min(1U, freelist_scan_limit(cc) >> 1);
	unsigned int nr_scanned = 0;
	unsigned long low_pfn, min_pfn, high_pfn = 0, highest = 0;
	unsigned long nr_isolated = 0;
	unsigned long distance;
	struct page *page = NULL;
	bool scan_start = false;
	int order;

	/* Full compaction passes in a negative order */
	if (cc->order <= 0)
		return cc->free_pfn;

	/*
	 * If starting the scan, use a deeper search and use the highest
	 * PFN found if a suitable one is not found.
	 */
	if (cc->free_pfn >= cc->zone->compact_init_free_pfn) {
		limit = pageblock_nr_pages >> 1;
		scan_start = true;
	}

	/*
	 * Preferred point is in the top quarter of the scan space but take
	 * a pfn from the top half if the search is problematic.
	 */
	distance = (cc->free_pfn - cc->migrate_pfn);
	low_pfn = pageblock_start_pfn(cc->free_pfn - (distance >> 2));
	min_pfn = pageblock_start_pfn(cc->free_pfn - (distance >> 1));

	if (WARN_ON_ONCE(min_pfn > low_pfn))
		low_pfn = min_pfn;

	/*
	 * Search starts from the last successful isolation order or the next
	 * order to search after a previous failure
	 */
	cc->search_order = min_t(unsigned int, cc->order - 1, cc->search_order);

	for (order = cc->search_order;
	     !page && order >= 0;
	     order = next_search_order(cc, order)) {
		struct free_area *area = &cc->zone->free_area[order];
		struct list_head *freelist;
		struct page *freepage;
		unsigned long flags;
		unsigned int order_scanned = 0;

		if (!area->nr_free)
			continue;

		spin_lock_irqsave(&cc->zone->lock, flags);
		freelist = &area->free_list[MIGRATE_MOVABLE];
		list_for_each_entry_reverse(freepage, freelist, lru) {
			unsigned long pfn;

			order_scanned++;
			nr_scanned++;
			pfn = page_to_pfn(freepage);

			if (pfn >= highest)
				highest = pageblock_start_pfn(pfn);

			if (pfn >= low_pfn) {
				cc->fast_search_fail = 0;
				cc->search_order = order;
				page = freepage;
				break;
			}

			if (pfn >= min_pfn && pfn > high_pfn) {
				high_pfn = pfn;

				/* Shorten the scan if a candidate is found */
				limit >>= 1;
			}

			if (order_scanned >= limit)
				break;
		}

		/* Use a minimum pfn if a preferred one was not found */
		if (!page && high_pfn) {
			page = pfn_to_page(high_pfn);

			/* Update freepage for the list reorder below */
			freepage = page;
		}

		/* Reorder so that a future search skips recent pages */
		move_freelist_head(freelist, freepage);

		/* Isolate the page if available */
		if (page) {
			if (__isolate_free_page(page, order)) {
				set_page_private(page, order);
				nr_isolated = 1 << order;
				cc->nr_freepages += nr_isolated;
				list_add_tail(&page->lru, &cc->freepages);
				count_compact_events(COMPACTISOLATED, nr_isolated);
			} else {
				/* If isolation fails, abort the search */
				order = cc->search_order + 1;
				page = NULL;
			}
		}

		spin_unlock_irqrestore(&cc->zone->lock, flags);

		/*
		 * Smaller scan on next order so the total scan is related
		 * to freelist_scan_limit.
		 */
		if (order_scanned >= limit)
			limit = min(1U, limit >> 1);
	}

	if (!page) {
		cc->fast_search_fail++;
		if (scan_start) {
			/*
			 * Use the highest PFN found above min. If one was
			 * not found, be pessimistic for direct compaction
			 * and use the min mark.
			 */
			if (highest) {
				page = pfn_to_page(highest);
				cc->free_pfn = highest;
			} else {
				if (cc->direct_compaction && pfn_valid(min_pfn)) {
					page = pfn_to_page(min_pfn);
					cc->free_pfn = min_pfn;
				}
			}
		}
	}

	if (highest && highest >= cc->zone->compact_cached_free_pfn) {
		highest -= pageblock_nr_pages;
		cc->zone->compact_cached_free_pfn = highest;
	}

	cc->total_free_scanned += nr_scanned;
	if (!page)
		return cc->free_pfn;

	low_pfn = page_to_pfn(page);
	fast_isolate_around(cc, low_pfn, nr_isolated);
	return low_pfn;
}

/*
 * Based on information in the current compact_control, find blocks
 * suitable for isolating free pages from and then isolate them.
 */
static void isolate_freepages(struct compact_control *cc)
{
	struct zone *zone = cc->zone;
	struct page *page;
	unsigned long block_start_pfn;	/* start of current pageblock */
	unsigned long isolate_start_pfn; /* exact pfn we start at */
	unsigned long block_end_pfn;	/* end of current pageblock */
	unsigned long low_pfn;	     /* lowest pfn scanner is able to scan */
	struct list_head *freelist = &cc->freepages;
	unsigned int stride;

	/* Try a small search of the free lists for a candidate */
	isolate_start_pfn = fast_isolate_freepages(cc);
	if (cc->nr_freepages)
		goto splitmap;

	/*
	 * Initialise the free scanner. The starting point is where we last
	 * successfully isolated from, zone-cached value, or the end of the
	 * zone when isolating for the first time. For looping we also need
	 * this pfn aligned down to the pageblock boundary, because we do
	 * block_start_pfn -= pageblock_nr_pages in the for loop.
	 * For ending point, take care when isolating in the last pageblock of
	 * a zone which ends in the middle of a pageblock.
	 * The low boundary is the end of the pageblock the migration scanner
	 * is using.
	 */
	isolate_start_pfn = cc->free_pfn;
	block_start_pfn = pageblock_start_pfn(isolate_start_pfn);
	block_end_pfn = min(block_start_pfn + pageblock_nr_pages,
						zone_end_pfn(zone));
	low_pfn = pageblock_end_pfn(cc->migrate_pfn);
	stride = cc->mode == MIGRATE_ASYNC ? COMPACT_CLUSTER_MAX : 1;

	/*
	 * Isolate free pages until enough are available to migrate the
	 * pages on cc->migratepages. We stop searching if the migrate
	 * and free page scanners meet or enough free pages are isolated.
	 */
	for (; block_start_pfn >= low_pfn;
				block_end_pfn = block_start_pfn,
				block_start_pfn -= pageblock_nr_pages,
				isolate_start_pfn = block_start_pfn) {
		unsigned long nr_isolated;

		/*
		 * This can iterate a massively long zone without finding any
		 * suitable migration targets, so periodically check resched.
		 */
		if (!(block_start_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages)))
			cond_resched();

		page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
									zone);
		if (!page)
			continue;

		/* Check the block is suitable for migration */
		if (!suitable_migration_target(cc, page))
			continue;

		/* If isolation recently failed, do not retry */
		if (!isolation_suitable(cc, page))
			continue;

		/* Found a block suitable for isolating free pages from. */
		nr_isolated = isolate_freepages_block(cc, &isolate_start_pfn,
					block_end_pfn, freelist, stride, false);

		/* Update the skip hint if the full pageblock was scanned */
		if (isolate_start_pfn == block_end_pfn)
			update_pageblock_skip(cc, page, block_start_pfn);

		/* Are enough freepages isolated? */
		if (cc->nr_freepages >= cc->nr_migratepages) {
			if (isolate_start_pfn >= block_end_pfn) {
				/*
				 * Restart at previous pageblock if more
				 * freepages can be isolated next time.
				 */
				isolate_start_pfn =
					block_start_pfn - pageblock_nr_pages;
			}
			break;
		} else if (isolate_start_pfn < block_end_pfn) {
			/*
			 * If isolation failed early, do not continue
			 * needlessly.
			 */
			break;
		}

		/* Adjust stride depending on isolation */
		if (nr_isolated) {
			stride = 1;
			continue;
		}
		stride = min_t(unsigned int, COMPACT_CLUSTER_MAX, stride << 1);
	}

	/*
	 * Record where the free scanner will restart next time. Either we
	 * broke from the loop and set isolate_start_pfn based on the last
	 * call to isolate_freepages_block(), or we met the migration scanner
	 * and the loop terminated due to isolate_start_pfn < low_pfn
	 */
	cc->free_pfn = isolate_start_pfn;

splitmap:
	/* __isolate_free_page() does not map the pages */
	split_map_pages(freelist);
}

/*
 * This is a migrate-callback that "allocates" freepages by taking pages
 * from the isolated freelists in the block we are migrating to.
 */
static struct page *compaction_alloc(struct page *migratepage,
					unsigned long data)
{
	struct compact_control *cc = (struct compact_control *)data;
	struct page *freepage;

	if (list_empty(&cc->freepages)) {
		isolate_freepages(cc);

		if (list_empty(&cc->freepages))
			return NULL;
	}

	freepage = list_entry(cc->freepages.next, struct page, lru);
	list_del(&freepage->lru);
	cc->nr_freepages--;

	return freepage;
}

/*
 * This is a migrate-callback that "frees" freepages back to the isolated
 * freelist.  All pages on the freelist are from the same zone, so there is no
 * special handling needed for NUMA.
 */
static void compaction_free(struct page *page, unsigned long data)
{
	struct compact_control *cc = (struct compact_control *)data;

	list_add(&page->lru, &cc->freepages);
	cc->nr_freepages++;
}

/* possible outcome of isolate_migratepages */
typedef enum {
	ISOLATE_ABORT,		/* Abort compaction now */
	ISOLATE_NONE,		/* No pages isolated, continue scanning */
	ISOLATE_SUCCESS,	/* Pages isolated, migrate */
} isolate_migrate_t;

/*
 * Allow userspace to control policy on scanning the unevictable LRU for
 * compactable pages.
 */
int sysctl_compact_unevictable_allowed __read_mostly = 1;

static inline void
update_fast_start_pfn(struct compact_control *cc, unsigned long pfn)
{
	if (cc->fast_start_pfn == ULONG_MAX)
		return;

	if (!cc->fast_start_pfn)
		cc->fast_start_pfn = pfn;

	cc->fast_start_pfn = min(cc->fast_start_pfn, pfn);
}

static inline unsigned long
reinit_migrate_pfn(struct compact_control *cc)
{
	if (!cc->fast_start_pfn || cc->fast_start_pfn == ULONG_MAX)
		return cc->migrate_pfn;

	cc->migrate_pfn = cc->fast_start_pfn;
	cc->fast_start_pfn = ULONG_MAX;

	return cc->migrate_pfn;
}

/*
 * Briefly search the free lists for a migration source that already has
 * some free pages to reduce the number of pages that need migration
 * before a pageblock is free.
 */
static unsigned long fast_find_migrateblock(struct compact_control *cc)
{
	unsigned int limit = freelist_scan_limit(cc);
	unsigned int nr_scanned = 0;
	unsigned long distance;
	unsigned long pfn = cc->migrate_pfn;
	unsigned long high_pfn;
	int order;

	/* Skip hints are relied on to avoid repeats on the fast search */
	if (cc->ignore_skip_hint)
		return pfn;

	/*
	 * If the migrate_pfn is not at the start of a zone or the start
	 * of a pageblock then assume this is a continuation of a previous
	 * scan restarted due to COMPACT_CLUSTER_MAX.
	 */
	if (pfn != cc->zone->zone_start_pfn && pfn != pageblock_start_pfn(pfn))
		return pfn;

	/*
	 * For smaller orders, just linearly scan as the number of pages
	 * to migrate should be relatively small and does not necessarily
	 * justify freeing up a large block for a small allocation.
	 */
	if (cc->order <= PAGE_ALLOC_COSTLY_ORDER)
		return pfn;

	/*
	 * Only allow kcompactd and direct requests for movable pages to
	 * quickly clear out a MOVABLE pageblock for allocation. This
	 * reduces the risk that a large movable pageblock is freed for
	 * an unmovable/reclaimable small allocation.
	 */
	if (cc->direct_compaction && cc->migratetype != MIGRATE_MOVABLE)
		return pfn;

	/*
	 * When starting the migration scanner, pick any pageblock within the
	 * first half of the search space. Otherwise try and pick a pageblock
	 * within the first eighth to reduce the chances that a migration
	 * target later becomes a source.
	 */
	distance = (cc->free_pfn - cc->migrate_pfn) >> 1;
	if (cc->migrate_pfn != cc->zone->zone_start_pfn)
		distance >>= 2;
	high_pfn = pageblock_start_pfn(cc->migrate_pfn + distance);

	for (order = cc->order - 1;
	     order >= PAGE_ALLOC_COSTLY_ORDER && pfn == cc->migrate_pfn && nr_scanned < limit;
	     order--) {
		struct free_area *area = &cc->zone->free_area[order];
		struct list_head *freelist;
		unsigned long flags;
		struct page *freepage;

		if (!area->nr_free)
			continue;

		spin_lock_irqsave(&cc->zone->lock, flags);
		freelist = &area->free_list[MIGRATE_MOVABLE];
		list_for_each_entry(freepage, freelist, lru) {
			unsigned long free_pfn;

			nr_scanned++;
			free_pfn = page_to_pfn(freepage);
			if (free_pfn < high_pfn) {
				/*
				 * Avoid if skipped recently. Ideally it would
				 * move to the tail but even safe iteration of
				 * the list assumes an entry is deleted, not
				 * reordered.
				 */
				if (get_pageblock_skip(freepage)) {
					if (list_is_last(freelist, &freepage->lru))
						break;

					continue;
				}

				/* Reorder so that a future search skips recent pages */
				move_freelist_tail(freelist, freepage);

				update_fast_start_pfn(cc, free_pfn);
				pfn = pageblock_start_pfn(free_pfn);
				cc->fast_search_fail = 0;
				set_pageblock_skip(freepage);
				break;
			}

			if (nr_scanned >= limit) {
				cc->fast_search_fail++;
				move_freelist_tail(freelist, freepage);
				break;
			}
		}
		spin_unlock_irqrestore(&cc->zone->lock, flags);
	}

	cc->total_migrate_scanned += nr_scanned;

	/*
	 * If fast scanning failed then use a cached entry for a page block
	 * that had free pages as the basis for starting a linear scan.
	 */
	if (pfn == cc->migrate_pfn)
		pfn = reinit_migrate_pfn(cc);

	return pfn;
}
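
/*
 * Illustrative example of the window above: with the migration scanner at
 * the zone start (pfn 0) and the free scanner at pfn 262144, a candidate
 * block is accepted anywhere in the first half of the gap (below pfn
 * 131072). On a restart part-way through the zone the window shrinks to
 * the first eighth of the remaining gap.
 */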

/*
 * Isolate all pages that can be migrated from the first suitable block,
 * starting at the block pointed to by the migrate scanner pfn within
 * compact_control.
 */
static isolate_migrate_t isolate_migratepages(struct compact_control *cc)
{
	unsigned long block_start_pfn;
	unsigned long block_end_pfn;
	unsigned long low_pfn;
	struct page *page;
	const isolate_mode_t isolate_mode =
		(sysctl_compact_unevictable_allowed ? ISOLATE_UNEVICTABLE : 0) |
		(cc->mode != MIGRATE_SYNC ? ISOLATE_ASYNC_MIGRATE : 0);
	bool fast_find_block;

	/*
	 * Start at where we last stopped, or beginning of the zone as
	 * initialized by compact_zone(). The first failure will use
	 * the lowest PFN as the starting point for linear scanning.
	 */
	low_pfn = fast_find_migrateblock(cc);
	block_start_pfn = pageblock_start_pfn(low_pfn);
	if (block_start_pfn < cc->zone->zone_start_pfn)
		block_start_pfn = cc->zone->zone_start_pfn;
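	/*
	 * Illustrative note: with a typical pageblock_order of 9,
	 * pageblock_start_pfn() rounds low_pfn down to a 512-page
	 * boundary, so the clamp above only matters when the zone itself
	 * does not start on a pageblock boundary.
	 */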

	/*
	 * fast_find_migrateblock marks a pageblock skipped so to avoid
	 * the isolation_suitable check below, check whether the fast
	 * search was successful.
	 */
	fast_find_block = low_pfn != cc->migrate_pfn && !cc->fast_search_fail;

	/* Only scan within a pageblock boundary */
	block_end_pfn = pageblock_end_pfn(low_pfn);

	/*
	 * Iterate over whole pageblocks until we find the first suitable.
	 * Do not cross the free scanner.
	 */
	for (; block_end_pfn <= cc->free_pfn;
			fast_find_block = false,
			low_pfn = block_end_pfn,
			block_start_pfn = block_end_pfn,
			block_end_pfn += pageblock_nr_pages) {

		/*
		 * This can potentially iterate a massively long zone with
		 * many pageblocks unsuitable, so periodically check if we
		 * need to schedule.
		 */
		if (!(low_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages)))
			cond_resched();

		page = pageblock_pfn_to_page(block_start_pfn,
						block_end_pfn, cc->zone);
		if (!page)
			continue;

		/*
		 * If isolation recently failed, do not retry. Only check the
		 * pageblock once. COMPACT_CLUSTER_MAX causes a pageblock
		 * to be visited multiple times. Assume skip was checked
		 * before making it "skip" so other compaction instances do
		 * not scan the same block.
		 */
		if (IS_ALIGNED(low_pfn, pageblock_nr_pages) &&
		    !fast_find_block && !isolation_suitable(cc, page))
			continue;

		/*
		 * For async compaction, also only scan in MOVABLE blocks
		 * without huge pages. Async compaction is optimistic to see
		 * if the minimum amount of work satisfies the allocation.
		 * The cached PFN is updated as it's possible that all
		 * remaining blocks between source and target are unsuitable
		 * and the compaction scanners fail to meet.
		 */
		if (!suitable_migration_source(cc, page)) {
			update_cached_migrate(cc, block_end_pfn);
			continue;
		}

		/* Perform the isolation */
		low_pfn = isolate_migratepages_block(cc, low_pfn,
						block_end_pfn, isolate_mode);

		if (!low_pfn)
			return ISOLATE_ABORT;

		/*
		 * Either we isolated something and will proceed with
		 * migration, or we failed and compact_zone should decide
		 * whether to continue or not.
		 */
		break;
	}

	/* Record where migration scanner will be restarted. */
	cc->migrate_pfn = low_pfn;

	return cc->nr_migratepages ? ISOLATE_SUCCESS : ISOLATE_NONE;
}

/*
 * order == -1 is expected when compacting via
 * /proc/sys/vm/compact_memory
 */
static inline bool is_via_compact_memory(int order)
{
	return order == -1;
}

static enum compact_result __compact_finished(struct compact_control *cc)
{
	unsigned int order;
	const int migratetype = cc->migratetype;
	int ret;

	/* Compaction run completes if the migrate and free scanner meet */
	if (compact_scanners_met(cc)) {
		/* Let the next compaction start anew. */
		reset_cached_positions(cc->zone);

		/*
		 * Mark that the PG_migrate_skip information should be cleared
		 * by kswapd when it goes to sleep. kcompactd does not set the
		 * flag itself as the decision to clear it should be based
		 * directly on an allocation request.
		 */
		if (cc->direct_compaction)
			cc->zone->compact_blockskip_flush = true;

		if (cc->whole_zone)
			return COMPACT_COMPLETE;
		else
			return COMPACT_PARTIAL_SKIPPED;
	}

	if (is_via_compact_memory(cc->order))
		return COMPACT_CONTINUE;

	/*
	 * Always finish scanning a pageblock to reduce the possibility of
	 * fallbacks in the future. This is particularly important when
	 * migration source is unmovable/reclaimable but it's not worth
	 * special casing.
	 */
	if (!IS_ALIGNED(cc->migrate_pfn, pageblock_nr_pages))
		return COMPACT_CONTINUE;
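	/*
	 * Illustrative note: with a typical pageblock_order of 9 this
	 * keeps compaction running until cc->migrate_pfn reaches a
	 * 512-page boundary, even if a suitable free page already exists,
	 * so a partially migrated pageblock is not abandoned.
	 */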

	/* Direct compactor: Is a suitable page free? */
	ret = COMPACT_NO_SUITABLE_PAGE;
	for (order = cc->order; order < MAX_ORDER; order++) {
		struct free_area *area = &cc->zone->free_area[order];
		bool can_steal;

		/* Job done if page is free of the right migratetype */
		if (!free_area_empty(area, migratetype))
			return COMPACT_SUCCESS;

#ifdef CONFIG_CMA
		/* MIGRATE_MOVABLE can fallback on MIGRATE_CMA */
		if (migratetype == MIGRATE_MOVABLE &&
			!free_area_empty(area, MIGRATE_CMA))
			return COMPACT_SUCCESS;
#endif
		/*
		 * Job done if allocation would steal freepages from
		 * other migratetype buddy lists.
		 */
		if (find_suitable_fallback(area, order, migratetype,
						true, &can_steal) != -1) {

			/* movable pages are OK in any pageblock */
			if (migratetype == MIGRATE_MOVABLE)
				return COMPACT_SUCCESS;

			/*
			 * We are stealing for a non-movable allocation. Make
			 * sure we finish compacting the current pageblock
			 * first so it is as free as possible and we won't
			 * have to steal another one soon. This only applies
			 * to sync compaction, as async compaction operates
			 * on pageblocks of the same migratetype.
			 */
			if (cc->mode == MIGRATE_ASYNC ||
					IS_ALIGNED(cc->migrate_pfn,
							pageblock_nr_pages)) {
				return COMPACT_SUCCESS;
			}

			ret = COMPACT_CONTINUE;
			break;
		}
	}

	if (cc->contended || fatal_signal_pending(current))
		ret = COMPACT_CONTENDED;

	return ret;
}

static enum compact_result compact_finished(struct compact_control *cc)
{
	int ret;

	ret = __compact_finished(cc);
	trace_mm_compaction_finished(cc->zone, cc->order, ret);
	if (ret == COMPACT_NO_SUITABLE_PAGE)
		ret = COMPACT_CONTINUE;

	return ret;
}

/*
 * compaction_suitable: Is this suitable to run compaction on this zone now?
 * Returns
 *   COMPACT_SKIPPED  - If there are too few free pages for compaction
 *   COMPACT_SUCCESS  - If the allocation would succeed without compaction
 *   COMPACT_CONTINUE - If compaction should run now
 */
static enum compact_result __compaction_suitable(struct zone *zone, int order,
					unsigned int alloc_flags,
					int classzone_idx,
					unsigned long wmark_target)
{
	unsigned long watermark;

	if (is_via_compact_memory(order))
		return COMPACT_CONTINUE;

	watermark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
	/*
	 * If watermarks for high-order allocation are already met, there
	 * should be no need for compaction at all.
	 */
	if (zone_watermark_ok(zone, order, watermark, classzone_idx,
								alloc_flags))
		return COMPACT_SUCCESS;

	/*
	 * Watermarks for order-0 must be met for compaction to be able to
	 * isolate free pages for migration targets. This means that the
	 * watermark and alloc_flags have to match, or be more pessimistic than
	 * the check in __isolate_free_page(). We don't use the direct
	 * compactor's alloc_flags, as they are not relevant for freepage
	 * isolation. We however do use the direct compactor's classzone_idx to
	 * skip over zones where lowmem reserves would prevent allocation even
	 * if compaction succeeds.
	 * For costly orders, we require low watermark instead of min for
	 * compaction to proceed to increase its chances.
	 * ALLOC_CMA is used, as pages in CMA pageblocks are considered
	 * suitable migration targets
	 */
	watermark = (order > PAGE_ALLOC_COSTLY_ORDER) ?
				low_wmark_pages(zone) : min_wmark_pages(zone);
	watermark += compact_gap(order);
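	/*
	 * Worked example, assuming compact_gap() is 2UL << order as in
	 * mm/internal.h: an order-5 (costly) request needs the low
	 * watermark plus 64 extra order-0 pages, while an order-3 request
	 * only needs the min watermark plus 16 pages before compaction
	 * may proceed.
	 */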
	if (!__zone_watermark_ok(zone, 0, watermark, classzone_idx,
						ALLOC_CMA, wmark_target))
		return COMPACT_SKIPPED;

	return COMPACT_CONTINUE;
}

enum compact_result compaction_suitable(struct zone *zone, int order,
					unsigned int alloc_flags,
					int classzone_idx)
{
	enum compact_result ret;
	int fragindex;

	ret = __compaction_suitable(zone, order, alloc_flags, classzone_idx,
				    zone_page_state(zone, NR_FREE_PAGES));
	/*
	 * fragmentation index determines if allocation failures are due to
	 * low memory or external fragmentation
	 *
	 * index of -1000 would imply allocations might succeed depending on
	 * watermarks, but we already failed the high-order watermark check
	 * index towards 0 implies failure is due to lack of memory
	 * index towards 1000 implies failure is due to fragmentation
	 *
	 * Only compact if a failure would be due to fragmentation. Also
	 * ignore fragindex for non-costly orders where the alternative to
	 * a successful reclaim/compaction is OOM. Fragindex and the
	 * vm.extfrag_threshold sysctl is meant as a heuristic to prevent
	 * excessive compaction for costly orders, but it should not be at the
	 * expense of system stability.
	 */
	if (ret == COMPACT_CONTINUE && (order > PAGE_ALLOC_COSTLY_ORDER)) {
		fragindex = fragmentation_index(zone, order);
		if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
			ret = COMPACT_NOT_SUITABLE_ZONE;
	}
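	/*
	 * Example (illustrative): with the default vm.extfrag_threshold of
	 * 500, a fragindex of 450 for a costly order suggests the failure
	 * is mostly due to low memory, so compaction is skipped; a value
	 * of 800 points at external fragmentation and compaction proceeds.
	 */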

	trace_mm_compaction_suitable(zone, order, ret);
	if (ret == COMPACT_NOT_SUITABLE_ZONE)
		ret = COMPACT_SKIPPED;

	return ret;
}

bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
		int alloc_flags)
{
	struct zone *zone;
	struct zoneref *z;

	/*
	 * Make sure at least one zone would pass __compaction_suitable if we continue
	 * retrying the reclaim.
	 */
	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
					ac->nodemask) {
		unsigned long available;
		enum compact_result compact_result;

		/*
		 * Do not consider all the reclaimable memory because we do not
		 * want to thrash just for a single high-order allocation which
		 * is not even guaranteed to appear, even if __compaction_suitable
		 * is happy with the watermark check.
		 */
		available = zone_reclaimable_pages(zone) / order;
		available += zone_page_state_snapshot(zone, NR_FREE_PAGES);
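		/*
		 * Illustrative example: for an order-4 request only a
		 * quarter of the zone's reclaimable pages are credited on
		 * top of the free pages, keeping the estimate deliberately
		 * conservative.
		 */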
		compact_result = __compaction_suitable(zone, order, alloc_flags,
				ac_classzone_idx(ac), available);
		if (compact_result != COMPACT_SKIPPED)
			return true;
	}

	return false;
}

static enum compact_result
compact_zone(struct compact_control *cc, struct capture_control *capc)
{
	enum compact_result ret;
	unsigned long start_pfn = cc->zone->zone_start_pfn;
	unsigned long end_pfn = zone_end_pfn(cc->zone);
	unsigned long last_migrated_pfn;
	const bool sync = cc->mode != MIGRATE_ASYNC;
	bool update_cached;

	/*
	 * These counters track activities during zone compaction.  Initialize
	 * them before compacting a new zone.
	 */
	cc->total_migrate_scanned = 0;
	cc->total_free_scanned = 0;
	cc->nr_migratepages = 0;
	cc->nr_freepages = 0;
	INIT_LIST_HEAD(&cc->freepages);
	INIT_LIST_HEAD(&cc->migratepages);

	cc->migratetype = gfpflags_to_migratetype(cc->gfp_mask);
	ret = compaction_suitable(cc->zone, cc->order, cc->alloc_flags,
							cc->classzone_idx);
	/* Compaction is likely to fail */
	if (ret == COMPACT_SUCCESS || ret == COMPACT_SKIPPED)
		return ret;

	/* huh, compaction_suitable is returning something unexpected */
	VM_BUG_ON(ret != COMPACT_CONTINUE);

	/*
	 * Clear pageblock skip if there were failures recently and compaction
	 * is about to be retried after being deferred.
	 */
	if (compaction_restarting(cc->zone, cc->order))
		__reset_isolation_suitable(cc->zone);

	/*
	 * Setup to move all movable pages to the end of the zone. Used cached
	 * information on where the scanners should start (unless we explicitly
	 * want to compact the whole zone), but check that it is initialised
	 * by ensuring the values are within zone boundaries.
	 */
	cc->fast_start_pfn = 0;
	if (cc->whole_zone) {
		cc->migrate_pfn = start_pfn;
		cc->free_pfn = pageblock_start_pfn(end_pfn - 1);
	} else {
		cc->migrate_pfn = cc->zone->compact_cached_migrate_pfn[sync];
		cc->free_pfn = cc->zone->compact_cached_free_pfn;
		if (cc->free_pfn < start_pfn || cc->free_pfn >= end_pfn) {
			cc->free_pfn = pageblock_start_pfn(end_pfn - 1);
			cc->zone->compact_cached_free_pfn = cc->free_pfn;
		}
		if (cc->migrate_pfn < start_pfn || cc->migrate_pfn >= end_pfn) {
			cc->migrate_pfn = start_pfn;
			cc->zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn;
			cc->zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn;
		}

		if (cc->migrate_pfn <= cc->zone->compact_init_migrate_pfn)
			cc->whole_zone = true;
	}

	last_migrated_pfn = 0;

	/*
	 * Migrate has separate cached PFNs for ASYNC and SYNC* migration on
	 * the basis that some migrations will fail in ASYNC mode. However,
	 * if the cached PFNs match and pageblocks are skipped due to having
	 * no isolation candidates, then the sync state does not matter.
	 * Until a pageblock with isolation candidates is found, keep the
	 * cached PFNs in sync to avoid revisiting the same blocks.
	 */
	update_cached = !sync &&
		cc->zone->compact_cached_migrate_pfn[0] == cc->zone->compact_cached_migrate_pfn[1];

	trace_mm_compaction_begin(start_pfn, cc->migrate_pfn,
				cc->free_pfn, end_pfn, sync);

	migrate_prep_local();

	while ((ret = compact_finished(cc)) == COMPACT_CONTINUE) {
		int err;
		unsigned long start_pfn = cc->migrate_pfn;

		/*
		 * Avoid multiple rescans which can happen if a page cannot be
		 * isolated (dirty/writeback in async mode) or if the migrated
		 * pages are being allocated before the pageblock is cleared.
		 * The first rescan will capture the entire pageblock for
		 * migration. If it fails, it'll be marked skip and scanning
		 * will proceed as normal.
		 */
		cc->rescan = false;
		if (pageblock_start_pfn(last_migrated_pfn) ==
		    pageblock_start_pfn(start_pfn)) {
			cc->rescan = true;
		}

		switch (isolate_migratepages(cc)) {
		case ISOLATE_ABORT:
			ret = COMPACT_CONTENDED;
			putback_movable_pages(&cc->migratepages);
			cc->nr_migratepages = 0;
			last_migrated_pfn = 0;
			goto out;
		case ISOLATE_NONE:
			if (update_cached) {
				cc->zone->compact_cached_migrate_pfn[1] =
					cc->zone->compact_cached_migrate_pfn[0];
			}

			/*
			 * We haven't isolated and migrated anything, but
			 * there might still be unflushed migrations from
			 * previous cc->order aligned block.
			 */
			goto check_drain;
		case ISOLATE_SUCCESS:
			update_cached = false;
			last_migrated_pfn = start_pfn;
		}

		err = migrate_pages(&cc->migratepages, compaction_alloc,
				compaction_free, (unsigned long)cc, cc->mode,
				MR_COMPACTION);

		trace_mm_compaction_migratepages(cc->nr_migratepages, err,
							&cc->migratepages);

		/* All pages were either migrated or will be released */
		cc->nr_migratepages = 0;
		if (err) {
			putback_movable_pages(&cc->migratepages);
			/*
			 * migrate_pages() may return -ENOMEM when scanners meet
			 * and we want compact_finished() to detect it
			 */
			if (err == -ENOMEM && !compact_scanners_met(cc)) {
				ret = COMPACT_CONTENDED;
				goto out;
			}
			/*
			 * We failed to migrate at least one page in the current
			 * order-aligned block, so skip the rest of it.
			 */
			if (cc->direct_compaction &&
						(cc->mode == MIGRATE_ASYNC)) {
				cc->migrate_pfn = block_end_pfn(
						cc->migrate_pfn - 1, cc->order);
				/* Draining pcplists is useless in this case */
				last_migrated_pfn = 0;
			}
		}

check_drain:
		/*
		 * Has the migration scanner moved away from the previous
		 * cc->order aligned block where we migrated from? If yes,
		 * flush the pages that were freed, so that they can merge and
		 * compact_finished() can detect immediately if allocation
		 * would succeed.
		 */
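		/*
		 * Illustrative example: for an order-9 request the drain
		 * below only triggers once the migration scanner has left
		 * the 512-page aligned block it last migrated from, so the
		 * per-cpu lists are flushed only when merging can actually
		 * help the requested order.
		 */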
		if (cc->order > 0 && last_migrated_pfn) {
			int cpu;
			unsigned long current_block_start =
				block_start_pfn(cc->migrate_pfn, cc->order);

			if (last_migrated_pfn < current_block_start) {
				cpu = get_cpu();
				lru_add_drain_cpu(cpu);
				drain_local_pages(cc->zone);
				put_cpu();
				/* No more flushing until we migrate again */
				last_migrated_pfn = 0;
			}
		}

		/* Stop if a page has been captured */
		if (capc && capc->page) {
			ret = COMPACT_SUCCESS;
			break;
		}
	}

out:
	/*
	 * Release free pages and update where the free scanner should restart,
	 * so we don't leave any returned pages behind in the next attempt.
	 */
	if (cc->nr_freepages > 0) {
		unsigned long free_pfn = release_freepages(&cc->freepages);

		cc->nr_freepages = 0;
		VM_BUG_ON(free_pfn == 0);
		/* The cached pfn is always the first in a pageblock */
		free_pfn = pageblock_start_pfn(free_pfn);
		/*
		 * Only go back, not forward. The cached pfn might have been
		 * already reset to zone end in compact_finished()
		 */
		if (free_pfn > cc->zone->compact_cached_free_pfn)
			cc->zone->compact_cached_free_pfn = free_pfn;
	}

	count_compact_events(COMPACTMIGRATE_SCANNED, cc->total_migrate_scanned);
	count_compact_events(COMPACTFREE_SCANNED, cc->total_free_scanned);

	trace_mm_compaction_end(start_pfn, cc->migrate_pfn,
				cc->free_pfn, end_pfn, sync, ret);

	return ret;
}

static enum compact_result compact_zone_order(struct zone *zone, int order,
		gfp_t gfp_mask, enum compact_priority prio,
		unsigned int alloc_flags, int classzone_idx,
		struct page **capture)
{
	enum compact_result ret;
	struct compact_control cc = {
		.order = order,
		.search_order = order,
		.gfp_mask = gfp_mask,
		.zone = zone,
		.mode = (prio == COMPACT_PRIO_ASYNC) ?
					MIGRATE_ASYNC :	MIGRATE_SYNC_LIGHT,
		.alloc_flags = alloc_flags,
		.classzone_idx = classzone_idx,
		.direct_compaction = true,
		.whole_zone = (prio == MIN_COMPACT_PRIORITY),
		.ignore_skip_hint = (prio == MIN_COMPACT_PRIORITY),
		.ignore_block_suitable = (prio == MIN_COMPACT_PRIORITY)
	};
	struct capture_control capc = {
		.cc = &cc,
		.page = NULL,
	};
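	/*
	 * With capture enabled, the page allocator can hand a freshly
	 * freed page of a suitable order straight back via capc.page,
	 * letting compact_zone() stop early (see its capc->page check)
	 * instead of relying solely on the watermark check in
	 * compact_finished().
	 */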

	if (capture)
		current->capture_control = &capc;

	ret = compact_zone(&cc, &capc);

	VM_BUG_ON(!list_empty(&cc.freepages));
	VM_BUG_ON(!list_empty(&cc.migratepages));

	*capture = capc.page;
	current->capture_control = NULL;

	return ret;
}

int sysctl_extfrag_threshold = 500;

/**
 * try_to_compact_pages - Direct compact to satisfy a high-order allocation
 * @gfp_mask: The GFP mask of the current allocation
 * @order: The order of the current allocation
 * @alloc_flags: The allocation flags of the current allocation
 * @ac: The context of current allocation
 * @prio: Determines how hard direct compaction should try to succeed
 * @capture: If non-NULL, a suitable free page captured during compaction
 *	     is returned here
 *
 * This is the main entry point for direct page compaction.
 */
enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
		unsigned int alloc_flags, const struct alloc_context *ac,
		enum compact_priority prio, struct page **capture)
{
	int may_perform_io = gfp_mask & __GFP_IO;
	struct zoneref *z;
	struct zone *zone;
	enum compact_result rc = COMPACT_SKIPPED;

	/*
	 * Check if the GFP flags allow compaction - GFP_NOIO is really
	 * tricky context because the migration might require IO
	 */
	if (!may_perform_io)
		return COMPACT_SKIPPED;

	trace_mm_compaction_try_to_compact_pages(order, gfp_mask, prio);

	/* Compact each zone in the list */
	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
								ac->nodemask) {
		enum compact_result status;

		if (prio > MIN_COMPACT_PRIORITY
					&& compaction_deferred(zone, order)) {
			rc = max_t(enum compact_result, COMPACT_DEFERRED, rc);
			continue;
		}

		status = compact_zone_order(zone, order, gfp_mask, prio,
				alloc_flags, ac_classzone_idx(ac), capture);
		rc = max(status, rc);

		/* The allocation should succeed, stop compacting */
		if (status == COMPACT_SUCCESS) {
			/*
			 * We think the allocation will succeed in this zone,
			 * but it is not certain, hence the false. The caller
			 * will repeat this with true if allocation indeed
			 * succeeds in this zone.
			 */
			compaction_defer_reset(zone, order, false);

			break;
		}

		if (prio != COMPACT_PRIO_ASYNC && (status == COMPACT_COMPLETE ||
					status == COMPACT_PARTIAL_SKIPPED))
			/*
			 * We think that allocation won't succeed in this zone
			 * so we defer compaction there. If it ends up
			 * succeeding after all, it will be reset.
			 */
			defer_compaction(zone, order);

		/*
		 * We might have stopped compacting due to need_resched() in
		 * async compaction, or due to a fatal signal detected. In that
		 * case do not try further zones
		 */
		if ((prio == COMPACT_PRIO_ASYNC && need_resched())
					|| fatal_signal_pending(current))
			break;
	}

	return rc;
}


/* Compact all zones within a node */
static void compact_node(int nid)
{
	pg_data_t *pgdat = NODE_DATA(nid);
	int zoneid;
	struct zone *zone;
	struct compact_control cc = {
		.order = -1,
		.mode = MIGRATE_SYNC,
		.ignore_skip_hint = true,
		.whole_zone = true,
		.gfp_mask = GFP_KERNEL,
	};


	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {

		zone = &pgdat->node_zones[zoneid];
		if (!populated_zone(zone))
			continue;

		cc.zone = zone;

		compact_zone(&cc, NULL);

		VM_BUG_ON(!list_empty(&cc.freepages));
		VM_BUG_ON(!list_empty(&cc.migratepages));
	}
}

/* Compact all nodes in the system */
static void compact_nodes(void)
{
	int nid;

	/* Flush pending updates to the LRU lists */
	lru_add_drain_all();

	for_each_online_node(nid)
		compact_node(nid);
}

/* The written value is actually unused, all memory is compacted */
int sysctl_compact_memory;

/*
 * This is the entry point for compacting all nodes via
 * /proc/sys/vm/compact_memory
 */
int sysctl_compaction_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	if (write)
		compact_nodes();

	return 0;
}
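/*
 * Usage example (illustrative):
 *
 *	echo 1 > /proc/sys/vm/compact_memory
 *
 * The written value is ignored; any write triggers compact_nodes().
 */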

int sysctl_extfrag_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	proc_dointvec_minmax(table, write, buffer, length, ppos);

	return 0;
}

#if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
static ssize_t sysfs_compact_node(struct device *dev,
			struct device_attribute *attr,
			const char *buf, size_t count)
{
	int nid = dev->id;

	if (nid >= 0 && nid < nr_node_ids && node_online(nid)) {
		/* Flush pending updates to the LRU lists */
		lru_add_drain_all();

		compact_node(nid);
	}

	return count;
}
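/*
 * Usage example (illustrative): writing to the per-node sysfs file
 * compacts a single node, e.g.
 *
 *	echo 1 > /sys/devices/system/node/node0/compact
 */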
static DEVICE_ATTR(compact, 0200, NULL, sysfs_compact_node);

int compaction_register_node(struct node *node)
{
	return device_create_file(&node->dev, &dev_attr_compact);
}

void compaction_unregister_node(struct node *node)
{
	return device_remove_file(&node->dev, &dev_attr_compact);
}
#endif /* CONFIG_SYSFS && CONFIG_NUMA */

static inline bool kcompactd_work_requested(pg_data_t *pgdat)
{
	return pgdat->kcompactd_max_order > 0 || kthread_should_stop();
}

static bool kcompactd_node_suitable(pg_data_t *pgdat)
{
	int zoneid;
	struct zone *zone;
	enum zone_type classzone_idx = pgdat->kcompactd_classzone_idx;

	for (zoneid = 0; zoneid <= classzone_idx; zoneid++) {
		zone = &pgdat->node_zones[zoneid];

		if (!populated_zone(zone))
			continue;

		if (compaction_suitable(zone, pgdat->kcompactd_max_order, 0,
					classzone_idx) == COMPACT_CONTINUE)
			return true;
	}

	return false;
}

static void kcompactd_do_work(pg_data_t *pgdat)
{
	/*
	 * With no special task, compact all zones so that a page of requested
	 * order is allocatable.
	 */
	int zoneid;
	struct zone *zone;
	struct compact_control cc = {
		.order = pgdat->kcompactd_max_order,
		.search_order = pgdat->kcompactd_max_order,
		.classzone_idx = pgdat->kcompactd_classzone_idx,
		.mode = MIGRATE_SYNC_LIGHT,
		.ignore_skip_hint = false,
		.gfp_mask = GFP_KERNEL,
	};
	trace_mm_compaction_kcompactd_wake(pgdat->node_id, cc.order,
							cc.classzone_idx);
	count_compact_event(KCOMPACTD_WAKE);

	for (zoneid = 0; zoneid <= cc.classzone_idx; zoneid++) {
		int status;

		zone = &pgdat->node_zones[zoneid];
		if (!populated_zone(zone))
			continue;

		if (compaction_deferred(zone, cc.order))
			continue;

		if (compaction_suitable(zone, cc.order, 0, zoneid) !=
							COMPACT_CONTINUE)
			continue;

		if (kthread_should_stop())
			return;

		cc.zone = zone;
		status = compact_zone(&cc, NULL);

		if (status == COMPACT_SUCCESS) {
			compaction_defer_reset(zone, cc.order, false);
		} else if (status == COMPACT_PARTIAL_SKIPPED || status == COMPACT_COMPLETE) {
			/*
			 * Buddy pages may become stranded on pcps that could
			 * otherwise coalesce on the zone's free area for
			 * order >= cc.order.  This is ratelimited by the
			 * upcoming deferral.
			 */
			drain_all_pages(zone);

			/*
			 * We use sync migration mode here, so we defer like
			 * sync direct compaction does.
			 */
			defer_compaction(zone, cc.order);
		}

		count_compact_events(KCOMPACTD_MIGRATE_SCANNED,
				     cc.total_migrate_scanned);
		count_compact_events(KCOMPACTD_FREE_SCANNED,
				     cc.total_free_scanned);

		VM_BUG_ON(!list_empty(&cc.freepages));
		VM_BUG_ON(!list_empty(&cc.migratepages));
	}

	/*
	 * Regardless of success, we are done until woken up next. But remember
	 * the requested order/classzone_idx in case it was higher/tighter than
	 * our current ones.
	 */
	if (pgdat->kcompactd_max_order <= cc.order)
		pgdat->kcompactd_max_order = 0;
	if (pgdat->kcompactd_classzone_idx >= cc.classzone_idx)
		pgdat->kcompactd_classzone_idx = pgdat->nr_zones - 1;
}

void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx)
{
	if (!order)
		return;

	if (pgdat->kcompactd_max_order < order)
		pgdat->kcompactd_max_order = order;

	if (pgdat->kcompactd_classzone_idx > classzone_idx)
		pgdat->kcompactd_classzone_idx = classzone_idx;
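	/*
	 * Example (illustrative): back-to-back wakeups for order-3 in
	 * ZONE_NORMAL and order-9 in ZONE_DMA32 leave kcompactd_max_order
	 * at 9 and kcompactd_classzone_idx at ZONE_DMA32, i.e. the most
	 * demanding combination seen so far is remembered.
	 */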

	/*
	 * Pairs with implicit barrier in wait_event_freezable()
	 * such that wakeups are not missed.
	 */
	if (!wq_has_sleeper(&pgdat->kcompactd_wait))
		return;

	if (!kcompactd_node_suitable(pgdat))
		return;

	trace_mm_compaction_wakeup_kcompactd(pgdat->node_id, order,
							classzone_idx);
	wake_up_interruptible(&pgdat->kcompactd_wait);
}

/*
 * The background compaction daemon, started as a kernel thread
 * from the init process.
 */
static int kcompactd(void *p)
{
	pg_data_t *pgdat = (pg_data_t*)p;
	struct task_struct *tsk = current;

	const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);

	if (!cpumask_empty(cpumask))
		set_cpus_allowed_ptr(tsk, cpumask);

	set_freezable();

	pgdat->kcompactd_max_order = 0;
	pgdat->kcompactd_classzone_idx = pgdat->nr_zones - 1;

	while (!kthread_should_stop()) {
		unsigned long pflags;

		trace_mm_compaction_kcompactd_sleep(pgdat->node_id);
		wait_event_freezable(pgdat->kcompactd_wait,
				kcompactd_work_requested(pgdat));

		psi_memstall_enter(&pflags);
		kcompactd_do_work(pgdat);
		psi_memstall_leave(&pflags);
	}

	return 0;
}

/*
 * This kcompactd start function will be called by init and node-hot-add.
 * On node hot-add, kcompactd will be moved to the proper cpus if cpus are hot-added.
 */
int kcompactd_run(int nid)
{
	pg_data_t *pgdat = NODE_DATA(nid);
	int ret = 0;

	if (pgdat->kcompactd)
		return 0;

	pgdat->kcompactd = kthread_run(kcompactd, pgdat, "kcompactd%d", nid);
	if (IS_ERR(pgdat->kcompactd)) {
		pr_err("Failed to start kcompactd on node %d\n", nid);
		ret = PTR_ERR(pgdat->kcompactd);
		pgdat->kcompactd = NULL;
	}
	return ret;
}

/*
 * Called by memory hotplug when all memory in a node is offlined. Caller must
 * hold mem_hotplug_begin/end().
 */
void kcompactd_stop(int nid)
{
	struct task_struct *kcompactd = NODE_DATA(nid)->kcompactd;

	if (kcompactd) {
		kthread_stop(kcompactd);
		NODE_DATA(nid)->kcompactd = NULL;
	}
}

/*
 * It's optimal to keep kcompactd on the same CPUs as their memory, but
 * not required for correctness. So if the last cpu in a node goes
 * away, we get changed to run anywhere: as the first one comes back,
 * restore their cpu bindings.
 */
static int kcompactd_cpu_online(unsigned int cpu)
{
	int nid;

	for_each_node_state(nid, N_MEMORY) {
		pg_data_t *pgdat = NODE_DATA(nid);
		const struct cpumask *mask;

		mask = cpumask_of_node(pgdat->node_id);

		if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
			/* One of our CPUs online: restore mask */
			set_cpus_allowed_ptr(pgdat->kcompactd, mask);
	}
	return 0;
}

static int __init kcompactd_init(void)
{
	int nid;
	int ret;

	ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
					"mm/compaction:online",
					kcompactd_cpu_online, NULL);
	if (ret < 0) {
		pr_err("kcompactd: failed to register hotplug callbacks.\n");
		return ret;
	}

	for_each_node_state(nid, N_MEMORY)
		kcompactd_run(nid);
	return 0;
}
subsys_initcall(kcompactd_init)

#endif /* CONFIG_COMPACTION */