/*
 * linux/mm/compaction.c
 *
 * Memory compaction for the reduction of external fragmentation. Note that
 * this heavily depends upon page migration to do all the real heavy
 * lifting
 *
 * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
 */
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/compaction.h>
#include <linux/mm_inline.h>
#include <linux/backing-dev.h>
#include <linux/sysctl.h>
#include <linux/sysfs.h>
#include <linux/balloon_compaction.h>
#include <linux/page-isolation.h>
#include <linux/kasan.h>
#include "internal.h"

#ifdef CONFIG_COMPACTION
static inline void count_compact_event(enum vm_event_item item)
{
	count_vm_event(item);
}

static inline void count_compact_events(enum vm_event_item item, long delta)
{
	count_vm_events(item, delta);
}
#else
#define count_compact_event(item) do { } while (0)
#define count_compact_events(item, delta) do { } while (0)
#endif

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

#define CREATE_TRACE_POINTS
#include <trace/events/compaction.h>

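/*
 * Hand the pages on a private freelist back to the buddy allocator and
 * report the highest PFN that was released, so the caller can restore the
 * cached free scanner position.
 */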
static unsigned long release_freepages(struct list_head *freelist)
{
	struct page *page, *next;
	unsigned long high_pfn = 0;

	list_for_each_entry_safe(page, next, freelist, lru) {
		unsigned long pfn = page_to_pfn(page);
		list_del(&page->lru);
		__free_page(page);
		if (pfn > high_pfn)
			high_pfn = pfn;
	}

	return high_pfn;
}

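/*
 * Pages returned by split_free_page() have not been run through the usual
 * allocation hooks; apply arch_alloc_page(), kernel_map_pages() and
 * kasan_alloc_pages() here before they are handed out as migration targets.
 */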
static void map_pages(struct list_head *list)
{
	struct page *page;

	list_for_each_entry(page, list, lru) {
		arch_alloc_page(page, 0);
		kernel_map_pages(page, 1, 1);
		kasan_alloc_pages(page, 0);
	}
}

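/* Async compaction restricts itself to MIGRATE_CMA and MIGRATE_MOVABLE pageblocks */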
static inline bool migrate_async_suitable(int migratetype)
{
	return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE;
}

/*
 * Check that the whole (or subset of) a pageblock given by the interval of
 * [start_pfn, end_pfn) is valid and within the same zone, before scanning it
 * with the migrate or free compaction scanner. The scanners then need to
 * use only pfn_valid_within() check for arches that allow holes within
 * pageblocks.
 *
 * Return struct page pointer of start_pfn, or NULL if checks were not passed.
 *
 * It's possible on some configurations to have a setup like node0 node1 node0
 * i.e. it's possible that all pages within a zones range of pages do not
 * belong to a single zone. We assume that a border between node0 and node1
 * can occur within a single pageblock, but not a node0 node1 node0
 * interleaving within a single pageblock. It is therefore sufficient to check
 * the first and last page of a pageblock and avoid checking each individual
 * page in a pageblock.
 */
static struct page *pageblock_pfn_to_page(unsigned long start_pfn,
				unsigned long end_pfn, struct zone *zone)
{
	struct page *start_page;
	struct page *end_page;

	/* end_pfn is one past the range we are checking */
	end_pfn--;

	if (!pfn_valid(start_pfn) || !pfn_valid(end_pfn))
		return NULL;

	start_page = pfn_to_page(start_pfn);

	if (page_zone(start_page) != zone)
		return NULL;

	end_page = pfn_to_page(end_pfn);

	/* This gives a shorter code than deriving page_zone(end_page) */
	if (page_zone_id(start_page) != page_zone_id(end_page))
		return NULL;

	return start_page;
}

#ifdef CONFIG_COMPACTION

/* Do not skip compaction more than 64 times */
#define COMPACT_MAX_DEFER_SHIFT 6

/*
 * Compaction is deferred when compaction fails to result in a page
 * allocation success. 1 << compact_defer_shift compactions are skipped up
 * to a limit of 1 << COMPACT_MAX_DEFER_SHIFT
 */
void defer_compaction(struct zone *zone, int order)
{
	zone->compact_considered = 0;
	zone->compact_defer_shift++;

	if (order < zone->compact_order_failed)
		zone->compact_order_failed = order;

	if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
		zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;

	trace_mm_compaction_defer_compaction(zone, order);
}
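/*
 * Example (illustrative): after three consecutive failures at a given order,
 * compact_defer_shift is 3, so compaction_deferred() below keeps returning
 * true until compact_considered reaches 1 << 3 == 8; only then is compaction
 * attempted again for that order.
 */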

/* Returns true if compaction should be skipped this time */
bool compaction_deferred(struct zone *zone, int order)
{
	unsigned long defer_limit = 1UL << zone->compact_defer_shift;

	if (order < zone->compact_order_failed)
		return false;

	/* Avoid possible overflow */
	if (++zone->compact_considered > defer_limit)
		zone->compact_considered = defer_limit;

	if (zone->compact_considered >= defer_limit)
		return false;

	trace_mm_compaction_deferred(zone, order);

	return true;
}

/*
 * Update defer tracking counters after successful compaction of given order,
 * which means an allocation either succeeded (alloc_success == true) or is
 * expected to succeed.
 */
void compaction_defer_reset(struct zone *zone, int order,
		bool alloc_success)
{
	if (alloc_success) {
		zone->compact_considered = 0;
		zone->compact_defer_shift = 0;
	}
	if (order >= zone->compact_order_failed)
		zone->compact_order_failed = order + 1;

	trace_mm_compaction_defer_reset(zone, order);
}

/* Returns true if restarting compaction after many failures */
bool compaction_restarting(struct zone *zone, int order)
{
	if (order < zone->compact_order_failed)
		return false;

	return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT &&
		zone->compact_considered >= 1UL << zone->compact_defer_shift;
}

/* Returns true if the pageblock should be scanned for pages to isolate. */
static inline bool isolation_suitable(struct compact_control *cc,
					struct page *page)
{
	if (cc->ignore_skip_hint)
		return true;

	return !get_pageblock_skip(page);
}

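/* Rewind the cached scanner positions to the zone boundaries for a fresh scan */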
static void reset_cached_positions(struct zone *zone)
{
	zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;
	zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;
	zone->compact_cached_free_pfn = zone_end_pfn(zone);
}

/*
 * This function is called to clear all cached information on pageblocks that
 * should be skipped for page isolation when the migrate and free page scanner
 * meet.
 */
static void __reset_isolation_suitable(struct zone *zone)
{
	unsigned long start_pfn = zone->zone_start_pfn;
	unsigned long end_pfn = zone_end_pfn(zone);
	unsigned long pfn;

	zone->compact_blockskip_flush = false;

	/* Walk the zone and mark every pageblock as suitable for isolation */
	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		struct page *page;

		cond_resched();

		if (!pfn_valid(pfn))
			continue;

		page = pfn_to_page(pfn);
		if (zone != page_zone(page))
			continue;

		clear_pageblock_skip(page);
	}

	reset_cached_positions(zone);
}

void reset_isolation_suitable(pg_data_t *pgdat)
{
	int zoneid;

	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
		struct zone *zone = &pgdat->node_zones[zoneid];
		if (!populated_zone(zone))
			continue;

		/* Only flush if a full compaction finished recently */
		if (zone->compact_blockskip_flush)
			__reset_isolation_suitable(zone);
	}
}

/*
 * If no pages were isolated then mark this pageblock to be skipped in the
 * future. The information is later cleared by __reset_isolation_suitable().
 */
static void update_pageblock_skip(struct compact_control *cc,
			struct page *page, unsigned long nr_isolated,
			bool migrate_scanner)
{
	struct zone *zone = cc->zone;
	unsigned long pfn;

	if (cc->ignore_skip_hint)
		return;

	if (!page)
		return;

	if (nr_isolated)
		return;

	set_pageblock_skip(page);

	pfn = page_to_pfn(page);

	/* Update where async and sync compaction should restart */
	if (migrate_scanner) {
		if (pfn > zone->compact_cached_migrate_pfn[0])
			zone->compact_cached_migrate_pfn[0] = pfn;
		if (cc->mode != MIGRATE_ASYNC &&
		    pfn > zone->compact_cached_migrate_pfn[1])
			zone->compact_cached_migrate_pfn[1] = pfn;
	} else {
		if (pfn < zone->compact_cached_free_pfn)
			zone->compact_cached_free_pfn = pfn;
	}
}
#else
static inline bool isolation_suitable(struct compact_control *cc,
					struct page *page)
{
	return true;
}

static void update_pageblock_skip(struct compact_control *cc,
			struct page *page, unsigned long nr_isolated,
			bool migrate_scanner)
{
}
#endif /* CONFIG_COMPACTION */

/*
 * Compaction requires the taking of some coarse locks that are potentially
 * very heavily contended. For async compaction, back out if the lock cannot
 * be taken immediately. For sync compaction, spin on the lock if needed.
 *
 * Returns true if the lock is held
 * Returns false if the lock is not held and compaction should abort
 */
static bool compact_trylock_irqsave(spinlock_t *lock, unsigned long *flags,
						struct compact_control *cc)
{
	if (cc->mode == MIGRATE_ASYNC) {
		if (!spin_trylock_irqsave(lock, *flags)) {
			cc->contended = COMPACT_CONTENDED_LOCK;
			return false;
		}
	} else {
		spin_lock_irqsave(lock, *flags);
	}

	return true;
}

/*
 * Compaction requires the taking of some coarse locks that are potentially
 * very heavily contended. The lock should be periodically unlocked to avoid
 * having disabled IRQs for a long time, even when there is nobody waiting on
 * the lock. It might also be that allowing the IRQs will result in
 * need_resched() becoming true. If scheduling is needed, async compaction
 * aborts. Sync compaction schedules.
 * Either compaction type will also abort if a fatal signal is pending.
 * In either case if the lock was locked, it is dropped and not regained.
 *
 * Returns true if compaction should abort due to fatal signal pending, or
 *		async compaction due to need_resched()
 * Returns false when compaction can continue (sync compaction might have
 *		scheduled)
 */
static bool compact_unlock_should_abort(spinlock_t *lock,
		unsigned long flags, bool *locked, struct compact_control *cc)
{
	if (*locked) {
		spin_unlock_irqrestore(lock, flags);
		*locked = false;
	}

	if (fatal_signal_pending(current)) {
		cc->contended = COMPACT_CONTENDED_SCHED;
		return true;
	}

	if (need_resched()) {
		if (cc->mode == MIGRATE_ASYNC) {
			cc->contended = COMPACT_CONTENDED_SCHED;
			return true;
		}
		cond_resched();
	}

	return false;
}

/*
 * Aside from avoiding lock contention, compaction also periodically checks
 * need_resched() and either schedules in sync compaction or aborts async
 * compaction. This is similar to what compact_unlock_should_abort() does, but
 * is used where no lock is concerned.
 *
 * Returns false when no scheduling was needed, or sync compaction scheduled.
 * Returns true when async compaction should abort.
 */
static inline bool compact_should_abort(struct compact_control *cc)
{
	/* async compaction aborts if contended */
	if (need_resched()) {
		if (cc->mode == MIGRATE_ASYNC) {
			cc->contended = COMPACT_CONTENDED_SCHED;
			return true;
		}

		cond_resched();
	}

	return false;
}

/*
 * Isolate free pages onto a private freelist. If @strict is true, will abort
 * returning 0 on any invalid PFNs or non-free pages inside of the pageblock
 * (even though it may still end up isolating some pages).
 */
static unsigned long isolate_freepages_block(struct compact_control *cc,
				unsigned long *start_pfn,
				unsigned long end_pfn,
				struct list_head *freelist,
				bool strict)
{
	int nr_scanned = 0, total_isolated = 0;
	struct page *cursor, *valid_page = NULL;
	unsigned long flags = 0;
	bool locked = false;
	unsigned long blockpfn = *start_pfn;

	cursor = pfn_to_page(blockpfn);

	/* Isolate free pages. */
	for (; blockpfn < end_pfn; blockpfn++, cursor++) {
		int isolated, i;
		struct page *page = cursor;

		/*
		 * Periodically drop the lock (if held) regardless of its
		 * contention, to give chance to IRQs. Abort if fatal signal
		 * pending or async compaction detects need_resched()
		 */
		if (!(blockpfn % SWAP_CLUSTER_MAX)
		    && compact_unlock_should_abort(&cc->zone->lock, flags,
								&locked, cc))
			break;

		nr_scanned++;
		if (!pfn_valid_within(blockpfn))
			goto isolate_fail;

		if (!valid_page)
			valid_page = page;

		/*
		 * For compound pages such as THP and hugetlbfs, we can save
		 * potentially a lot of iterations if we skip them at once.
		 * The check is racy, but we can consider only valid values
		 * and the only danger is skipping too much.
		 */
		if (PageCompound(page)) {
			unsigned int comp_order = compound_order(page);

			if (likely(comp_order < MAX_ORDER)) {
				blockpfn += (1UL << comp_order) - 1;
				cursor += (1UL << comp_order) - 1;
			}

			goto isolate_fail;
		}

		if (!PageBuddy(page))
			goto isolate_fail;

		/*
		 * If we already hold the lock, we can skip some rechecking.
		 * Note that if we hold the lock now, checked_pageblock was
		 * already set in some previous iteration (or strict is true),
		 * so it is correct to skip the suitable migration target
		 * recheck as well.
		 */
		if (!locked) {
			/*
			 * The zone lock must be held to isolate freepages.
			 * Unfortunately this is a very coarse lock and can be
			 * heavily contended if there are parallel allocations
			 * or parallel compactions. For async compaction do not
			 * spin on the lock and we acquire the lock as late as
			 * possible.
			 */
			locked = compact_trylock_irqsave(&cc->zone->lock,
								&flags, cc);
			if (!locked)
				break;

			/* Recheck this is a buddy page under lock */
			if (!PageBuddy(page))
				goto isolate_fail;
		}

		/* Found a free page, break it into order-0 pages */
		isolated = split_free_page(page);
		total_isolated += isolated;
		for (i = 0; i < isolated; i++) {
			list_add(&page->lru, freelist);
			page++;
		}

		/* If a page was split, advance to the end of it */
		if (isolated) {
			cc->nr_freepages += isolated;
			if (!strict &&
				cc->nr_migratepages <= cc->nr_freepages) {
				blockpfn += isolated;
				break;
			}

			blockpfn += isolated - 1;
			cursor += isolated - 1;
			continue;
		}

isolate_fail:
		if (strict)
			break;
		else
			continue;

	}

	/*
	 * There is a tiny chance that we have read bogus compound_order(),
	 * so be careful to not go outside of the pageblock.
	 */
	if (unlikely(blockpfn > end_pfn))
		blockpfn = end_pfn;

	trace_mm_compaction_isolate_freepages(*start_pfn, blockpfn,
					nr_scanned, total_isolated);

	/* Record how far we have got within the block */
	*start_pfn = blockpfn;

	/*
	 * If strict isolation is requested by CMA then check that all the
	 * pages requested were isolated. If there were any failures, 0 is
	 * returned and CMA will fail.
	 */
	if (strict && blockpfn < end_pfn)
		total_isolated = 0;

	if (locked)
		spin_unlock_irqrestore(&cc->zone->lock, flags);

	/* Update the pageblock-skip if the whole pageblock was scanned */
	if (blockpfn == end_pfn)
		update_pageblock_skip(cc, valid_page, total_isolated, false);

	count_compact_events(COMPACTFREE_SCANNED, nr_scanned);
	if (total_isolated)
		count_compact_events(COMPACTISOLATED, total_isolated);
	return total_isolated;
}

/**
 * isolate_freepages_range() - isolate free pages.
 * @start_pfn: The first PFN to start isolating.
 * @end_pfn:   The one-past-last PFN.
 *
 * Non-free pages, invalid PFNs, or zone boundaries within the
 * [start_pfn, end_pfn) range are considered errors, cause function to
 * undo its actions and return zero.
 *
 * Otherwise, function returns one-past-the-last PFN of isolated page
 * (which may be greater than end_pfn if end fell in the middle of
 * a free page).
 */
unsigned long
isolate_freepages_range(struct compact_control *cc,
			unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long isolated, pfn, block_end_pfn;
	LIST_HEAD(freelist);

	pfn = start_pfn;
	block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);

	for (; pfn < end_pfn; pfn += isolated,
				block_end_pfn += pageblock_nr_pages) {
		/* Protect pfn from changing by isolate_freepages_block */
		unsigned long isolate_start_pfn = pfn;

		block_end_pfn = min(block_end_pfn, end_pfn);

		/*
		 * pfn could pass the block_end_pfn if isolated freepage
		 * is more than pageblock order. In this case, we adjust
		 * scanning range to right one.
		 */
		if (pfn >= block_end_pfn) {
			block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
			block_end_pfn = min(block_end_pfn, end_pfn);
		}

		if (!pageblock_pfn_to_page(pfn, block_end_pfn, cc->zone))
			break;

		isolated = isolate_freepages_block(cc, &isolate_start_pfn,
						block_end_pfn, &freelist, true);

		/*
		 * In strict mode, isolate_freepages_block() returns 0 if
		 * there are any holes in the block (ie. invalid PFNs or
		 * non-free pages).
		 */
		if (!isolated)
			break;

		/*
		 * If we managed to isolate pages, it is always (1 << n) *
		 * pageblock_nr_pages for some non-negative n.  (Max order
		 * page may span two pageblocks).
		 */
	}

	/* split_free_page does not map the pages */
	map_pages(&freelist);

	if (pfn < end_pfn) {
		/* Loop terminated early, cleanup. */
		release_freepages(&freelist);
		return 0;
	}

	/* We don't use freelists for anything. */
	return pfn;
}

/* Update the number of anon and file isolated pages in the zone */
static void acct_isolated(struct zone *zone, struct compact_control *cc)
{
	struct page *page;
	unsigned int count[2] = { 0, };

	if (list_empty(&cc->migratepages))
		return;

	list_for_each_entry(page, &cc->migratepages, lru)
		count[!!page_is_file_cache(page)]++;

	mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
	mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
}

/* Similar to reclaim, but different enough that they don't share logic */
static bool too_many_isolated(struct zone *zone)
{
	unsigned long active, inactive, isolated;

	inactive = zone_page_state(zone, NR_INACTIVE_FILE) +
					zone_page_state(zone, NR_INACTIVE_ANON);
	active = zone_page_state(zone, NR_ACTIVE_FILE) +
					zone_page_state(zone, NR_ACTIVE_ANON);
	isolated = zone_page_state(zone, NR_ISOLATED_FILE) +
					zone_page_state(zone, NR_ISOLATED_ANON);

	return isolated > (inactive + active) / 2;
}

/**
 * isolate_migratepages_block() - isolate all migrate-able pages within
 *				  a single pageblock
 * @cc:		Compaction control structure.
 * @low_pfn:	The first PFN to isolate
 * @end_pfn:	The one-past-the-last PFN to isolate, within same pageblock
 * @isolate_mode: Isolation mode to be used.
 *
 * Isolate all pages that can be migrated from the range specified by
 * [low_pfn, end_pfn). The range is expected to be within same pageblock.
 * Returns zero if there is a fatal signal pending, otherwise PFN of the
 * first page that was not scanned (which may be both less, equal to or more
 * than end_pfn).
 *
 * The pages are isolated on cc->migratepages list (not required to be empty),
 * and cc->nr_migratepages is updated accordingly. The cc->migrate_pfn field
 * is neither read nor updated.
 */
static unsigned long
isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
			unsigned long end_pfn, isolate_mode_t isolate_mode)
{
	struct zone *zone = cc->zone;
	unsigned long nr_scanned = 0, nr_isolated = 0;
	struct list_head *migratelist = &cc->migratepages;
	struct lruvec *lruvec;
	unsigned long flags = 0;
	bool locked = false;
	struct page *page = NULL, *valid_page = NULL;
	unsigned long start_pfn = low_pfn;

	/*
	 * Ensure that there are not too many pages isolated from the LRU
	 * list by either parallel reclaimers or compaction. If there are,
	 * delay for some time until fewer pages are isolated
	 */
	while (unlikely(too_many_isolated(zone))) {
		/* async migration should just abort */
		if (cc->mode == MIGRATE_ASYNC)
			return 0;

		congestion_wait(BLK_RW_ASYNC, HZ/10);

		if (fatal_signal_pending(current))
			return 0;
	}

	if (compact_should_abort(cc))
		return 0;

	/* Time to isolate some pages for migration */
	for (; low_pfn < end_pfn; low_pfn++) {
		bool is_lru;

		/*
		 * Periodically drop the lock (if held) regardless of its
		 * contention, to give chance to IRQs. Abort async compaction
		 * if contended.
		 */
		if (!(low_pfn % SWAP_CLUSTER_MAX)
		    && compact_unlock_should_abort(&zone->lru_lock, flags,
								&locked, cc))
			break;

		if (!pfn_valid_within(low_pfn))
			continue;
		nr_scanned++;

		page = pfn_to_page(low_pfn);

		if (!valid_page)
			valid_page = page;

		/*
		 * Skip if free. We read page order here without zone lock
		 * which is generally unsafe, but the race window is small and
		 * the worst thing that can happen is that we skip some
		 * potential isolation targets.
		 */
		if (PageBuddy(page)) {
			unsigned long freepage_order = page_order_unsafe(page);

			/*
			 * Without lock, we cannot be sure that what we got is
			 * a valid page order. Consider only values in the
			 * valid order range to prevent low_pfn overflow.
			 */
			if (freepage_order > 0 && freepage_order < MAX_ORDER)
				low_pfn += (1UL << freepage_order) - 1;
			continue;
		}

		/*
		 * Check may be lockless but that's ok as we recheck later.
		 * It's possible to migrate LRU pages and balloon pages
		 * Skip any other type of page
		 */
		is_lru = PageLRU(page);
		if (!is_lru) {
			if (unlikely(balloon_page_movable(page))) {
				if (balloon_page_isolate(page)) {
					/* Successfully isolated */
					goto isolate_success;
				}
			}
		}

		/*
		 * Regardless of being on LRU, compound pages such as THP and
		 * hugetlbfs are not to be compacted. We can potentially save
		 * a lot of iterations if we skip them at once. The check is
		 * racy, but we can consider only valid values and the only
		 * danger is skipping too much.
		 */
		if (PageCompound(page)) {
			unsigned int comp_order = compound_order(page);

			if (likely(comp_order < MAX_ORDER))
				low_pfn += (1UL << comp_order) - 1;

			continue;
		}

		if (!is_lru)
			continue;

		/*
		 * Migration will fail if an anonymous page is pinned in memory,
		 * so avoid taking lru_lock and isolating it unnecessarily in an
		 * admittedly racy check.
		 */
		if (!page_mapping(page) &&
		    page_count(page) > page_mapcount(page))
			continue;

		/* If we already hold the lock, we can skip some rechecking */
		if (!locked) {
			locked = compact_trylock_irqsave(&zone->lru_lock,
								&flags, cc);
			if (!locked)
				break;

			/* Recheck PageLRU and PageCompound under lock */
			if (!PageLRU(page))
				continue;

			/*
			 * Page become compound since the non-locked check,
			 * and it's on LRU. It can only be a THP so the order
			 * is safe to read and it's 0 for tail pages.
			 */
			if (unlikely(PageCompound(page))) {
				low_pfn += (1UL << compound_order(page)) - 1;
				continue;
			}
		}

		lruvec = mem_cgroup_page_lruvec(page, zone);

		/* Try isolate the page */
		if (__isolate_lru_page(page, isolate_mode) != 0)
			continue;

		VM_BUG_ON_PAGE(PageCompound(page), page);

		/* Successfully isolated */
		del_page_from_lru_list(page, lruvec, page_lru(page));

isolate_success:
		list_add(&page->lru, migratelist);
		cc->nr_migratepages++;
		nr_isolated++;

		/* Avoid isolating too much */
		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) {
			++low_pfn;
			break;
		}
	}

	/*
	 * The PageBuddy() check could have potentially brought us outside
	 * the range to be scanned.
	 */
	if (unlikely(low_pfn > end_pfn))
		low_pfn = end_pfn;

	if (locked)
		spin_unlock_irqrestore(&zone->lru_lock, flags);

	/*
	 * Update the pageblock-skip information and cached scanner pfn,
	 * if the whole pageblock was scanned without isolating any page.
	 */
	if (low_pfn == end_pfn)
		update_pageblock_skip(cc, valid_page, nr_isolated, true);

	trace_mm_compaction_isolate_migratepages(start_pfn, low_pfn,
						nr_scanned, nr_isolated);

	count_compact_events(COMPACTMIGRATE_SCANNED, nr_scanned);
	if (nr_isolated)
		count_compact_events(COMPACTISOLATED, nr_isolated);

	return low_pfn;
}

/**
 * isolate_migratepages_range() - isolate migrate-able pages in a PFN range
 * @cc:        Compaction control structure.
 * @start_pfn: The first PFN to start isolating.
 * @end_pfn:   The one-past-last PFN.
 *
 * Returns zero if isolation fails fatally due to e.g. pending signal.
 * Otherwise, function returns one-past-the-last PFN of isolated page
 * (which may be greater than end_pfn if end fell in a middle of a THP page).
 */
unsigned long
isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
							unsigned long end_pfn)
{
	unsigned long pfn, block_end_pfn;

	/* Scan block by block. First and last block may be incomplete */
	pfn = start_pfn;
	block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);

	for (; pfn < end_pfn; pfn = block_end_pfn,
				block_end_pfn += pageblock_nr_pages) {

		block_end_pfn = min(block_end_pfn, end_pfn);

		if (!pageblock_pfn_to_page(pfn, block_end_pfn, cc->zone))
			continue;

		pfn = isolate_migratepages_block(cc, pfn, block_end_pfn,
							ISOLATE_UNEVICTABLE);

		/*
		 * In case of fatal failure, release everything that might
		 * have been isolated in the previous iteration, and signal
		 * the failure back to caller.
		 */
		if (!pfn) {
			putback_movable_pages(&cc->migratepages);
			cc->nr_migratepages = 0;
			break;
		}

		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX)
			break;
	}
	acct_isolated(cc->zone, cc);

	return pfn;
}

#endif /* CONFIG_COMPACTION || CONFIG_CMA */
#ifdef CONFIG_COMPACTION

/* Returns true if the page is within a block suitable for migration to */
static bool suitable_migration_target(struct page *page)
{
	/* If the page is a large free page, then disallow migration */
	if (PageBuddy(page)) {
		/*
		 * We are checking page_order without zone->lock taken. But
		 * the only small danger is that we skip a potentially suitable
		 * pageblock, so it's not worth to check order for valid range.
		 */
		if (page_order_unsafe(page) >= pageblock_order)
			return false;
	}

	/* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
	if (migrate_async_suitable(get_pageblock_migratetype(page)))
		return true;

	/* Otherwise skip the block */
	return false;
}

/*
 * Test whether the free scanner has reached the same or lower pageblock than
 * the migration scanner, and compaction should thus terminate.
 */
static inline bool compact_scanners_met(struct compact_control *cc)
{
	return (cc->free_pfn >> pageblock_order)
		<= (cc->migrate_pfn >> pageblock_order);
}

/*
 * Based on information in the current compact_control, find blocks
 * suitable for isolating free pages from and then isolate them.
 */
static void isolate_freepages(struct compact_control *cc)
{
	struct zone *zone = cc->zone;
	struct page *page;
	unsigned long block_start_pfn;	/* start of current pageblock */
	unsigned long isolate_start_pfn; /* exact pfn we start at */
	unsigned long block_end_pfn;	/* end of current pageblock */
	unsigned long low_pfn;	     /* lowest pfn scanner is able to scan */
	struct list_head *freelist = &cc->freepages;

	/*
	 * Initialise the free scanner. The starting point is where we last
	 * successfully isolated from, zone-cached value, or the end of the
	 * zone when isolating for the first time. For looping we also need
	 * this pfn aligned down to the pageblock boundary, because we do
	 * block_start_pfn -= pageblock_nr_pages in the for loop.
	 * For ending point, take care when isolating in last pageblock of a
	 * zone which ends in the middle of a pageblock.
	 * The low boundary is the end of the pageblock the migration scanner
	 * is using.
	 */
	isolate_start_pfn = cc->free_pfn;
	block_start_pfn = cc->free_pfn & ~(pageblock_nr_pages-1);
	block_end_pfn = min(block_start_pfn + pageblock_nr_pages,
						zone_end_pfn(zone));
	low_pfn = ALIGN(cc->migrate_pfn + 1, pageblock_nr_pages);

	/*
	 * Isolate free pages until enough are available to migrate the
	 * pages on cc->migratepages. We stop searching if the migrate
	 * and free page scanners meet or enough free pages are isolated.
	 */
	for (; block_start_pfn >= low_pfn;
				block_end_pfn = block_start_pfn,
				block_start_pfn -= pageblock_nr_pages,
				isolate_start_pfn = block_start_pfn) {

		/*
		 * This can iterate a massively long zone without finding any
		 * suitable migration targets, so periodically check if we need
		 * to schedule, or even abort async compaction.
		 */
		if (!(block_start_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
						&& compact_should_abort(cc))
			break;

		page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
									zone);
		if (!page)
			continue;

		/* Check the block is suitable for migration */
		if (!suitable_migration_target(page))
			continue;

		/* If isolation recently failed, do not retry */
		if (!isolation_suitable(cc, page))
			continue;

		/* Found a block suitable for isolating free pages from. */
		isolate_freepages_block(cc, &isolate_start_pfn,
					block_end_pfn, freelist, false);

		/*
		 * If we isolated enough freepages, or aborted due to async
		 * compaction being contended, terminate the loop.
		 * Remember where the free scanner should restart next time,
		 * which is where isolate_freepages_block() left off.
		 * But if it scanned the whole pageblock, isolate_start_pfn
		 * now points at block_end_pfn, which is the start of the next
		 * pageblock.
		 * In that case we will however want to restart at the start
		 * of the previous pageblock.
		 */
		if ((cc->nr_freepages >= cc->nr_migratepages)
							|| cc->contended) {
			if (isolate_start_pfn >= block_end_pfn)
				isolate_start_pfn =
					block_start_pfn - pageblock_nr_pages;
			break;
		} else {
			/*
			 * isolate_freepages_block() should not terminate
			 * prematurely unless contended, or isolated enough
			 */
			VM_BUG_ON(isolate_start_pfn < block_end_pfn);
		}
	}

	/* split_free_page does not map the pages */
	map_pages(freelist);

	/*
	 * Record where the free scanner will restart next time. Either we
	 * broke from the loop and set isolate_start_pfn based on the last
	 * call to isolate_freepages_block(), or we met the migration scanner
	 * and the loop terminated due to isolate_start_pfn < low_pfn
	 */
	cc->free_pfn = isolate_start_pfn;
}

/*
 * This is a migrate-callback that "allocates" freepages by taking pages
 * from the isolated freelists in the block we are migrating to.
 */
static struct page *compaction_alloc(struct page *migratepage,
					unsigned long data,
					int **result)
{
	struct compact_control *cc = (struct compact_control *)data;
	struct page *freepage;

	/*
	 * Isolate free pages if necessary, and if we are not aborting due to
	 * contention.
	 */
	if (list_empty(&cc->freepages)) {
		if (!cc->contended)
			isolate_freepages(cc);

		if (list_empty(&cc->freepages))
			return NULL;
	}

	freepage = list_entry(cc->freepages.next, struct page, lru);
	list_del(&freepage->lru);
	cc->nr_freepages--;

	return freepage;
}

/*
 * This is a migrate-callback that "frees" freepages back to the isolated
 * freelist.  All pages on the freelist are from the same zone, so there is no
 * special handling needed for NUMA.
 */
static void compaction_free(struct page *page, unsigned long data)
{
	struct compact_control *cc = (struct compact_control *)data;

	list_add(&page->lru, &cc->freepages);
	cc->nr_freepages++;
}

/* possible outcome of isolate_migratepages */
typedef enum {
	ISOLATE_ABORT,		/* Abort compaction now */
	ISOLATE_NONE,		/* No pages isolated, continue scanning */
	ISOLATE_SUCCESS,	/* Pages isolated, migrate */
} isolate_migrate_t;

/*
 * Allow userspace to control policy on scanning the unevictable LRU for
 * compactable pages.
 */
int sysctl_compact_unevictable_allowed __read_mostly = 1;

/*
 * Isolate all pages that can be migrated from the first suitable block,
 * starting at the block pointed to by the migrate scanner pfn within
 * compact_control.
 */
static isolate_migrate_t isolate_migratepages(struct zone *zone,
					struct compact_control *cc)
{
	unsigned long low_pfn, end_pfn;
	unsigned long isolate_start_pfn;
	struct page *page;
	const isolate_mode_t isolate_mode =
		(sysctl_compact_unevictable_allowed ? ISOLATE_UNEVICTABLE : 0) |
		(cc->mode == MIGRATE_ASYNC ? ISOLATE_ASYNC_MIGRATE : 0);

	/*
	 * Start at where we last stopped, or beginning of the zone as
	 * initialized by compact_zone()
	 */
	low_pfn = cc->migrate_pfn;

	/* Only scan within a pageblock boundary */
	end_pfn = ALIGN(low_pfn + 1, pageblock_nr_pages);

	/*
	 * Iterate over whole pageblocks until we find the first suitable.
	 * Do not cross the free scanner.
	 */
	for (; end_pfn <= cc->free_pfn;
			low_pfn = end_pfn, end_pfn += pageblock_nr_pages) {
		/*
		 * This can potentially iterate a massively long zone with
		 * many pageblocks unsuitable, so periodically check if we
		 * need to schedule, or even abort async compaction.
		 */
		if (!(low_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
						&& compact_should_abort(cc))
			break;

		page = pageblock_pfn_to_page(low_pfn, end_pfn, zone);
		if (!page)
			continue;

		/* If isolation recently failed, do not retry */
		if (!isolation_suitable(cc, page))
			continue;

		/*
		 * For async compaction, also only scan in MOVABLE blocks.
		 * Async compaction is optimistic to see if the minimum amount
		 * of work satisfies the allocation.
		 */
		if (cc->mode == MIGRATE_ASYNC &&
		    !migrate_async_suitable(get_pageblock_migratetype(page)))
			continue;

		/* Perform the isolation */
		isolate_start_pfn = low_pfn;
		low_pfn = isolate_migratepages_block(cc, low_pfn, end_pfn,
								isolate_mode);

		if (!low_pfn || cc->contended) {
			acct_isolated(zone, cc);
			return ISOLATE_ABORT;
		}

		/*
		 * Record where we could have freed pages by migration and not
		 * yet flushed them to buddy allocator.
		 * - this is the lowest page that could have been isolated and
		 * then freed by migration.
		 */
		if (cc->nr_migratepages && !cc->last_migrated_pfn)
			cc->last_migrated_pfn = isolate_start_pfn;

		/*
		 * Either we isolated something and proceed with migration. Or
		 * we failed and compact_zone should decide if we should
		 * continue or not.
		 */
		break;
	}

	acct_isolated(zone, cc);
	/* Record where migration scanner will be restarted. */
	cc->migrate_pfn = low_pfn;

	return cc->nr_migratepages ? ISOLATE_SUCCESS : ISOLATE_NONE;
}

/*
 * order == -1 is expected when compacting via
 * /proc/sys/vm/compact_memory
 */
static inline bool is_via_compact_memory(int order)
{
	return order == -1;
}

static int __compact_finished(struct zone *zone, struct compact_control *cc,
			    const int migratetype)
{
	unsigned int order;
	unsigned long watermark;

	if (cc->contended || fatal_signal_pending(current))
		return COMPACT_PARTIAL;

	/* Compaction run completes if the migrate and free scanner meet */
	if (compact_scanners_met(cc)) {
		/* Let the next compaction start anew. */
		reset_cached_positions(zone);

		/*
		 * Mark that the PG_migrate_skip information should be cleared
		 * by kswapd when it goes to sleep. kswapd does not set the
		 * flag itself as the decision to be clear should be directly
		 * based on an allocation request.
		 */
		if (!current_is_kswapd())
			zone->compact_blockskip_flush = true;

		return COMPACT_COMPLETE;
	}

	if (is_via_compact_memory(cc->order))
		return COMPACT_CONTINUE;

	/* Compaction run is not finished if the watermark is not met */
	watermark = low_wmark_pages(zone);

	if (!zone_watermark_ok(zone, cc->order, watermark, cc->classzone_idx,
							cc->alloc_flags))
		return COMPACT_CONTINUE;

	/* Direct compactor: Is a suitable page free? */
	for (order = cc->order; order < MAX_ORDER; order++) {
		struct free_area *area = &zone->free_area[order];
		bool can_steal;

		/* Job done if page is free of the right migratetype */
		if (!list_empty(&area->free_list[migratetype]))
			return COMPACT_PARTIAL;

#ifdef CONFIG_CMA
		/* MIGRATE_MOVABLE can fallback on MIGRATE_CMA */
		if (migratetype == MIGRATE_MOVABLE &&
			!list_empty(&area->free_list[MIGRATE_CMA]))
			return COMPACT_PARTIAL;
#endif
		/*
		 * Job done if allocation would steal freepages from
		 * other migratetype buddy lists.
		 */
		if (find_suitable_fallback(area, order, migratetype,
						true, &can_steal) != -1)
			return COMPACT_PARTIAL;
	}

	return COMPACT_NO_SUITABLE_PAGE;
}

static int compact_finished(struct zone *zone, struct compact_control *cc,
			    const int migratetype)
{
	int ret;

	ret = __compact_finished(zone, cc, migratetype);
	trace_mm_compaction_finished(zone, cc->order, ret);
	if (ret == COMPACT_NO_SUITABLE_PAGE)
		ret = COMPACT_CONTINUE;

	return ret;
}

/*
 * compaction_suitable: Is this suitable to run compaction on this zone now?
 * Returns
 *   COMPACT_SKIPPED  - If there are too few free pages for compaction
 *   COMPACT_PARTIAL  - If the allocation would succeed without compaction
 *   COMPACT_CONTINUE - If compaction should run now
 */
static unsigned long __compaction_suitable(struct zone *zone, int order,
					int alloc_flags, int classzone_idx)
{
	int fragindex;
	unsigned long watermark;

	if (is_via_compact_memory(order))
		return COMPACT_CONTINUE;

	watermark = low_wmark_pages(zone);
	/*
	 * If watermarks for high-order allocation are already met, there
	 * should be no need for compaction at all.
	 */
	if (zone_watermark_ok(zone, order, watermark, classzone_idx,
								alloc_flags))
		return COMPACT_PARTIAL;

	/*
	 * Watermarks for order-0 must be met for compaction. Note the 2UL.
	 * This is because during migration, copies of pages need to be
	 * allocated and for a short time, the footprint is higher
	 */
	watermark += (2UL << order);
	if (!zone_watermark_ok(zone, 0, watermark, classzone_idx, alloc_flags))
		return COMPACT_SKIPPED;

	/*
	 * fragmentation index determines if allocation failures are due to
	 * low memory or external fragmentation
	 *
	 * index of -1000 would imply allocations might succeed depending on
	 * watermarks, but we already failed the high-order watermark check
	 * index towards 0 implies failure is due to lack of memory
	 * index towards 1000 implies failure is due to fragmentation
	 *
	 * Only compact if a failure would be due to fragmentation.
	 */
	fragindex = fragmentation_index(zone, order);
	if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
		return COMPACT_NOT_SUITABLE_ZONE;

	return COMPACT_CONTINUE;
}

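/*
 * Trace-enabled wrapper around __compaction_suitable(); the internal
 * COMPACT_NOT_SUITABLE_ZONE result is reported to callers as COMPACT_SKIPPED.
 */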
unsigned long compaction_suitable(struct zone *zone, int order,
					int alloc_flags, int classzone_idx)
{
	unsigned long ret;

	ret = __compaction_suitable(zone, order, alloc_flags, classzone_idx);
	trace_mm_compaction_suitable(zone, order, ret);
	if (ret == COMPACT_NOT_SUITABLE_ZONE)
		ret = COMPACT_SKIPPED;

	return ret;
}

static int compact_zone(struct zone *zone, struct compact_control *cc)
{
	int ret;
	unsigned long start_pfn = zone->zone_start_pfn;
	unsigned long end_pfn = zone_end_pfn(zone);
	const int migratetype = gfpflags_to_migratetype(cc->gfp_mask);
	const bool sync = cc->mode != MIGRATE_ASYNC;

	ret = compaction_suitable(zone, cc->order, cc->alloc_flags,
							cc->classzone_idx);
	switch (ret) {
	case COMPACT_PARTIAL:
	case COMPACT_SKIPPED:
		/* Compaction is likely to fail */
		return ret;
	case COMPACT_CONTINUE:
		/* Fall through to compaction */
		;
	}

	/*
	 * Clear pageblock skip if there were failures recently and compaction
	 * is about to be retried after being deferred. kswapd does not do
	 * this reset as it'll reset the cached information when going to sleep.
	 */
	if (compaction_restarting(zone, cc->order) && !current_is_kswapd())
		__reset_isolation_suitable(zone);

	/*
	 * Setup to move all movable pages to the end of the zone. Used cached
	 * information on where the scanners should start but check that it
	 * is initialised by ensuring the values are within zone boundaries.
	 */
	cc->migrate_pfn = zone->compact_cached_migrate_pfn[sync];
	cc->free_pfn = zone->compact_cached_free_pfn;
	if (cc->free_pfn < start_pfn || cc->free_pfn > end_pfn) {
		cc->free_pfn = end_pfn & ~(pageblock_nr_pages-1);
		zone->compact_cached_free_pfn = cc->free_pfn;
	}
	if (cc->migrate_pfn < start_pfn || cc->migrate_pfn > end_pfn) {
		cc->migrate_pfn = start_pfn;
		zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn;
		zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn;
	}
	cc->last_migrated_pfn = 0;

	trace_mm_compaction_begin(start_pfn, cc->migrate_pfn,
				cc->free_pfn, end_pfn, sync);

	migrate_prep_local();

	while ((ret = compact_finished(zone, cc, migratetype)) ==
						COMPACT_CONTINUE) {
		int err;

		switch (isolate_migratepages(zone, cc)) {
		case ISOLATE_ABORT:
			ret = COMPACT_PARTIAL;
			putback_movable_pages(&cc->migratepages);
			cc->nr_migratepages = 0;
			goto out;
		case ISOLATE_NONE:
			/*
			 * We haven't isolated and migrated anything, but
			 * there might still be unflushed migrations from
			 * previous cc->order aligned block.
			 */
			goto check_drain;
		case ISOLATE_SUCCESS:
			;
		}

		err = migrate_pages(&cc->migratepages, compaction_alloc,
				compaction_free, (unsigned long)cc, cc->mode,
				MR_COMPACTION);

		trace_mm_compaction_migratepages(cc->nr_migratepages, err,
							&cc->migratepages);

		/* All pages were either migrated or will be released */
		cc->nr_migratepages = 0;
		if (err) {
			putback_movable_pages(&cc->migratepages);
			/*
			 * migrate_pages() may return -ENOMEM when scanners meet
			 * and we want compact_finished() to detect it
			 */
			if (err == -ENOMEM && !compact_scanners_met(cc)) {
				ret = COMPACT_PARTIAL;
				goto out;
			}
		}

check_drain:
		/*
		 * Has the migration scanner moved away from the previous
		 * cc->order aligned block where we migrated from? If yes,
		 * flush the pages that were freed, so that they can merge and
		 * compact_finished() can detect immediately if allocation
		 * would succeed.
		 */
		if (cc->order > 0 && cc->last_migrated_pfn) {
			int cpu;
			unsigned long current_block_start =
				cc->migrate_pfn & ~((1UL << cc->order) - 1);

			if (cc->last_migrated_pfn < current_block_start) {
				cpu = get_cpu();
				lru_add_drain_cpu(cpu);
				drain_local_pages(zone);
				put_cpu();
				/* No more flushing until we migrate again */
				cc->last_migrated_pfn = 0;
			}
		}

	}

out:
	/*
	 * Release free pages and update where the free scanner should restart,
	 * so we don't leave any returned pages behind in the next attempt.
	 */
	if (cc->nr_freepages > 0) {
		unsigned long free_pfn = release_freepages(&cc->freepages);

		cc->nr_freepages = 0;
		VM_BUG_ON(free_pfn == 0);
		/* The cached pfn is always the first in a pageblock */
		free_pfn &= ~(pageblock_nr_pages-1);
		/*
		 * Only go back, not forward. The cached pfn might have been
		 * already reset to zone end in compact_finished()
		 */
		if (free_pfn > zone->compact_cached_free_pfn)
			zone->compact_cached_free_pfn = free_pfn;
	}

	trace_mm_compaction_end(start_pfn, cc->migrate_pfn,
				cc->free_pfn, end_pfn, sync, ret);

	return ret;
}

static unsigned long compact_zone_order(struct zone *zone, int order,
		gfp_t gfp_mask, enum migrate_mode mode, int *contended,
		int alloc_flags, int classzone_idx)
{
	unsigned long ret;
	struct compact_control cc = {
		.nr_freepages = 0,
		.nr_migratepages = 0,
		.order = order,
		.gfp_mask = gfp_mask,
		.zone = zone,
		.mode = mode,
		.alloc_flags = alloc_flags,
		.classzone_idx = classzone_idx,
	};
	INIT_LIST_HEAD(&cc.freepages);
	INIT_LIST_HEAD(&cc.migratepages);

	ret = compact_zone(zone, &cc);

	VM_BUG_ON(!list_empty(&cc.freepages));
	VM_BUG_ON(!list_empty(&cc.migratepages));

	*contended = cc.contended;
	return ret;
}

int sysctl_extfrag_threshold = 500;

/**
 * try_to_compact_pages - Direct compact to satisfy a high-order allocation
 * @gfp_mask: The GFP mask of the current allocation
 * @order: The order of the current allocation
 * @alloc_flags: The allocation flags of the current allocation
 * @ac: The context of current allocation
 * @mode: The migration mode for async, sync light, or sync migration
 * @contended: Return value that determines if compaction was aborted due to
 *	       need_resched() or lock contention
 *
 * This is the main entry point for direct page compaction.
 */
unsigned long try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
			int alloc_flags, const struct alloc_context *ac,
			enum migrate_mode mode, int *contended)
{
	int may_enter_fs = gfp_mask & __GFP_FS;
	int may_perform_io = gfp_mask & __GFP_IO;
	struct zoneref *z;
	struct zone *zone;
	int rc = COMPACT_DEFERRED;
	int all_zones_contended = COMPACT_CONTENDED_LOCK; /* init for &= op */

	*contended = COMPACT_CONTENDED_NONE;

	/* Check if the GFP flags allow compaction */
	if (!order || !may_enter_fs || !may_perform_io)
		return COMPACT_SKIPPED;

	trace_mm_compaction_try_to_compact_pages(order, gfp_mask, mode);

	/* Compact each zone in the list */
	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
								ac->nodemask) {
		int status;
		int zone_contended;

		if (compaction_deferred(zone, order))
			continue;

		status = compact_zone_order(zone, order, gfp_mask, mode,
				&zone_contended, alloc_flags,
				ac->classzone_idx);
		rc = max(status, rc);
		/*
		 * It takes at least one zone that wasn't lock contended
		 * to clear all_zones_contended.
		 */
		all_zones_contended &= zone_contended;

		/* If a normal allocation would succeed, stop compacting */
		if (zone_watermark_ok(zone, order, low_wmark_pages(zone),
					ac->classzone_idx, alloc_flags)) {
			/*
			 * We think the allocation will succeed in this zone,
			 * but it is not certain, hence the false. The caller
			 * will repeat this with true if allocation indeed
			 * succeeds in this zone.
			 */
			compaction_defer_reset(zone, order, false);
			/*
			 * It is possible that async compaction aborted due to
			 * need_resched() and the watermarks were ok thanks to
			 * somebody else freeing memory. The allocation can
			 * however still fail so we better signal the
			 * need_resched() contention anyway (this will not
			 * prevent the allocation attempt).
			 */
			if (zone_contended == COMPACT_CONTENDED_SCHED)
				*contended = COMPACT_CONTENDED_SCHED;

			goto break_loop;
		}

		if (mode != MIGRATE_ASYNC && status == COMPACT_COMPLETE) {
			/*
			 * We think that allocation won't succeed in this zone
			 * so we defer compaction there. If it ends up
			 * succeeding after all, it will be reset.
			 */
			defer_compaction(zone, order);
		}

		/*
		 * We might have stopped compacting due to need_resched() in
		 * async compaction, or due to a fatal signal detected. In that
		 * case do not try further zones and signal need_resched()
		 * contention.
		 */
		if ((zone_contended == COMPACT_CONTENDED_SCHED)
					|| fatal_signal_pending(current)) {
			*contended = COMPACT_CONTENDED_SCHED;
			goto break_loop;
		}

		continue;
break_loop:
		/*
		 * We might not have tried all the zones, so be conservative
		 * and assume they are not all lock contended.
		 */
		all_zones_contended = 0;
		break;
	}

	/*
	 * If at least one zone wasn't deferred or skipped, we report if all
	 * zones that were tried were lock contended.
	 */
	if (rc > COMPACT_SKIPPED && all_zones_contended)
		*contended = COMPACT_CONTENDED_LOCK;

	return rc;
}


/* Compact all zones within a node */
static void __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
{
	int zoneid;
	struct zone *zone;

	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {

		zone = &pgdat->node_zones[zoneid];
		if (!populated_zone(zone))
			continue;

		cc->nr_freepages = 0;
		cc->nr_migratepages = 0;
		cc->zone = zone;
		INIT_LIST_HEAD(&cc->freepages);
		INIT_LIST_HEAD(&cc->migratepages);

		/*
		 * When called via /proc/sys/vm/compact_memory
		 * this makes sure we compact the whole zone regardless of
		 * cached scanner positions.
		 */
		if (is_via_compact_memory(cc->order))
			__reset_isolation_suitable(zone);

		if (is_via_compact_memory(cc->order) ||
				!compaction_deferred(zone, cc->order))
			compact_zone(zone, cc);

		if (cc->order > 0) {
			if (zone_watermark_ok(zone, cc->order,
						low_wmark_pages(zone), 0, 0))
				compaction_defer_reset(zone, cc->order, false);
		}

		VM_BUG_ON(!list_empty(&cc->freepages));
		VM_BUG_ON(!list_empty(&cc->migratepages));
	}
}

void compact_pgdat(pg_data_t *pgdat, int order)
{
	struct compact_control cc = {
		.order = order,
		.mode = MIGRATE_ASYNC,
	};

	if (!order)
		return;

	__compact_pgdat(pgdat, &cc);
}

static void compact_node(int nid)
{
	struct compact_control cc = {
		.order = -1,
		.mode = MIGRATE_SYNC,
		.ignore_skip_hint = true,
	};

	__compact_pgdat(NODE_DATA(nid), &cc);
}

/* Compact all nodes in the system */
static void compact_nodes(void)
{
	int nid;

	/* Flush pending updates to the LRU lists */
	lru_add_drain_all();

	for_each_online_node(nid)
		compact_node(nid);
}

/* The written value is actually unused, all memory is compacted */
int sysctl_compact_memory;

/* This is the entry point for compacting all nodes via /proc/sys/vm */
int sysctl_compaction_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	if (write)
		compact_nodes();

	return 0;
}
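/*
 * Usage sketch (illustrative): writing any value from userspace, e.g.
 * "echo 1 > /proc/sys/vm/compact_memory", invokes the handler above and
 * compacts every online node; the written value itself is ignored.
 */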
int sysctl_extfrag_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	proc_dointvec_minmax(table, write, buffer, length, ppos);

	return 0;
}

#if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
static ssize_t sysfs_compact_node(struct device *dev,
			struct device_attribute *attr,
			const char *buf, size_t count)
{
	int nid = dev->id;

	if (nid >= 0 && nid < nr_node_ids && node_online(nid)) {
		/* Flush pending updates to the LRU lists */
		lru_add_drain_all();

		compact_node(nid);
	}

	return count;
}
static DEVICE_ATTR(compact, S_IWUSR, NULL, sysfs_compact_node);

int compaction_register_node(struct node *node)
{
	return device_create_file(&node->dev, &dev_attr_compact);
}

void compaction_unregister_node(struct node *node)
{
	return device_remove_file(&node->dev, &dev_attr_compact);
}
#endif /* CONFIG_SYSFS && CONFIG_NUMA */

#endif /* CONFIG_COMPACTION */