/*
 * linux/mm/compaction.c
 *
 * Memory compaction for the reduction of external fragmentation. Note that
 * this heavily depends upon page migration to do all the real heavy
 * lifting
 *
 * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
 */
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/compaction.h>
#include <linux/mm_inline.h>
#include <linux/backing-dev.h>
#include <linux/sysctl.h>
#include <linux/sysfs.h>
#include <linux/balloon_compaction.h>
#include <linux/page-isolation.h>
#include <linux/kasan.h>
#include "internal.h"

#ifdef CONFIG_COMPACTION
static inline void count_compact_event(enum vm_event_item item)
{
	count_vm_event(item);
}

static inline void count_compact_events(enum vm_event_item item, long delta)
{
	count_vm_events(item, delta);
}
#else
#define count_compact_event(item) do { } while (0)
#define count_compact_events(item, delta) do { } while (0)
#endif

#if defined CONFIG_COMPACTION || defined CONFIG_CMA
#ifdef CONFIG_TRACEPOINTS
static const char *const compaction_status_string[] = {
	"deferred",
	"skipped",
	"continue",
	"partial",
	"complete",
	"no_suitable_page",
	"not_suitable_zone",
};
#endif

#define CREATE_TRACE_POINTS
#include <trace/events/compaction.h>

static unsigned long release_freepages(struct list_head *freelist)
{
	struct page *page, *next;
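	/*
	 * Track the highest released PFN; compact_zone() uses the return
	 * value to update the zone's cached free-scanner position.
	 */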
	unsigned long high_pfn = 0;

	list_for_each_entry_safe(page, next, freelist, lru) {
		unsigned long pfn = page_to_pfn(page);
		list_del(&page->lru);
		__free_page(page);
		if (pfn > high_pfn)
			high_pfn = pfn;
	}

	return high_pfn;
}

static void map_pages(struct list_head *list)
{
	struct page *page;

	list_for_each_entry(page, list, lru) {
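		/* apply the allocation hooks that split_free_page() does not run */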
		arch_alloc_page(page, 0);
		kernel_map_pages(page, 1, 1);
		kasan_alloc_pages(page, 0);
	}
}

static inline bool migrate_async_suitable(int migratetype)
{
	return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE;
}

/*
 * Check that the whole (or subset of) a pageblock given by the interval of
 * [start_pfn, end_pfn) is valid and within the same zone, before scanning it
 * with the migration or free compaction scanner. The scanners then need to
 * use only pfn_valid_within() check for arches that allow holes within
 * pageblocks.
 *
 * Return struct page pointer of start_pfn, or NULL if checks were not passed.
 *
 * It's possible on some configurations to have a setup like node0 node1 node0
 * i.e. it's possible that all pages within a zone's range of pages do not
 * belong to a single zone. We assume that a border between node0 and node1
 * can occur within a single pageblock, but not a node0 node1 node0
 * interleaving within a single pageblock. It is therefore sufficient to check
 * the first and last page of a pageblock and avoid checking each individual
 * page in a pageblock.
 */
static struct page *pageblock_pfn_to_page(unsigned long start_pfn,
				unsigned long end_pfn, struct zone *zone)
{
	struct page *start_page;
	struct page *end_page;

	/* end_pfn is one past the range we are checking */
	end_pfn--;

	if (!pfn_valid(start_pfn) || !pfn_valid(end_pfn))
		return NULL;

	start_page = pfn_to_page(start_pfn);

	if (page_zone(start_page) != zone)
		return NULL;

	end_page = pfn_to_page(end_pfn);

	/* This gives a shorter code than deriving page_zone(end_page) */
	if (page_zone_id(start_page) != page_zone_id(end_page))
		return NULL;

	return start_page;
}

#ifdef CONFIG_COMPACTION

/* Do not skip compaction more than 64 times */
#define COMPACT_MAX_DEFER_SHIFT 6

/*
 * Compaction is deferred when compaction fails to result in a successful
 * page allocation. 1 << compact_defer_shift compactions are then skipped up
 * to a limit of 1 << COMPACT_MAX_DEFER_SHIFT.
 */
void defer_compaction(struct zone *zone, int order)
{
	zone->compact_considered = 0;
	zone->compact_defer_shift++;

	if (order < zone->compact_order_failed)
		zone->compact_order_failed = order;

	if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
		zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;

	trace_mm_compaction_defer_compaction(zone, order);
}

/* Returns true if compaction should be skipped this time */
bool compaction_deferred(struct zone *zone, int order)
{
	unsigned long defer_limit = 1UL << zone->compact_defer_shift;

	if (order < zone->compact_order_failed)
		return false;

	/* Avoid possible overflow */
	if (++zone->compact_considered > defer_limit)
		zone->compact_considered = defer_limit;

	if (zone->compact_considered >= defer_limit)
		return false;

	trace_mm_compaction_deferred(zone, order);

	return true;
}

/*
 * Update defer tracking counters after successful compaction of given order,
 * which means an allocation either succeeded (alloc_success == true) or is
 * expected to succeed.
 */
void compaction_defer_reset(struct zone *zone, int order,
		bool alloc_success)
{
	if (alloc_success) {
		zone->compact_considered = 0;
		zone->compact_defer_shift = 0;
	}
	if (order >= zone->compact_order_failed)
		zone->compact_order_failed = order + 1;

	trace_mm_compaction_defer_reset(zone, order);
}

/* Returns true if restarting compaction after many failures */
bool compaction_restarting(struct zone *zone, int order)
{
	if (order < zone->compact_order_failed)
		return false;

	return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT &&
		zone->compact_considered >= 1UL << zone->compact_defer_shift;
}
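
/*
 * Illustrative note: each defer_compaction() call bumps compact_defer_shift,
 * so the number of attempts that compaction_deferred() skips doubles from 1
 * up to 1 << COMPACT_MAX_DEFER_SHIFT (64). Once that ceiling is reached and
 * the deferred attempts have been used up, compaction_restarting() reports
 * true so compact_zone() can clear the pageblock skip hints before retrying.
 */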

/* Returns true if the pageblock should be scanned for pages to isolate. */
static inline bool isolation_suitable(struct compact_control *cc,
					struct page *page)
{
	if (cc->ignore_skip_hint)
		return true;

	return !get_pageblock_skip(page);
}

/*
 * This function is called to clear all cached information on pageblocks that
 * should be skipped for page isolation when the migrate and free page scanner
 * meet.
 */
static void __reset_isolation_suitable(struct zone *zone)
{
	unsigned long start_pfn = zone->zone_start_pfn;
	unsigned long end_pfn = zone_end_pfn(zone);
	unsigned long pfn;

	zone->compact_cached_migrate_pfn[0] = start_pfn;
	zone->compact_cached_migrate_pfn[1] = start_pfn;
	zone->compact_cached_free_pfn = end_pfn;
	zone->compact_blockskip_flush = false;

	/* Walk the zone and mark every pageblock as suitable for isolation */
	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		struct page *page;

		cond_resched();

		if (!pfn_valid(pfn))
			continue;

		page = pfn_to_page(pfn);
		if (zone != page_zone(page))
			continue;

		clear_pageblock_skip(page);
	}
}

void reset_isolation_suitable(pg_data_t *pgdat)
{
	int zoneid;

	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
		struct zone *zone = &pgdat->node_zones[zoneid];
		if (!populated_zone(zone))
			continue;

		/* Only flush if a full compaction finished recently */
		if (zone->compact_blockskip_flush)
			__reset_isolation_suitable(zone);
	}
}

/*
 * If no pages were isolated then mark this pageblock to be skipped in the
 * future. The information is later cleared by __reset_isolation_suitable().
 */
static void update_pageblock_skip(struct compact_control *cc,
			struct page *page, unsigned long nr_isolated,
			bool migrate_scanner)
{
	struct zone *zone = cc->zone;
	unsigned long pfn;

	if (cc->ignore_skip_hint)
		return;

	if (!page)
		return;

	if (nr_isolated)
		return;

	set_pageblock_skip(page);

	pfn = page_to_pfn(page);

	/* Update where async and sync compaction should restart */
	if (migrate_scanner) {
		if (pfn > zone->compact_cached_migrate_pfn[0])
			zone->compact_cached_migrate_pfn[0] = pfn;
		if (cc->mode != MIGRATE_ASYNC &&
		    pfn > zone->compact_cached_migrate_pfn[1])
			zone->compact_cached_migrate_pfn[1] = pfn;
	} else {
		if (pfn < zone->compact_cached_free_pfn)
			zone->compact_cached_free_pfn = pfn;
	}
}
#else
static inline bool isolation_suitable(struct compact_control *cc,
					struct page *page)
{
	return true;
}

static void update_pageblock_skip(struct compact_control *cc,
			struct page *page, unsigned long nr_isolated,
			bool migrate_scanner)
{
}
#endif /* CONFIG_COMPACTION */

/*
 * Compaction requires the taking of some coarse locks that are potentially
 * very heavily contended. For async compaction, back out if the lock cannot
 * be taken immediately. For sync compaction, spin on the lock if needed.
 *
 * Returns true if the lock is held
 * Returns false if the lock is not held and compaction should abort
 */
static bool compact_trylock_irqsave(spinlock_t *lock, unsigned long *flags,
						struct compact_control *cc)
{
	if (cc->mode == MIGRATE_ASYNC) {
		if (!spin_trylock_irqsave(lock, *flags)) {
			cc->contended = COMPACT_CONTENDED_LOCK;
			return false;
		}
	} else {
		spin_lock_irqsave(lock, *flags);
	}

	return true;
}

/*
 * Compaction requires the taking of some coarse locks that are potentially
 * very heavily contended. The lock should be periodically unlocked to avoid
 * having disabled IRQs for a long time, even when there is nobody waiting on
 * the lock. It might also be that allowing the IRQs will result in
 * need_resched() becoming true. If scheduling is needed, async compaction
 * aborts. Sync compaction schedules.
 * Either compaction type will also abort if a fatal signal is pending.
 * In either case if the lock was locked, it is dropped and not regained.
 *
 * Returns true if compaction should abort due to fatal signal pending, or
 *		async compaction due to need_resched()
 * Returns false when compaction can continue (sync compaction might have
 *		scheduled)
 */
static bool compact_unlock_should_abort(spinlock_t *lock,
		unsigned long flags, bool *locked, struct compact_control *cc)
{
	if (*locked) {
		spin_unlock_irqrestore(lock, flags);
		*locked = false;
	}

	if (fatal_signal_pending(current)) {
		cc->contended = COMPACT_CONTENDED_SCHED;
		return true;
	}

	if (need_resched()) {
		if (cc->mode == MIGRATE_ASYNC) {
			cc->contended = COMPACT_CONTENDED_SCHED;
			return true;
		}
		cond_resched();
	}

	return false;
}

/*
 * Aside from avoiding lock contention, compaction also periodically checks
 * need_resched() and either schedules in sync compaction or aborts async
 * compaction. This is similar to what compact_unlock_should_abort() does, but
 * is used where no lock is concerned.
 *
 * Returns false when no scheduling was needed, or sync compaction scheduled.
 * Returns true when async compaction should abort.
 */
static inline bool compact_should_abort(struct compact_control *cc)
{
	/* async compaction aborts if contended */
	if (need_resched()) {
		if (cc->mode == MIGRATE_ASYNC) {
			cc->contended = COMPACT_CONTENDED_SCHED;
			return true;
		}

		cond_resched();
	}

	return false;
}

/*
 * Isolate free pages onto a private freelist. If @strict is true, will abort
 * returning 0 on any invalid PFNs or non-free pages inside of the pageblock
 * (even though it may still end up isolating some pages).
 */
static unsigned long isolate_freepages_block(struct compact_control *cc,
				unsigned long *start_pfn,
				unsigned long end_pfn,
				struct list_head *freelist,
				bool strict)
{
	int nr_scanned = 0, total_isolated = 0;
	struct page *cursor, *valid_page = NULL;
	unsigned long flags = 0;
	bool locked = false;
	unsigned long blockpfn = *start_pfn;

	cursor = pfn_to_page(blockpfn);

	/* Isolate free pages. */
	for (; blockpfn < end_pfn; blockpfn++, cursor++) {
		int isolated, i;
		struct page *page = cursor;

		/*
		 * Periodically drop the lock (if held) regardless of its
		 * contention, to give a chance to IRQs. Abort if fatal signal
		 * pending or async compaction detects need_resched()
		 */
		if (!(blockpfn % SWAP_CLUSTER_MAX)
		    && compact_unlock_should_abort(&cc->zone->lock, flags,
								&locked, cc))
			break;

		nr_scanned++;
		if (!pfn_valid_within(blockpfn))
			goto isolate_fail;

		if (!valid_page)
			valid_page = page;
		if (!PageBuddy(page))
			goto isolate_fail;

		/*
		 * If we already hold the lock, we can skip some rechecking.
		 * Note that if we hold the lock now, checked_pageblock was
		 * already set in some previous iteration (or strict is true),
		 * so it is correct to skip the suitable migration target
		 * recheck as well.
		 */
		if (!locked) {
			/*
			 * The zone lock must be held to isolate freepages.
			 * Unfortunately this is a very coarse lock and can be
			 * heavily contended if there are parallel allocations
			 * or parallel compactions. For async compaction do not
			 * spin on the lock and we acquire the lock as late as
			 * possible.
			 */
			locked = compact_trylock_irqsave(&cc->zone->lock,
								&flags, cc);
			if (!locked)
				break;

			/* Recheck this is a buddy page under lock */
			if (!PageBuddy(page))
				goto isolate_fail;
		}

		/* Found a free page, break it into order-0 pages */
		isolated = split_free_page(page);
		total_isolated += isolated;
		for (i = 0; i < isolated; i++) {
			list_add(&page->lru, freelist);
			page++;
		}

		/* If a page was split, advance to the end of it */
		if (isolated) {
			cc->nr_freepages += isolated;
			if (!strict &&
				cc->nr_migratepages <= cc->nr_freepages) {
				blockpfn += isolated;
				break;
			}

			blockpfn += isolated - 1;
			cursor += isolated - 1;
			continue;
		}

isolate_fail:
		if (strict)
			break;
		else
			continue;

	}

	trace_mm_compaction_isolate_freepages(*start_pfn, blockpfn,
					nr_scanned, total_isolated);

	/* Record how far we have got within the block */
	*start_pfn = blockpfn;

	/*
	 * If strict isolation is requested by CMA then check that all the
	 * pages requested were isolated. If there were any failures, 0 is
	 * returned and CMA will fail.
	 */
	if (strict && blockpfn < end_pfn)
		total_isolated = 0;

	if (locked)
		spin_unlock_irqrestore(&cc->zone->lock, flags);

	/* Update the pageblock-skip if the whole pageblock was scanned */
	if (blockpfn == end_pfn)
		update_pageblock_skip(cc, valid_page, total_isolated, false);

	count_compact_events(COMPACTFREE_SCANNED, nr_scanned);
	if (total_isolated)
		count_compact_events(COMPACTISOLATED, total_isolated);
	return total_isolated;
}

/**
 * isolate_freepages_range() - isolate free pages.
 * @start_pfn: The first PFN to start isolating.
 * @end_pfn:   The one-past-last PFN.
 *
 * Non-free pages, invalid PFNs, or zone boundaries within the
 * [start_pfn, end_pfn) range are considered errors, and cause the function to
 * undo its actions and return zero.
 *
 * Otherwise, the function returns the one-past-the-last PFN of isolated pages
 * (which may be greater than end_pfn if the end fell in the middle of
 * a free page).
 */
unsigned long
isolate_freepages_range(struct compact_control *cc,
			unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long isolated, pfn, block_end_pfn;
	LIST_HEAD(freelist);

	pfn = start_pfn;
	block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);

	for (; pfn < end_pfn; pfn += isolated,
				block_end_pfn += pageblock_nr_pages) {
		/* Protect pfn from changing by isolate_freepages_block */
		unsigned long isolate_start_pfn = pfn;

		block_end_pfn = min(block_end_pfn, end_pfn);

		/*
		 * pfn could pass the block_end_pfn if isolated freepage
		 * is more than pageblock order. In this case, we adjust
		 * scanning range to right one.
		 */
		if (pfn >= block_end_pfn) {
			block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
			block_end_pfn = min(block_end_pfn, end_pfn);
		}

		if (!pageblock_pfn_to_page(pfn, block_end_pfn, cc->zone))
			break;

		isolated = isolate_freepages_block(cc, &isolate_start_pfn,
						block_end_pfn, &freelist, true);

		/*
		 * In strict mode, isolate_freepages_block() returns 0 if
		 * there are any holes in the block (ie. invalid PFNs or
		 * non-free pages).
		 */
		if (!isolated)
			break;

		/*
		 * If we managed to isolate pages, it is always (1 << n) *
		 * pageblock_nr_pages for some non-negative n.  (Max order
		 * page may span two pageblocks).
		 */
	}

	/* split_free_page does not map the pages */
	map_pages(&freelist);

	if (pfn < end_pfn) {
		/* Loop terminated early, cleanup. */
		release_freepages(&freelist);
		return 0;
	}

	/* We don't use freelists for anything. */
	return pfn;
}

/* Update the number of anon and file isolated pages in the zone */
static void acct_isolated(struct zone *zone, struct compact_control *cc)
{
	struct page *page;
	unsigned int count[2] = { 0, };

	if (list_empty(&cc->migratepages))
		return;

	list_for_each_entry(page, &cc->migratepages, lru)
		count[!!page_is_file_cache(page)]++;

	mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
	mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
}

/* Similar to reclaim, but different enough that they don't share logic */
static bool too_many_isolated(struct zone *zone)
{
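	/* Throttle once isolated pages exceed half of the zone's LRU pages */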
	unsigned long active, inactive, isolated;

	inactive = zone_page_state(zone, NR_INACTIVE_FILE) +
					zone_page_state(zone, NR_INACTIVE_ANON);
	active = zone_page_state(zone, NR_ACTIVE_FILE) +
					zone_page_state(zone, NR_ACTIVE_ANON);
	isolated = zone_page_state(zone, NR_ISOLATED_FILE) +
					zone_page_state(zone, NR_ISOLATED_ANON);

	return isolated > (inactive + active) / 2;
}

/**
 * isolate_migratepages_block() - isolate all migrate-able pages within
 *				  a single pageblock
 * @cc:		Compaction control structure.
 * @low_pfn:	The first PFN to isolate
 * @end_pfn:	The one-past-the-last PFN to isolate, within same pageblock
 * @isolate_mode: Isolation mode to be used.
 *
 * Isolate all pages that can be migrated from the range specified by
 * [low_pfn, end_pfn). The range is expected to be within same pageblock.
 * Returns zero if there is a fatal signal pending, otherwise the PFN of the
 * first page that was not scanned (which may be less than, equal to, or
 * greater than end_pfn).
 *
 * The pages are isolated on cc->migratepages list (not required to be empty),
 * and cc->nr_migratepages is updated accordingly. The cc->migrate_pfn field
 * is neither read nor updated.
 */
static unsigned long
isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
			unsigned long end_pfn, isolate_mode_t isolate_mode)
{
	struct zone *zone = cc->zone;
	unsigned long nr_scanned = 0, nr_isolated = 0;
	struct list_head *migratelist = &cc->migratepages;
	struct lruvec *lruvec;
	unsigned long flags = 0;
	bool locked = false;
	struct page *page = NULL, *valid_page = NULL;
	unsigned long start_pfn = low_pfn;

	/*
	 * Ensure that there are not too many pages isolated from the LRU
	 * list by either parallel reclaimers or compaction. If there are,
	 * delay for some time until fewer pages are isolated
	 */
	while (unlikely(too_many_isolated(zone))) {
		/* async migration should just abort */
		if (cc->mode == MIGRATE_ASYNC)
			return 0;

		congestion_wait(BLK_RW_ASYNC, HZ/10);

		if (fatal_signal_pending(current))
			return 0;
	}

	if (compact_should_abort(cc))
		return 0;

	/* Time to isolate some pages for migration */
	for (; low_pfn < end_pfn; low_pfn++) {
		/*
		 * Periodically drop the lock (if held) regardless of its
		 * contention, to give a chance to IRQs. Abort async compaction
		 * if contended.
		 */
		if (!(low_pfn % SWAP_CLUSTER_MAX)
		    && compact_unlock_should_abort(&zone->lru_lock, flags,
								&locked, cc))
			break;

		if (!pfn_valid_within(low_pfn))
			continue;
		nr_scanned++;

		page = pfn_to_page(low_pfn);

		if (!valid_page)
			valid_page = page;

		/*
		 * Skip if free. We read page order here without zone lock
		 * which is generally unsafe, but the race window is small and
		 * the worst thing that can happen is that we skip some
		 * potential isolation targets.
		 */
		if (PageBuddy(page)) {
			unsigned long freepage_order = page_order_unsafe(page);

			/*
			 * Without lock, we cannot be sure that what we got is
			 * a valid page order. Consider only values in the
			 * valid order range to prevent low_pfn overflow.
			 */
			if (freepage_order > 0 && freepage_order < MAX_ORDER)
				low_pfn += (1UL << freepage_order) - 1;
			continue;
		}

		/*
		 * Check may be lockless but that's ok as we recheck later.
		 * It's possible to migrate LRU pages and balloon pages
		 * Skip any other type of page
		 */
		if (!PageLRU(page)) {
			if (unlikely(balloon_page_movable(page))) {
				if (balloon_page_isolate(page)) {
					/* Successfully isolated */
					goto isolate_success;
				}
			}
			continue;
		}

		/*
		 * PageLRU is set. lru_lock normally excludes isolation
		 * splitting and collapsing (collapsing has already happened
		 * if PageLRU is set) but the lock is not necessarily taken
		 * here and it is wasteful to take it just to check transhuge.
		 * Check TransHuge without lock and skip the whole pageblock if
		 * it's either a transhuge or hugetlbfs page, as calling
		 * compound_order() without preventing THP from splitting the
		 * page underneath us may return surprising results.
		 */
		if (PageTransHuge(page)) {
			if (!locked)
				low_pfn = ALIGN(low_pfn + 1,
						pageblock_nr_pages) - 1;
			else
				low_pfn += (1 << compound_order(page)) - 1;

			continue;
		}

		/*
		 * Migration will fail if an anonymous page is pinned in memory,
		 * so avoid taking lru_lock and isolating it unnecessarily in an
		 * admittedly racy check.
		 */
		if (!page_mapping(page) &&
		    page_count(page) > page_mapcount(page))
			continue;

		/* If we already hold the lock, we can skip some rechecking */
		if (!locked) {
			locked = compact_trylock_irqsave(&zone->lru_lock,
								&flags, cc);
			if (!locked)
				break;

			/* Recheck PageLRU and PageTransHuge under lock */
			if (!PageLRU(page))
				continue;
			if (PageTransHuge(page)) {
				low_pfn += (1 << compound_order(page)) - 1;
				continue;
			}
		}

		lruvec = mem_cgroup_page_lruvec(page, zone);

		/* Try isolate the page */
		if (__isolate_lru_page(page, isolate_mode) != 0)
			continue;

		VM_BUG_ON_PAGE(PageTransCompound(page), page);

		/* Successfully isolated */
		del_page_from_lru_list(page, lruvec, page_lru(page));

isolate_success:
		list_add(&page->lru, migratelist);
		cc->nr_migratepages++;
		nr_isolated++;

		/* Avoid isolating too much */
		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) {
			++low_pfn;
			break;
		}
	}

	/*
	 * The PageBuddy() check could have potentially brought us outside
	 * the range to be scanned.
	 */
	if (unlikely(low_pfn > end_pfn))
		low_pfn = end_pfn;

	if (locked)
		spin_unlock_irqrestore(&zone->lru_lock, flags);

	/*
	 * Update the pageblock-skip information and cached scanner pfn,
	 * if the whole pageblock was scanned without isolating any page.
	 */
	if (low_pfn == end_pfn)
		update_pageblock_skip(cc, valid_page, nr_isolated, true);

	trace_mm_compaction_isolate_migratepages(start_pfn, low_pfn,
						nr_scanned, nr_isolated);

	count_compact_events(COMPACTMIGRATE_SCANNED, nr_scanned);
	if (nr_isolated)
		count_compact_events(COMPACTISOLATED, nr_isolated);

	return low_pfn;
}

/**
 * isolate_migratepages_range() - isolate migrate-able pages in a PFN range
 * @cc:        Compaction control structure.
 * @start_pfn: The first PFN to start isolating.
 * @end_pfn:   The one-past-last PFN.
 *
 * Returns zero if isolation fails fatally due to e.g. pending signal.
 * Otherwise, function returns one-past-the-last PFN of isolated page
 * (which may be greater than end_pfn if the end fell in the middle of a THP page).
 */
unsigned long
isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
							unsigned long end_pfn)
{
	unsigned long pfn, block_end_pfn;

	/* Scan block by block. First and last block may be incomplete */
	pfn = start_pfn;
	block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);

	for (; pfn < end_pfn; pfn = block_end_pfn,
				block_end_pfn += pageblock_nr_pages) {

		block_end_pfn = min(block_end_pfn, end_pfn);

		if (!pageblock_pfn_to_page(pfn, block_end_pfn, cc->zone))
			continue;

		pfn = isolate_migratepages_block(cc, pfn, block_end_pfn,
							ISOLATE_UNEVICTABLE);

		/*
		 * In case of fatal failure, release everything that might
		 * have been isolated in the previous iteration, and signal
		 * the failure back to caller.
		 */
		if (!pfn) {
			putback_movable_pages(&cc->migratepages);
			cc->nr_migratepages = 0;
			break;
		}

		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX)
			break;
	}
	acct_isolated(cc->zone, cc);

	return pfn;
}

#endif /* CONFIG_COMPACTION || CONFIG_CMA */
#ifdef CONFIG_COMPACTION

/* Returns true if the page is within a block suitable for migration to */
static bool suitable_migration_target(struct page *page)
{
	/* If the page is a large free page, then disallow migration */
	if (PageBuddy(page)) {
		/*
		 * We are checking page_order without zone->lock taken. But
		 * the only small danger is that we skip a potentially suitable
		 * pageblock, so it's not worth checking the order for a valid range.
		 */
		if (page_order_unsafe(page) >= pageblock_order)
			return false;
	}

	/* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
	if (migrate_async_suitable(get_pageblock_migratetype(page)))
		return true;

	/* Otherwise skip the block */
	return false;
}

/*
 * Based on information in the current compact_control, find blocks
 * suitable for isolating free pages from and then isolate them.
 */
static void isolate_freepages(struct compact_control *cc)
{
	struct zone *zone = cc->zone;
	struct page *page;
	unsigned long block_start_pfn;	/* start of current pageblock */
	unsigned long isolate_start_pfn; /* exact pfn we start at */
	unsigned long block_end_pfn;	/* end of current pageblock */
	unsigned long low_pfn;	     /* lowest pfn scanner is able to scan */
	struct list_head *freelist = &cc->freepages;

	/*
	 * Initialise the free scanner. The starting point is where we last
	 * successfully isolated from, zone-cached value, or the end of the
	 * zone when isolating for the first time. For looping we also need
	 * this pfn aligned down to the pageblock boundary, because we do
	 * block_start_pfn -= pageblock_nr_pages in the for loop.
	 * For the ending point, take care when isolating in the last pageblock
	 * of a zone which ends in the middle of a pageblock.
	 * The low boundary is the end of the pageblock the migration scanner
	 * is using.
	 */
	isolate_start_pfn = cc->free_pfn;
	block_start_pfn = cc->free_pfn & ~(pageblock_nr_pages-1);
	block_end_pfn = min(block_start_pfn + pageblock_nr_pages,
						zone_end_pfn(zone));
	low_pfn = ALIGN(cc->migrate_pfn + 1, pageblock_nr_pages);

	/*
	 * Isolate free pages until enough are available to migrate the
	 * pages on cc->migratepages. We stop searching if the migrate
	 * and free page scanners meet or enough free pages are isolated.
	 */
	for (; block_start_pfn >= low_pfn &&
			cc->nr_migratepages > cc->nr_freepages;
				block_end_pfn = block_start_pfn,
				block_start_pfn -= pageblock_nr_pages,
				isolate_start_pfn = block_start_pfn) {

		/*
		 * This can iterate a massively long zone without finding any
		 * suitable migration targets, so periodically check if we need
		 * to schedule, or even abort async compaction.
		 */
		if (!(block_start_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
						&& compact_should_abort(cc))
			break;

		page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
									zone);
		if (!page)
			continue;

		/* Check the block is suitable for migration */
		if (!suitable_migration_target(page))
			continue;

		/* If isolation recently failed, do not retry */
		if (!isolation_suitable(cc, page))
			continue;

		/* Found a block suitable for isolating free pages from. */
		isolate_freepages_block(cc, &isolate_start_pfn,
					block_end_pfn, freelist, false);

		/*
		 * Remember where the free scanner should restart next time,
		 * which is where isolate_freepages_block() left off.
		 * But if it scanned the whole pageblock, isolate_start_pfn
		 * now points at block_end_pfn, which is the start of the next
		 * pageblock.
		 * In that case we will however want to restart at the start
		 * of the previous pageblock.
		 */
		cc->free_pfn = (isolate_start_pfn < block_end_pfn) ?
				isolate_start_pfn :
				block_start_pfn - pageblock_nr_pages;

		/*
		 * isolate_freepages_block() might have aborted due to async
		 * compaction being contended
		 */
		if (cc->contended)
			break;
	}

	/* split_free_page does not map the pages */
	map_pages(freelist);

	/*
	 * If we crossed the migrate scanner, we want to keep it that way
	 * so that compact_finished() may detect this
	 */
	if (block_start_pfn < low_pfn)
		cc->free_pfn = cc->migrate_pfn;
}

/*
 * This is a migrate-callback that "allocates" freepages by taking pages
 * from the isolated freelists in the block we are migrating to.
 */
static struct page *compaction_alloc(struct page *migratepage,
					unsigned long data,
					int **result)
{
	struct compact_control *cc = (struct compact_control *)data;
	struct page *freepage;

	/*
	 * Isolate free pages if necessary, and if we are not aborting due to
	 * contention.
	 */
	if (list_empty(&cc->freepages)) {
		if (!cc->contended)
			isolate_freepages(cc);

		if (list_empty(&cc->freepages))
			return NULL;
	}

	freepage = list_entry(cc->freepages.next, struct page, lru);
	list_del(&freepage->lru);
	cc->nr_freepages--;

	return freepage;
}

/*
 * This is a migrate-callback that "frees" freepages back to the isolated
 * freelist.  All pages on the freelist are from the same zone, so there is no
 * special handling needed for NUMA.
 */
static void compaction_free(struct page *page, unsigned long data)
{
	struct compact_control *cc = (struct compact_control *)data;

	list_add(&page->lru, &cc->freepages);
	cc->nr_freepages++;
}
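
/*
 * Note: compaction_alloc() and compaction_free() are the callbacks handed to
 * migrate_pages() by compact_zone(). The former supplies a target page for
 * each page being migrated (refilling cc->freepages via isolate_freepages()
 * when it runs dry), the latter takes back target pages that end up unused,
 * e.g. when migration of the corresponding source page fails.
 */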

/* possible outcome of isolate_migratepages */
typedef enum {
	ISOLATE_ABORT,		/* Abort compaction now */
	ISOLATE_NONE,		/* No pages isolated, continue scanning */
	ISOLATE_SUCCESS,	/* Pages isolated, migrate */
} isolate_migrate_t;

/*
 * Allow userspace to control policy on scanning the unevictable LRU for
 * compactable pages.
 */
int sysctl_compact_unevictable_allowed __read_mostly = 1;

/*
 * Isolate all pages that can be migrated from the first suitable block,
 * starting at the block pointed to by the migrate scanner pfn within
 * compact_control.
 */
static isolate_migrate_t isolate_migratepages(struct zone *zone,
					struct compact_control *cc)
{
	unsigned long low_pfn, end_pfn;
	struct page *page;
	const isolate_mode_t isolate_mode =
		(sysctl_compact_unevictable_allowed ? ISOLATE_UNEVICTABLE : 0) |
		(cc->mode == MIGRATE_ASYNC ? ISOLATE_ASYNC_MIGRATE : 0);

	/*
	 * Start at where we last stopped, or beginning of the zone as
	 * initialized by compact_zone()
	 */
	low_pfn = cc->migrate_pfn;

	/* Only scan within a pageblock boundary */
	end_pfn = ALIGN(low_pfn + 1, pageblock_nr_pages);

	/*
	 * Iterate over whole pageblocks until we find the first suitable.
	 * Do not cross the free scanner.
	 */
	for (; end_pfn <= cc->free_pfn;
			low_pfn = end_pfn, end_pfn += pageblock_nr_pages) {

		/*
		 * This can potentially iterate a massively long zone with
		 * many pageblocks unsuitable, so periodically check if we
		 * need to schedule, or even abort async compaction.
		 */
		if (!(low_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
						&& compact_should_abort(cc))
			break;

		page = pageblock_pfn_to_page(low_pfn, end_pfn, zone);
		if (!page)
			continue;

		/* If isolation recently failed, do not retry */
		if (!isolation_suitable(cc, page))
			continue;

		/*
		 * For async compaction, also only scan in MOVABLE blocks.
		 * Async compaction is optimistic to see if the minimum amount
		 * of work satisfies the allocation.
		 */
		if (cc->mode == MIGRATE_ASYNC &&
		    !migrate_async_suitable(get_pageblock_migratetype(page)))
			continue;

		/* Perform the isolation */
		low_pfn = isolate_migratepages_block(cc, low_pfn, end_pfn,
								isolate_mode);

		if (!low_pfn || cc->contended) {
			acct_isolated(zone, cc);
			return ISOLATE_ABORT;
		}

		/*
		 * Either we isolated something and proceed with migration. Or
		 * we failed and compact_zone should decide if we should
		 * continue or not.
		 */
		break;
	}

	acct_isolated(zone, cc);
	/*
	 * Record where migration scanner will be restarted. If we end up in
	 * the same pageblock as the free scanner, make the scanners fully
	 * meet so that compact_finished() terminates compaction.
	 */
	cc->migrate_pfn = (end_pfn <= cc->free_pfn) ? low_pfn : cc->free_pfn;

	return cc->nr_migratepages ? ISOLATE_SUCCESS : ISOLATE_NONE;
}

static int __compact_finished(struct zone *zone, struct compact_control *cc,
			    const int migratetype)
{
	unsigned int order;
	unsigned long watermark;

	if (cc->contended || fatal_signal_pending(current))
		return COMPACT_PARTIAL;

	/* Compaction run completes if the migrate and free scanner meet */
	if (cc->free_pfn <= cc->migrate_pfn) {
		/* Let the next compaction start anew. */
		zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;
		zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;
		zone->compact_cached_free_pfn = zone_end_pfn(zone);

		/*
		 * Mark that the PG_migrate_skip information should be cleared
		 * by kswapd when it goes to sleep. kswapd does not set the
		 * flag itself as the decision to be clear should be directly
		 * based on an allocation request.
		 */
		if (!current_is_kswapd())
			zone->compact_blockskip_flush = true;

		return COMPACT_COMPLETE;
	}

	/*
	 * order == -1 is expected when compacting via
	 * /proc/sys/vm/compact_memory
	 */
	if (cc->order == -1)
		return COMPACT_CONTINUE;

	/* Compaction run is not finished if the watermark is not met */
	watermark = low_wmark_pages(zone);

	if (!zone_watermark_ok(zone, cc->order, watermark, cc->classzone_idx,
							cc->alloc_flags))
		return COMPACT_CONTINUE;

	/* Direct compactor: Is a suitable page free? */
	for (order = cc->order; order < MAX_ORDER; order++) {
		struct free_area *area = &zone->free_area[order];
		bool can_steal;

		/* Job done if page is free of the right migratetype */
		if (!list_empty(&area->free_list[migratetype]))
			return COMPACT_PARTIAL;

#ifdef CONFIG_CMA
		/* MIGRATE_MOVABLE can fallback on MIGRATE_CMA */
		if (migratetype == MIGRATE_MOVABLE &&
			!list_empty(&area->free_list[MIGRATE_CMA]))
			return COMPACT_PARTIAL;
#endif
		/*
		 * Job done if allocation would steal freepages from
		 * other migratetype buddy lists.
		 */
		if (find_suitable_fallback(area, order, migratetype,
						true, &can_steal) != -1)
			return COMPACT_PARTIAL;
	}

	return COMPACT_NO_SUITABLE_PAGE;
}

static int compact_finished(struct zone *zone, struct compact_control *cc,
			    const int migratetype)
{
	int ret;

	ret = __compact_finished(zone, cc, migratetype);
	trace_mm_compaction_finished(zone, cc->order, ret);
	if (ret == COMPACT_NO_SUITABLE_PAGE)
		ret = COMPACT_CONTINUE;

	return ret;
}

/*
 * compaction_suitable: Is this suitable to run compaction on this zone now?
 * Returns
 *   COMPACT_SKIPPED  - If there are too few free pages for compaction
 *   COMPACT_PARTIAL  - If the allocation would succeed without compaction
 *   COMPACT_CONTINUE - If compaction should run now
 */
static unsigned long __compaction_suitable(struct zone *zone, int order,
					int alloc_flags, int classzone_idx)
{
	int fragindex;
	unsigned long watermark;

	/*
	 * order == -1 is expected when compacting via
	 * /proc/sys/vm/compact_memory
	 */
	if (order == -1)
		return COMPACT_CONTINUE;

	watermark = low_wmark_pages(zone);
	/*
	 * If watermarks for high-order allocation are already met, there
	 * should be no need for compaction at all.
	 */
	if (zone_watermark_ok(zone, order, watermark, classzone_idx,
								alloc_flags))
		return COMPACT_PARTIAL;

	/*
	 * Watermarks for order-0 must be met for compaction. Note the 2UL.
	 * This is because during migration, copies of pages need to be
	 * allocated and for a short time, the footprint is higher
	 */
	watermark += (2UL << order);
	if (!zone_watermark_ok(zone, 0, watermark, classzone_idx, alloc_flags))
		return COMPACT_SKIPPED;

	/*
	 * fragmentation index determines if allocation failures are due to
	 * low memory or external fragmentation
	 *
	 * index of -1000 would imply allocations might succeed depending on
	 * watermarks, but we already failed the high-order watermark check
	 * index towards 0 implies failure is due to lack of memory
	 * index towards 1000 implies failure is due to fragmentation
	 *
	 * Only compact if a failure would be due to fragmentation.
	 */
	fragindex = fragmentation_index(zone, order);
	if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
		return COMPACT_NOT_SUITABLE_ZONE;

	return COMPACT_CONTINUE;
}
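
/*
 * Worked example (illustrative): for an order-3 request, the order-0 check
 * above demands low_wmark_pages(zone) + (2UL << 3) = watermark + 16 extra
 * free pages, because migration needs destination pages while the source
 * pages are still allocated.
 */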

unsigned long compaction_suitable(struct zone *zone, int order,
					int alloc_flags, int classzone_idx)
{
	unsigned long ret;

	ret = __compaction_suitable(zone, order, alloc_flags, classzone_idx);
	trace_mm_compaction_suitable(zone, order, ret);
	if (ret == COMPACT_NOT_SUITABLE_ZONE)
		ret = COMPACT_SKIPPED;

	return ret;
}

static int compact_zone(struct zone *zone, struct compact_control *cc)
{
	int ret;
	unsigned long start_pfn = zone->zone_start_pfn;
	unsigned long end_pfn = zone_end_pfn(zone);
	const int migratetype = gfpflags_to_migratetype(cc->gfp_mask);
	const bool sync = cc->mode != MIGRATE_ASYNC;
	unsigned long last_migrated_pfn = 0;

	ret = compaction_suitable(zone, cc->order, cc->alloc_flags,
							cc->classzone_idx);
	switch (ret) {
	case COMPACT_PARTIAL:
	case COMPACT_SKIPPED:
		/* Compaction is likely to fail */
		return ret;
	case COMPACT_CONTINUE:
		/* Fall through to compaction */
		;
	}

	/*
	 * Clear pageblock skip if there were failures recently and compaction
	 * is about to be retried after being deferred. kswapd does not do
	 * this reset as it'll reset the cached information when going to sleep.
	 */
	if (compaction_restarting(zone, cc->order) && !current_is_kswapd())
		__reset_isolation_suitable(zone);

	/*
	 * Setup to move all movable pages to the end of the zone. Used cached
	 * information on where the scanners should start but check that it
	 * is initialised by ensuring the values are within zone boundaries.
	 */
	cc->migrate_pfn = zone->compact_cached_migrate_pfn[sync];
	cc->free_pfn = zone->compact_cached_free_pfn;
	if (cc->free_pfn < start_pfn || cc->free_pfn > end_pfn) {
		cc->free_pfn = end_pfn & ~(pageblock_nr_pages-1);
		zone->compact_cached_free_pfn = cc->free_pfn;
	}
	if (cc->migrate_pfn < start_pfn || cc->migrate_pfn > end_pfn) {
		cc->migrate_pfn = start_pfn;
		zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn;
		zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn;
	}

	trace_mm_compaction_begin(start_pfn, cc->migrate_pfn,
				cc->free_pfn, end_pfn, sync);

	migrate_prep_local();

	while ((ret = compact_finished(zone, cc, migratetype)) ==
						COMPACT_CONTINUE) {
		int err;
		unsigned long isolate_start_pfn = cc->migrate_pfn;

		switch (isolate_migratepages(zone, cc)) {
		case ISOLATE_ABORT:
			ret = COMPACT_PARTIAL;
			putback_movable_pages(&cc->migratepages);
			cc->nr_migratepages = 0;
			goto out;
		case ISOLATE_NONE:
			/*
			 * We haven't isolated and migrated anything, but
			 * there might still be unflushed migrations from
			 * previous cc->order aligned block.
			 */
			goto check_drain;
		case ISOLATE_SUCCESS:
			;
		}

		err = migrate_pages(&cc->migratepages, compaction_alloc,
				compaction_free, (unsigned long)cc, cc->mode,
				MR_COMPACTION);

		trace_mm_compaction_migratepages(cc->nr_migratepages, err,
							&cc->migratepages);

		/* All pages were either migrated or will be released */
		cc->nr_migratepages = 0;
		if (err) {
			putback_movable_pages(&cc->migratepages);
			/*
			 * migrate_pages() may return -ENOMEM when scanners meet
			 * and we want compact_finished() to detect it
			 */
			if (err == -ENOMEM && cc->free_pfn > cc->migrate_pfn) {
				ret = COMPACT_PARTIAL;
				goto out;
			}
		}

		/*
		 * Record where we could have freed pages by migration and not
		 * yet flushed them to buddy allocator. We use the pfn that
		 * isolate_migratepages() started from in this loop iteration
		 * - this is the lowest page that could have been isolated and
		 * then freed by migration.
		 */
		if (!last_migrated_pfn)
			last_migrated_pfn = isolate_start_pfn;

check_drain:
		/*
		 * Has the migration scanner moved away from the previous
		 * cc->order aligned block where we migrated from? If yes,
		 * flush the pages that were freed, so that they can merge and
		 * compact_finished() can detect immediately if allocation
		 * would succeed.
		 */
		if (cc->order > 0 && last_migrated_pfn) {
			int cpu;
			unsigned long current_block_start =
				cc->migrate_pfn & ~((1UL << cc->order) - 1);

			if (last_migrated_pfn < current_block_start) {
				cpu = get_cpu();
				lru_add_drain_cpu(cpu);
				drain_local_pages(zone);
				put_cpu();
				/* No more flushing until we migrate again */
				last_migrated_pfn = 0;
			}
		}

	}

out:
	/*
	 * Release free pages and update where the free scanner should restart,
	 * so we don't leave any returned pages behind in the next attempt.
	 */
	if (cc->nr_freepages > 0) {
		unsigned long free_pfn = release_freepages(&cc->freepages);

		cc->nr_freepages = 0;
		VM_BUG_ON(free_pfn == 0);
		/* The cached pfn is always the first in a pageblock */
		free_pfn &= ~(pageblock_nr_pages-1);
		/*
		 * Only go back, not forward. The cached pfn might have been
		 * already reset to zone end in compact_finished()
		 */
		if (free_pfn > zone->compact_cached_free_pfn)
			zone->compact_cached_free_pfn = free_pfn;
	}

	trace_mm_compaction_end(start_pfn, cc->migrate_pfn,
				cc->free_pfn, end_pfn, sync, ret);

	return ret;
}

static unsigned long compact_zone_order(struct zone *zone, int order,
		gfp_t gfp_mask, enum migrate_mode mode, int *contended,
		int alloc_flags, int classzone_idx)
{
	unsigned long ret;
	struct compact_control cc = {
		.nr_freepages = 0,
		.nr_migratepages = 0,
		.order = order,
		.gfp_mask = gfp_mask,
		.zone = zone,
		.mode = mode,
		.alloc_flags = alloc_flags,
		.classzone_idx = classzone_idx,
	};
	INIT_LIST_HEAD(&cc.freepages);
	INIT_LIST_HEAD(&cc.migratepages);

	ret = compact_zone(zone, &cc);

	VM_BUG_ON(!list_empty(&cc.freepages));
	VM_BUG_ON(!list_empty(&cc.migratepages));

	*contended = cc.contended;
	return ret;
}

int sysctl_extfrag_threshold = 500;

/**
 * try_to_compact_pages - Direct compact to satisfy a high-order allocation
 * @gfp_mask: The GFP mask of the current allocation
 * @order: The order of the current allocation
 * @alloc_flags: The allocation flags of the current allocation
 * @ac: The context of current allocation
 * @mode: The migration mode for async, sync light, or sync migration
 * @contended: Return value that determines if compaction was aborted due to
 *	       need_resched() or lock contention
 *
 * This is the main entry point for direct page compaction.
 */
unsigned long try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
			int alloc_flags, const struct alloc_context *ac,
			enum migrate_mode mode, int *contended)
{
	int may_enter_fs = gfp_mask & __GFP_FS;
	int may_perform_io = gfp_mask & __GFP_IO;
	struct zoneref *z;
	struct zone *zone;
	int rc = COMPACT_DEFERRED;
	int all_zones_contended = COMPACT_CONTENDED_LOCK; /* init for &= op */

	*contended = COMPACT_CONTENDED_NONE;

	/* Check if the GFP flags allow compaction */
	if (!order || !may_enter_fs || !may_perform_io)
		return COMPACT_SKIPPED;

	trace_mm_compaction_try_to_compact_pages(order, gfp_mask, mode);

	/* Compact each zone in the list */
	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
								ac->nodemask) {
		int status;
		int zone_contended;

		if (compaction_deferred(zone, order))
			continue;

		status = compact_zone_order(zone, order, gfp_mask, mode,
				&zone_contended, alloc_flags,
				ac->classzone_idx);
		rc = max(status, rc);
		/*
		 * It takes at least one zone that wasn't lock contended
		 * to clear all_zones_contended.
		 */
		all_zones_contended &= zone_contended;

		/* If a normal allocation would succeed, stop compacting */
		if (zone_watermark_ok(zone, order, low_wmark_pages(zone),
					ac->classzone_idx, alloc_flags)) {
			/*
			 * We think the allocation will succeed in this zone,
			 * but it is not certain, hence the false. The caller
			 * will repeat this with true if allocation indeed
			 * succeeds in this zone.
			 */
			compaction_defer_reset(zone, order, false);
			/*
			 * It is possible that async compaction aborted due to
			 * need_resched() and the watermarks were ok thanks to
			 * somebody else freeing memory. The allocation can
			 * however still fail so we better signal the
			 * need_resched() contention anyway (this will not
			 * prevent the allocation attempt).
			 */
			if (zone_contended == COMPACT_CONTENDED_SCHED)
				*contended = COMPACT_CONTENDED_SCHED;

			goto break_loop;
		}

		if (mode != MIGRATE_ASYNC && status == COMPACT_COMPLETE) {
			/*
			 * We think that allocation won't succeed in this zone
			 * so we defer compaction there. If it ends up
			 * succeeding after all, it will be reset.
			 */
			defer_compaction(zone, order);
		}

		/*
		 * We might have stopped compacting due to need_resched() in
		 * async compaction, or because a fatal signal was detected. In that
		 * case do not try further zones and signal need_resched()
		 * contention.
		 */
		if ((zone_contended == COMPACT_CONTENDED_SCHED)
					|| fatal_signal_pending(current)) {
			*contended = COMPACT_CONTENDED_SCHED;
			goto break_loop;
		}

		continue;
break_loop:
		/*
		 * We might not have tried all the zones, so be conservative
		 * and assume they are not all lock contended.
		 */
		all_zones_contended = 0;
		break;
	}

	/*
	 * If at least one zone wasn't deferred or skipped, we report if all
	 * zones that were tried were lock contended.
	 */
	if (rc > COMPACT_SKIPPED && all_zones_contended)
		*contended = COMPACT_CONTENDED_LOCK;

	return rc;
}


/* Compact all zones within a node */
static void __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
{
	int zoneid;
	struct zone *zone;

	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {

		zone = &pgdat->node_zones[zoneid];
		if (!populated_zone(zone))
			continue;

		cc->nr_freepages = 0;
		cc->nr_migratepages = 0;
		cc->zone = zone;
		INIT_LIST_HEAD(&cc->freepages);
		INIT_LIST_HEAD(&cc->migratepages);

		/*
		 * When called via /proc/sys/vm/compact_memory
		 * this makes sure we compact the whole zone regardless of
		 * cached scanner positions.
		 */
		if (cc->order == -1)
			__reset_isolation_suitable(zone);

		if (cc->order == -1 || !compaction_deferred(zone, cc->order))
			compact_zone(zone, cc);

		if (cc->order > 0) {
			if (zone_watermark_ok(zone, cc->order,
						low_wmark_pages(zone), 0, 0))
				compaction_defer_reset(zone, cc->order, false);
		}

		VM_BUG_ON(!list_empty(&cc->freepages));
		VM_BUG_ON(!list_empty(&cc->migratepages));
	}
}

void compact_pgdat(pg_data_t *pgdat, int order)
{
	struct compact_control cc = {
		.order = order,
		.mode = MIGRATE_ASYNC,
	};

	if (!order)
		return;

	__compact_pgdat(pgdat, &cc);
}

static void compact_node(int nid)
{
	struct compact_control cc = {
		.order = -1,
		.mode = MIGRATE_SYNC,
		.ignore_skip_hint = true,
	};

	__compact_pgdat(NODE_DATA(nid), &cc);
}

/* Compact all nodes in the system */
static void compact_nodes(void)
{
	int nid;

	/* Flush pending updates to the LRU lists */
	lru_add_drain_all();

	for_each_online_node(nid)
		compact_node(nid);
}

/* The written value is actually unused, all memory is compacted */
int sysctl_compact_memory;

/* This is the entry point for compacting all nodes via /proc/sys/vm */
int sysctl_compaction_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	if (write)
		compact_nodes();

	return 0;
}
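
/*
 * Usage example: `echo 1 > /proc/sys/vm/compact_memory` compacts every zone
 * on every online node; the value written is ignored.
 */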

int sysctl_extfrag_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	proc_dointvec_minmax(table, write, buffer, length, ppos);

	return 0;
}

#if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
static ssize_t sysfs_compact_node(struct device *dev,
			struct device_attribute *attr,
			const char *buf, size_t count)
{
	int nid = dev->id;

	if (nid >= 0 && nid < nr_node_ids && node_online(nid)) {
		/* Flush pending updates to the LRU lists */
		lru_add_drain_all();

		compact_node(nid);
	}

	return count;
}
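/* Exposed as /sys/devices/system/node/nodeN/compact; any write compacts that node. */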
static DEVICE_ATTR(compact, S_IWUSR, NULL, sysfs_compact_node);

int compaction_register_node(struct node *node)
{
	return device_create_file(&node->dev, &dev_attr_compact);
}

void compaction_unregister_node(struct node *node)
{
	return device_remove_file(&node->dev, &dev_attr_compact);
}
#endif /* CONFIG_SYSFS && CONFIG_NUMA */

#endif /* CONFIG_COMPACTION */