/*
 * linux/mm/compaction.c
 *
 * Memory compaction for the reduction of external fragmentation. Note that
 * this heavily depends upon page migration to do all the real heavy
 * lifting.
 *
 * Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
 */
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/compaction.h>
#include <linux/mm_inline.h>
#include <linux/backing-dev.h>
#include <linux/sysctl.h>
#include <linux/sysfs.h>
#include <linux/balloon_compaction.h>
#include <linux/page-isolation.h>
#include "internal.h"

#ifdef CONFIG_COMPACTION
static inline void count_compact_event(enum vm_event_item item)
{
	count_vm_event(item);
}

static inline void count_compact_events(enum vm_event_item item, long delta)
{
	count_vm_events(item, delta);
}
#else
#define count_compact_event(item) do { } while (0)
#define count_compact_events(item, delta) do { } while (0)
#endif

#if defined CONFIG_COMPACTION || defined CONFIG_CMA

#define CREATE_TRACE_POINTS
#include <trace/events/compaction.h>

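/*
 * Return every page on a private freelist to the buddy allocator and
 * report how many pages were released.
 */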
static unsigned long release_freepages(struct list_head *freelist)
{
	struct page *page, *next;
	unsigned long count = 0;

	list_for_each_entry_safe(page, next, freelist, lru) {
		list_del(&page->lru);
		__free_page(page);
		count++;
	}

	return count;
}

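/*
 * split_free_page() leaves the split order-0 pages unprepared for use, so
 * run the arch hook and kernel mapping (a no-op unless something like
 * CONFIG_DEBUG_PAGEALLOC unmaps free pages) on each of them here.
 */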
static void map_pages(struct list_head *list)
{
	struct page *page;

	list_for_each_entry(page, list, lru) {
		arch_alloc_page(page, 0);
		kernel_map_pages(page, 1, 1);
	}
}

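/*
 * Async compaction restricts itself to MIGRATE_CMA and MIGRATE_MOVABLE
 * pageblocks, where migration is cheapest and most likely to succeed.
 */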
static inline bool migrate_async_suitable(int migratetype)
{
	return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE;
}

#ifdef CONFIG_COMPACTION
/* Returns true if the pageblock should be scanned for pages to isolate. */
static inline bool isolation_suitable(struct compact_control *cc,
					struct page *page)
{
	if (cc->ignore_skip_hint)
		return true;

	return !get_pageblock_skip(page);
}

/*
 * This function is called to clear all cached information on pageblocks that
 * should be skipped for page isolation when the migrate and free page scanner
 * meet.
 */
static void __reset_isolation_suitable(struct zone *zone)
{
	unsigned long start_pfn = zone->zone_start_pfn;
	unsigned long end_pfn = zone_end_pfn(zone);
	unsigned long pfn;

	zone->compact_cached_migrate_pfn = start_pfn;
	zone->compact_cached_free_pfn = end_pfn;
	zone->compact_blockskip_flush = false;

	/* Walk the zone and mark every pageblock as suitable for isolation */
	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		struct page *page;

		cond_resched();

		if (!pfn_valid(pfn))
			continue;

		page = pfn_to_page(pfn);
		if (zone != page_zone(page))
			continue;

		clear_pageblock_skip(page);
	}
}

void reset_isolation_suitable(pg_data_t *pgdat)
{
	int zoneid;

	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
		struct zone *zone = &pgdat->node_zones[zoneid];
		if (!populated_zone(zone))
			continue;

		/* Only flush if a full compaction finished recently */
		if (zone->compact_blockskip_flush)
			__reset_isolation_suitable(zone);
	}
}

/*
 * If no pages were isolated then mark this pageblock to be skipped in the
 * future. The information is later cleared by __reset_isolation_suitable().
 */
static void update_pageblock_skip(struct compact_control *cc,
			struct page *page, unsigned long nr_isolated,
			bool migrate_scanner)
{
	struct zone *zone = cc->zone;

	if (cc->ignore_skip_hint)
		return;

	if (!page)
		return;

	if (!nr_isolated) {
		unsigned long pfn = page_to_pfn(page);
		set_pageblock_skip(page);

		/* Update where compaction should restart */
		if (migrate_scanner) {
			if (!cc->finished_update_migrate &&
			    pfn > zone->compact_cached_migrate_pfn)
				zone->compact_cached_migrate_pfn = pfn;
		} else {
			if (!cc->finished_update_free &&
			    pfn < zone->compact_cached_free_pfn)
				zone->compact_cached_free_pfn = pfn;
		}
	}
}
#else
static inline bool isolation_suitable(struct compact_control *cc,
					struct page *page)
{
	return true;
}

static void update_pageblock_skip(struct compact_control *cc,
			struct page *page, unsigned long nr_isolated,
			bool migrate_scanner)
{
}
#endif /* CONFIG_COMPACTION */

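/* The lock should be dropped if a reschedule is due or it is contended */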
static inline bool should_release_lock(spinlock_t *lock)
{
	return need_resched() || spin_is_contended(lock);
}

/*
 * Compaction requires the taking of some coarse locks that are potentially
 * very heavily contended. Check if the process needs to be scheduled or
 * if the lock is contended. For async compaction, back out in the event
 * of contention. For sync compaction, schedule and retry.
 *
 * Returns true if the lock is held.
 * Returns false if the lock is released and compaction should abort
 */
static bool compact_checklock_irqsave(spinlock_t *lock, unsigned long *flags,
				      bool locked, struct compact_control *cc)
{
	if (should_release_lock(lock)) {
		if (locked) {
			spin_unlock_irqrestore(lock, *flags);
			locked = false;
		}

		/* async aborts if taking too long or contended */
		if (!cc->sync) {
			cc->contended = true;
			return false;
		}

		cond_resched();
	}

	if (!locked)
		spin_lock_irqsave(lock, *flags);
	return true;
}

/* Returns true if the page is within a block suitable for migration to */
static bool suitable_migration_target(struct page *page)
{
	/* If the page is a large free page, then disallow migration */
	if (PageBuddy(page) && page_order(page) >= pageblock_order)
		return false;

	/* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
	if (migrate_async_suitable(get_pageblock_migratetype(page)))
		return true;

	/* Otherwise skip the block */
	return false;
}

/*
 * Isolate free pages onto a private freelist. If @strict is true, will abort
 * returning 0 on any invalid PFNs or non-free pages inside of the pageblock
 * (even though it may still end up isolating some pages).
 */
static unsigned long isolate_freepages_block(struct compact_control *cc,
				unsigned long blockpfn,
				unsigned long end_pfn,
				struct list_head *freelist,
				bool strict)
{
	int nr_scanned = 0, total_isolated = 0;
	struct page *cursor, *valid_page = NULL;
	unsigned long flags;
	bool locked = false;
	bool checked_pageblock = false;

	cursor = pfn_to_page(blockpfn);

	/* Isolate free pages. */
	for (; blockpfn < end_pfn; blockpfn++, cursor++) {
		int isolated, i;
		struct page *page = cursor;

		nr_scanned++;
		if (!pfn_valid_within(blockpfn))
			goto isolate_fail;

		if (!valid_page)
			valid_page = page;
		if (!PageBuddy(page))
			goto isolate_fail;

		/*
		 * The zone lock must be held to isolate freepages.
		 * Unfortunately this is a very coarse lock and can be
		 * heavily contended if there are parallel allocations
		 * or parallel compactions. For async compaction do not
		 * spin on the lock and we acquire the lock as late as
		 * possible.
		 */
		locked = compact_checklock_irqsave(&cc->zone->lock, &flags,
								locked, cc);
		if (!locked)
			break;

		/* Recheck this is a suitable migration target under lock */
		if (!strict && !checked_pageblock) {
			/*
			 * We need to check the suitability of the pageblock
			 * only once, and isolate_freepages_block() is always
			 * called on a range within a single pageblock, so
			 * one check is sufficient.
			 */
			checked_pageblock = true;
			if (!suitable_migration_target(page))
				break;
		}

		/* Recheck this is a buddy page under lock */
		if (!PageBuddy(page))
			goto isolate_fail;

		/* Found a free page, break it into order-0 pages */
		isolated = split_free_page(page);
		total_isolated += isolated;
		for (i = 0; i < isolated; i++) {
			list_add(&page->lru, freelist);
			page++;
		}

		/* If a page was split, advance to the end of it */
		if (isolated) {
			blockpfn += isolated - 1;
			cursor += isolated - 1;
			continue;
		}

isolate_fail:
		if (strict)
			break;
		else
			continue;
	}

	trace_mm_compaction_isolate_freepages(nr_scanned, total_isolated);

	/*
	 * If strict isolation is requested by CMA then check that all the
	 * pages requested were isolated. If there were any failures, 0 is
	 * returned and CMA will fail.
	 */
	if (strict && blockpfn < end_pfn)
		total_isolated = 0;

	if (locked)
		spin_unlock_irqrestore(&cc->zone->lock, flags);

	/* Update the pageblock-skip if the whole pageblock was scanned */
	if (blockpfn == end_pfn)
		update_pageblock_skip(cc, valid_page, total_isolated, false);

	count_compact_events(COMPACTFREE_SCANNED, nr_scanned);
	if (total_isolated)
		count_compact_events(COMPACTISOLATED, total_isolated);
	return total_isolated;
}

/**
 * isolate_freepages_range() - isolate free pages.
 * @start_pfn: The first PFN to start isolating.
 * @end_pfn:   The one-past-last PFN.
 *
 * Non-free pages, invalid PFNs, or zone boundaries within the
 * [start_pfn, end_pfn) range are considered errors, cause function to
 * undo its actions and return zero.
 *
 * Otherwise, function returns one-past-the-last PFN of isolated page
 * (which may be greater than end_pfn if end fell in the middle of
 * a free page).
 */
unsigned long
isolate_freepages_range(struct compact_control *cc,
			unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long isolated, pfn, block_end_pfn;
	LIST_HEAD(freelist);

	for (pfn = start_pfn; pfn < end_pfn; pfn += isolated) {
		if (!pfn_valid(pfn) || cc->zone != page_zone(pfn_to_page(pfn)))
			break;

		/*
		 * On subsequent iterations ALIGN() is actually not needed,
		 * but we keep it so as not to complicate the code.
		 */
		block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
		block_end_pfn = min(block_end_pfn, end_pfn);
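		/*
		 * Worked example, assuming pageblock_nr_pages == 512: for
		 * pfn == 1000, ALIGN(1001, 512) == 1024, i.e. one past the
		 * last pfn of the pageblock containing pfn 1000.
		 */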

		isolated = isolate_freepages_block(cc, pfn, block_end_pfn,
						   &freelist, true);

		/*
		 * In strict mode, isolate_freepages_block() returns 0 if
		 * there are any holes in the block (ie. invalid PFNs or
		 * non-free pages).
		 */
		if (!isolated)
			break;

		/*
		 * If we managed to isolate pages, it is always (1 << n) *
		 * pageblock_nr_pages for some non-negative n.  (Max order
		 * page may span two pageblocks).
		 */
	}

	/* split_free_page does not map the pages */
	map_pages(&freelist);

	if (pfn < end_pfn) {
		/* Loop terminated early, cleanup. */
		release_freepages(&freelist);
		return 0;
	}

	/* We don't use freelists for anything. */
	return pfn;
}

/* Update the number of anon and file isolated pages in the zone */
static void acct_isolated(struct zone *zone, bool locked, struct compact_control *cc)
{
	struct page *page;
	unsigned int count[2] = { 0, };

	list_for_each_entry(page, &cc->migratepages, lru)
		count[!!page_is_file_cache(page)]++;

	/* If locked we can use the interrupt unsafe versions */
	if (locked) {
		__mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
		__mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
	} else {
		mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
		mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
	}
}

/* Similar to reclaim, but different enough that they don't share logic */
static bool too_many_isolated(struct zone *zone)
{
	unsigned long active, inactive, isolated;

	inactive = zone_page_state(zone, NR_INACTIVE_FILE) +
					zone_page_state(zone, NR_INACTIVE_ANON);
	active = zone_page_state(zone, NR_ACTIVE_FILE) +
					zone_page_state(zone, NR_ACTIVE_ANON);
	isolated = zone_page_state(zone, NR_ISOLATED_FILE) +
					zone_page_state(zone, NR_ISOLATED_ANON);

	return isolated > (inactive + active) / 2;
}

/**
 * isolate_migratepages_range() - isolate all migrate-able pages in range.
 * @zone:	Zone pages are in.
 * @cc:		Compaction control structure.
 * @low_pfn:	The first PFN of the range.
 * @end_pfn:	The one-past-the-last PFN of the range.
 * @unevictable: true if it is allowed to isolate unevictable pages
 *
 * Isolate all pages that can be migrated from the range specified by
 * [low_pfn, end_pfn).  Returns zero if there is a fatal signal
 * pending, otherwise PFN of the first page that was not scanned
 * (which may be less than, equal to, or greater than end_pfn).
 *
 * Assumes that cc->migratepages is empty and cc->nr_migratepages is
 * zero.
 *
 * Apart from cc->migratepages and cc->nr_migratepages this function
 * does not modify any cc's fields, in particular it does not modify
 * (or read for that matter) cc->migrate_pfn.
 */
unsigned long
isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
		unsigned long low_pfn, unsigned long end_pfn, bool unevictable)
{
	unsigned long last_pageblock_nr = 0, pageblock_nr;
	unsigned long nr_scanned = 0, nr_isolated = 0;
	struct list_head *migratelist = &cc->migratepages;
	struct lruvec *lruvec;
	unsigned long flags;
	bool locked = false;
	struct page *page = NULL, *valid_page = NULL;
	bool skipped_async_unsuitable = false;
	const isolate_mode_t mode = (!cc->sync ? ISOLATE_ASYNC_MIGRATE : 0) |
				    (unevictable ? ISOLATE_UNEVICTABLE : 0);
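	/*
	 * The mode built above makes __isolate_lru_page() reject pages that
	 * are expensive to migrate under async compaction; unevictable pages
	 * are only taken when the caller (e.g. CMA) explicitly allows it.
	 */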

	/*
	 * Ensure that there are not too many pages isolated from the LRU
	 * list by either parallel reclaimers or compaction. If there are,
	 * delay for some time until fewer pages are isolated
	 */
	while (unlikely(too_many_isolated(zone))) {
		/* async migration should just abort */
		if (!cc->sync)
			return 0;

		congestion_wait(BLK_RW_ASYNC, HZ/10);

		if (fatal_signal_pending(current))
			return 0;
	}

	/* Time to isolate some pages for migration */
	cond_resched();
	for (; low_pfn < end_pfn; low_pfn++) {
		/* give a chance to irqs before checking need_resched() */
		if (locked && !(low_pfn % SWAP_CLUSTER_MAX)) {
			if (should_release_lock(&zone->lru_lock)) {
				spin_unlock_irqrestore(&zone->lru_lock, flags);
				locked = false;
			}
		}

		/*
		 * migrate_pfn does not necessarily start aligned to a
		 * pageblock. Ensure that pfn_valid is called when moving
		 * into a new MAX_ORDER_NR_PAGES range in case of large
		 * memory holes within the zone
		 */
		if ((low_pfn & (MAX_ORDER_NR_PAGES - 1)) == 0) {
			if (!pfn_valid(low_pfn)) {
				low_pfn += MAX_ORDER_NR_PAGES - 1;
				continue;
			}
		}

		if (!pfn_valid_within(low_pfn))
			continue;
		nr_scanned++;

		/*
		 * Get the page and ensure the page is within the same zone.
		 * See the comment in isolate_freepages about overlapping
		 * nodes. It is deliberate that the new zone lock is not taken
		 * as memory compaction should not move pages between nodes.
		 */
		page = pfn_to_page(low_pfn);
		if (page_zone(page) != zone)
			continue;

		if (!valid_page)
			valid_page = page;

		/* If isolation recently failed, do not retry */
		pageblock_nr = low_pfn >> pageblock_order;
		if (last_pageblock_nr != pageblock_nr) {
			int mt;

			last_pageblock_nr = pageblock_nr;
			if (!isolation_suitable(cc, page))
				goto next_pageblock;

			/*
			 * For async migration, also only scan in MOVABLE
			 * blocks. Async migration is optimistic to see if
			 * the minimum amount of work satisfies the allocation
			 */
			mt = get_pageblock_migratetype(page);
			if (!cc->sync && !migrate_async_suitable(mt)) {
				cc->finished_update_migrate = true;
				skipped_async_unsuitable = true;
				goto next_pageblock;
			}
		}

		/*
		 * Skip if free. page_order cannot be used without zone->lock
		 * as nothing prevents parallel allocations or buddy merging.
		 */
		if (PageBuddy(page))
			continue;

		/*
		 * Check may be lockless but that's ok as we recheck later.
		 * It's possible to migrate LRU pages and balloon pages
		 * Skip any other type of page
		 */
		if (!PageLRU(page)) {
			if (unlikely(balloon_page_movable(page))) {
				if (locked && balloon_page_isolate(page)) {
					/* Successfully isolated */
					goto isolate_success;
				}
			}
			continue;
		}

		/*
		 * PageLRU is set. lru_lock normally excludes isolation
		 * splitting and collapsing (collapsing has already happened
		 * if PageLRU is set) but the lock is not necessarily taken
		 * here and it is wasteful to take it just to check transhuge.
		 * Check TransHuge without lock and skip the whole pageblock if
		 * it's either a transhuge or hugetlbfs page, as calling
		 * compound_order() without preventing THP from splitting the
		 * page underneath us may return surprising results.
		 */
		if (PageTransHuge(page)) {
			if (!locked)
				goto next_pageblock;
			low_pfn += (1 << compound_order(page)) - 1;
			continue;
		}

		/*
		 * Migration will fail if an anonymous page is pinned in memory,
		 * so avoid taking lru_lock and isolating it unnecessarily in an
		 * admittedly racy check.
		 */
		if (!page_mapping(page) &&
		    page_count(page) > page_mapcount(page))
			continue;

		/* Check if it is ok to still hold the lock */
		locked = compact_checklock_irqsave(&zone->lru_lock, &flags,
								locked, cc);
		if (!locked || fatal_signal_pending(current))
			break;

		/* Recheck PageLRU and PageTransHuge under lock */
		if (!PageLRU(page))
			continue;
		if (PageTransHuge(page)) {
			low_pfn += (1 << compound_order(page)) - 1;
			continue;
		}

		lruvec = mem_cgroup_page_lruvec(page, zone);

		/* Try isolate the page */
		if (__isolate_lru_page(page, mode) != 0)
			continue;

		VM_BUG_ON_PAGE(PageTransCompound(page), page);

		/* Successfully isolated */
		del_page_from_lru_list(page, lruvec, page_lru(page));

isolate_success:
		cc->finished_update_migrate = true;
		list_add(&page->lru, migratelist);
		cc->nr_migratepages++;
		nr_isolated++;

		/* Avoid isolating too much */
		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX) {
			++low_pfn;
			break;
		}

		continue;

next_pageblock:
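		/*
		 * Advance to the last pfn of the current pageblock; the
		 * loop increment then moves to the start of the next one.
		 */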
		low_pfn = ALIGN(low_pfn + 1, pageblock_nr_pages) - 1;
	}

	acct_isolated(zone, locked, cc);

	if (locked)
		spin_unlock_irqrestore(&zone->lru_lock, flags);

	/*
	 * Update the pageblock-skip information and cached scanner pfn,
	 * if the whole pageblock was scanned without isolating any page.
	 * This is not done when pageblock was skipped due to being unsuitable
	 * for async compaction, so that eventual sync compaction can try.
	 */
	if (low_pfn == end_pfn && !skipped_async_unsuitable)
		update_pageblock_skip(cc, valid_page, nr_isolated, true);

	trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);

	count_compact_events(COMPACTMIGRATE_SCANNED, nr_scanned);
	if (nr_isolated)
		count_compact_events(COMPACTISOLATED, nr_isolated);

	return low_pfn;
}

#endif /* CONFIG_COMPACTION || CONFIG_CMA */
#ifdef CONFIG_COMPACTION
/*
 * Based on information in the current compact_control, find blocks
 * suitable for isolating free pages from and then isolate them.
 */
static void isolate_freepages(struct zone *zone,
				struct compact_control *cc)
{
	struct page *page;
	unsigned long block_start_pfn;	/* start of current pageblock */
	unsigned long block_end_pfn;	/* end of current pageblock */
	unsigned long low_pfn;	     /* lowest pfn scanner is able to scan */
	unsigned long next_free_pfn; /* start pfn for scanning at next round */
	int nr_freepages = cc->nr_freepages;
	struct list_head *freelist = &cc->freepages;

	/*
	 * Initialise the free scanner. The starting point is where we last
	 * successfully isolated from, zone-cached value, or the end of the
	 * zone when isolating for the first time. We need this aligned to
	 * the pageblock boundary, because we do
	 * block_start_pfn -= pageblock_nr_pages in the for loop.
	 * For ending point, take care when isolating in last pageblock of a
	 * zone which ends in the middle of a pageblock.
	 * The low boundary is the end of the pageblock the migration scanner
	 * is using.
	 */
	block_start_pfn = cc->free_pfn & ~(pageblock_nr_pages-1);
	block_end_pfn = min(block_start_pfn + pageblock_nr_pages,
						zone_end_pfn(zone));
	low_pfn = ALIGN(cc->migrate_pfn + 1, pageblock_nr_pages);

	/*
	 * If no pages are isolated, the block_start_pfn < low_pfn check
	 * will kick in.
	 */
	next_free_pfn = 0;

	/*
	 * Isolate free pages until enough are available to migrate the
	 * pages on cc->migratepages. We stop searching if the migrate
	 * and free page scanners meet or enough free pages are isolated.
	 */
	for (; block_start_pfn >= low_pfn && cc->nr_migratepages > nr_freepages;
				block_end_pfn = block_start_pfn,
				block_start_pfn -= pageblock_nr_pages) {
		unsigned long isolated;

		/*
		 * This can iterate a massively long zone without finding any
		 * suitable migration targets, so periodically check if we need
		 * to schedule.
		 */
		cond_resched();

		if (!pfn_valid(block_start_pfn))
			continue;

		/*
		 * Check for overlapping nodes/zones. It's possible on some
		 * configurations to have a setup like
		 * node0 node1 node0
		 * i.e. it's possible that all pages within a zone's range of
		 * pages do not belong to a single zone.
		 */
		page = pfn_to_page(block_start_pfn);
		if (page_zone(page) != zone)
			continue;

		/* Check the block is suitable for migration */
		if (!suitable_migration_target(page))
			continue;

		/* If isolation recently failed, do not retry */
		if (!isolation_suitable(cc, page))
			continue;

		/* Found a block suitable for isolating free pages from */
		isolated = isolate_freepages_block(cc, block_start_pfn,
					block_end_pfn, freelist, false);
		nr_freepages += isolated;

		/*
		 * Record the highest PFN we isolated pages from. When next
		 * looking for free pages, the search will restart here as
		 * page migration may have returned some pages to the allocator
		 */
		if (isolated && next_free_pfn == 0) {
			cc->finished_update_free = true;
			next_free_pfn = block_start_pfn;
		}
	}

	/* split_free_page does not map the pages */
	map_pages(freelist);

	/*
	 * If we crossed the migrate scanner, we want to keep it that way
	 * so that compact_finished() may detect this
	 */
	if (block_start_pfn < low_pfn)
		next_free_pfn = cc->migrate_pfn;

	cc->free_pfn = next_free_pfn;
	cc->nr_freepages = nr_freepages;
}

/*
 * This is a migrate-callback that "allocates" freepages by taking pages
 * from the isolated freelists in the block we are migrating to.
 */
static struct page *compaction_alloc(struct page *migratepage,
					unsigned long data,
					int **result)
{
	struct compact_control *cc = (struct compact_control *)data;
	struct page *freepage;

	/* Isolate free pages if necessary */
	if (list_empty(&cc->freepages)) {
		isolate_freepages(cc->zone, cc);

		if (list_empty(&cc->freepages))
			return NULL;
	}

	freepage = list_entry(cc->freepages.next, struct page, lru);
	list_del(&freepage->lru);
	cc->nr_freepages--;

	return freepage;
}

/*
 * This is a migrate-callback that "frees" freepages back to the isolated
 * freelist.  All pages on the freelist are from the same zone, so there is no
 * special handling needed for NUMA.
 */
static void compaction_free(struct page *page, unsigned long data)
{
	struct compact_control *cc = (struct compact_control *)data;

	list_add(&page->lru, &cc->freepages);
	cc->nr_freepages++;
}

/*
 * We cannot control nr_migratepages fully when migration is running as
 * migrate_pages() has no knowledge of compact_control.  When migration is
 * complete, we count the number of pages on the list by hand.
 */
static void update_nr_listpages(struct compact_control *cc)
{
	int nr_migratepages = 0;
	struct page *page;

	list_for_each_entry(page, &cc->migratepages, lru)
		nr_migratepages++;

	cc->nr_migratepages = nr_migratepages;
}

/* possible outcome of isolate_migratepages */
typedef enum {
	ISOLATE_ABORT,		/* Abort compaction now */
	ISOLATE_NONE,		/* No pages isolated, continue scanning */
	ISOLATE_SUCCESS,	/* Pages isolated, migrate */
} isolate_migrate_t;

/*
 * Isolate all pages that can be migrated from the block pointed to by
 * the migrate scanner within compact_control.
 */
static isolate_migrate_t isolate_migratepages(struct zone *zone,
					struct compact_control *cc)
{
	unsigned long low_pfn, end_pfn;

	/* Do not scan outside zone boundaries */
	low_pfn = max(cc->migrate_pfn, zone->zone_start_pfn);

	/* Only scan within a pageblock boundary */
	end_pfn = ALIGN(low_pfn + 1, pageblock_nr_pages);

	/* Do not cross the free scanner or scan within a memory hole */
	if (end_pfn > cc->free_pfn || !pfn_valid(low_pfn)) {
		cc->migrate_pfn = end_pfn;
		return ISOLATE_NONE;
	}

	/* Perform the isolation */
	low_pfn = isolate_migratepages_range(zone, cc, low_pfn, end_pfn, false);
	if (!low_pfn || cc->contended)
		return ISOLATE_ABORT;

	cc->migrate_pfn = low_pfn;

	return ISOLATE_SUCCESS;
}

static int compact_finished(struct zone *zone,
			    struct compact_control *cc)
{
	unsigned int order;
	unsigned long watermark;

	if (fatal_signal_pending(current))
		return COMPACT_PARTIAL;

	/* Compaction run completes if the migrate and free scanner meet */
	if (cc->free_pfn <= cc->migrate_pfn) {
		/* Let the next compaction start anew. */
		zone->compact_cached_migrate_pfn = zone->zone_start_pfn;
		zone->compact_cached_free_pfn = zone_end_pfn(zone);

		/*
		 * Mark that the PG_migrate_skip information should be cleared
		 * by kswapd when it goes to sleep. kswapd does not set the
		 * flag itself as the decision to be clear should be directly
		 * based on an allocation request.
		 */
		if (!current_is_kswapd())
			zone->compact_blockskip_flush = true;

		return COMPACT_COMPLETE;
	}

	/*
	 * order == -1 is expected when compacting via
	 * /proc/sys/vm/compact_memory
	 */
	if (cc->order == -1)
		return COMPACT_CONTINUE;

	/* Compaction run is not finished if the watermark is not met */
	watermark = low_wmark_pages(zone);
	watermark += (1 << cc->order);

	if (!zone_watermark_ok(zone, cc->order, watermark, 0, 0))
		return COMPACT_CONTINUE;

	/* Direct compactor: Is a suitable page free? */
	for (order = cc->order; order < MAX_ORDER; order++) {
		struct free_area *area = &zone->free_area[order];

		/* Job done if page is free of the right migratetype */
		if (!list_empty(&area->free_list[cc->migratetype]))
			return COMPACT_PARTIAL;

		/* Job done if allocation would set block type */
		if (cc->order >= pageblock_order && area->nr_free)
			return COMPACT_PARTIAL;
	}

	return COMPACT_CONTINUE;
}

/*
 * compaction_suitable: Is this suitable to run compaction on this zone now?
 * Returns
 *   COMPACT_SKIPPED  - If there are too few free pages for compaction
 *   COMPACT_PARTIAL  - If the allocation would succeed without compaction
 *   COMPACT_CONTINUE - If compaction should run now
 */
unsigned long compaction_suitable(struct zone *zone, int order)
{
	int fragindex;
	unsigned long watermark;

	/*
	 * order == -1 is expected when compacting via
	 * /proc/sys/vm/compact_memory
	 */
	if (order == -1)
		return COMPACT_CONTINUE;

	/*
	 * Watermarks for order-0 must be met for compaction. Note the 2UL.
	 * This is because during migration, copies of pages need to be
	 * allocated and for a short time, the footprint is higher
	 */
	watermark = low_wmark_pages(zone) + (2UL << order);
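	/*
	 * Example: for an order-9 request (a 2MB huge page with 4KB base
	 * pages), this demands 2UL << 9 == 1024 extra free base pages over
	 * the low watermark.
	 */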
	if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
		return COMPACT_SKIPPED;

	/*
	 * fragmentation index determines if allocation failures are due to
	 * low memory or external fragmentation
	 *
	 * index of -1000 implies allocations might succeed depending on
	 * watermarks
	 * index towards 0 implies failure is due to lack of memory
	 * index towards 1000 implies failure is due to fragmentation
	 *
	 * Only compact if a failure would be due to fragmentation.
	 */
	fragindex = fragmentation_index(zone, order);
	if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
		return COMPACT_SKIPPED;

	if (fragindex == -1000 && zone_watermark_ok(zone, order, watermark,
	    0, 0))
		return COMPACT_PARTIAL;

	return COMPACT_CONTINUE;
}

static int compact_zone(struct zone *zone, struct compact_control *cc)
{
	int ret;
	unsigned long start_pfn = zone->zone_start_pfn;
	unsigned long end_pfn = zone_end_pfn(zone);

	ret = compaction_suitable(zone, cc->order);
	switch (ret) {
	case COMPACT_PARTIAL:
	case COMPACT_SKIPPED:
		/* Compaction is likely to fail */
		return ret;
	case COMPACT_CONTINUE:
		/* Fall through to compaction */
		;
	}

	/*
	 * Clear pageblock skip if there were failures recently and compaction
	 * is about to be retried after being deferred. kswapd does not do
	 * this reset as it'll reset the cached information when going to sleep.
	 */
	if (compaction_restarting(zone, cc->order) && !current_is_kswapd())
		__reset_isolation_suitable(zone);

	/*
	 * Setup to move all movable pages to the end of the zone. Used cached
	 * information on where the scanners should start but check that it
	 * is initialised by ensuring the values are within zone boundaries.
	 */
	cc->migrate_pfn = zone->compact_cached_migrate_pfn;
	cc->free_pfn = zone->compact_cached_free_pfn;
	if (cc->free_pfn < start_pfn || cc->free_pfn > end_pfn) {
		cc->free_pfn = end_pfn & ~(pageblock_nr_pages-1);
		zone->compact_cached_free_pfn = cc->free_pfn;
	}
	if (cc->migrate_pfn < start_pfn || cc->migrate_pfn > end_pfn) {
		cc->migrate_pfn = start_pfn;
		zone->compact_cached_migrate_pfn = cc->migrate_pfn;
	}

	trace_mm_compaction_begin(start_pfn, cc->migrate_pfn, cc->free_pfn, end_pfn);

	migrate_prep_local();

	while ((ret = compact_finished(zone, cc)) == COMPACT_CONTINUE) {
		unsigned long nr_migrate, nr_remaining;
		int err;

		switch (isolate_migratepages(zone, cc)) {
		case ISOLATE_ABORT:
			ret = COMPACT_PARTIAL;
			putback_movable_pages(&cc->migratepages);
			cc->nr_migratepages = 0;
			goto out;
		case ISOLATE_NONE:
			continue;
		case ISOLATE_SUCCESS:
			;
		}

		nr_migrate = cc->nr_migratepages;
		err = migrate_pages(&cc->migratepages, compaction_alloc,
				compaction_free, (unsigned long)cc,
				cc->sync ? MIGRATE_SYNC_LIGHT : MIGRATE_ASYNC,
				MR_COMPACTION);
		update_nr_listpages(cc);
		nr_remaining = cc->nr_migratepages;

		trace_mm_compaction_migratepages(nr_migrate - nr_remaining,
						nr_remaining);

		/* Release isolated pages not migrated */
		if (err) {
			putback_movable_pages(&cc->migratepages);
			cc->nr_migratepages = 0;
			/*
			 * migrate_pages() may return -ENOMEM when scanners meet
			 * and we want compact_finished() to detect it
			 */
			if (err == -ENOMEM && cc->free_pfn > cc->migrate_pfn) {
				ret = COMPACT_PARTIAL;
				goto out;
			}
		}
	}

out:
	/* Release free pages and check accounting */
	cc->nr_freepages -= release_freepages(&cc->freepages);
	VM_BUG_ON(cc->nr_freepages != 0);

	trace_mm_compaction_end(ret);

	return ret;
}

static unsigned long compact_zone_order(struct zone *zone,
				 int order, gfp_t gfp_mask,
				 bool sync, bool *contended)
{
	unsigned long ret;
	struct compact_control cc = {
		.nr_freepages = 0,
		.nr_migratepages = 0,
		.order = order,
		.migratetype = allocflags_to_migratetype(gfp_mask),
		.zone = zone,
		.sync = sync,
	};
	INIT_LIST_HEAD(&cc.freepages);
	INIT_LIST_HEAD(&cc.migratepages);

	ret = compact_zone(zone, &cc);

	VM_BUG_ON(!list_empty(&cc.freepages));
	VM_BUG_ON(!list_empty(&cc.migratepages));

	*contended = cc.contended;
	return ret;
}

int sysctl_extfrag_threshold = 500;

/**
 * try_to_compact_pages - Direct compact to satisfy a high-order allocation
 * @zonelist: The zonelist used for the current allocation
 * @order: The order of the current allocation
 * @gfp_mask: The GFP mask of the current allocation
 * @nodemask: The allowed nodes to allocate from
 * @sync: Whether migration is synchronous or not
 * @contended: Return value that is true if compaction was aborted due to lock contention
 *
 * This is the main entry point for direct page compaction.
 */
unsigned long try_to_compact_pages(struct zonelist *zonelist,
			int order, gfp_t gfp_mask, nodemask_t *nodemask,
			bool sync, bool *contended)
{
	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
	int may_enter_fs = gfp_mask & __GFP_FS;
	int may_perform_io = gfp_mask & __GFP_IO;
	struct zoneref *z;
	struct zone *zone;
	int rc = COMPACT_SKIPPED;
	int alloc_flags = 0;

	/* Check if the GFP flags allow compaction */
	if (!order || !may_enter_fs || !may_perform_io)
		return rc;

	count_compact_event(COMPACTSTALL);

#ifdef CONFIG_CMA
	if (allocflags_to_migratetype(gfp_mask) == MIGRATE_MOVABLE)
		alloc_flags |= ALLOC_CMA;
#endif
	/* Compact each zone in the list */
	for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx,
								nodemask) {
		int status;

		status = compact_zone_order(zone, order, gfp_mask, sync,
						contended);
		rc = max(status, rc);

		/* If a normal allocation would succeed, stop compacting */
		if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0,
				      alloc_flags))
			break;
	}

	return rc;
}


/* Compact all zones within a node */
static void __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
{
	int zoneid;
	struct zone *zone;

	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {

		zone = &pgdat->node_zones[zoneid];
		if (!populated_zone(zone))
			continue;

		cc->nr_freepages = 0;
		cc->nr_migratepages = 0;
		cc->zone = zone;
		INIT_LIST_HEAD(&cc->freepages);
		INIT_LIST_HEAD(&cc->migratepages);

		if (cc->order == -1 || !compaction_deferred(zone, cc->order))
			compact_zone(zone, cc);

		if (cc->order > 0) {
			if (zone_watermark_ok(zone, cc->order,
						low_wmark_pages(zone), 0, 0))
				compaction_defer_reset(zone, cc->order, false);
		}

		VM_BUG_ON(!list_empty(&cc->freepages));
		VM_BUG_ON(!list_empty(&cc->migratepages));
	}
}

void compact_pgdat(pg_data_t *pgdat, int order)
{
	struct compact_control cc = {
		.order = order,
		.sync = false,
	};

	if (!order)
		return;

	__compact_pgdat(pgdat, &cc);
}

static void compact_node(int nid)
{
	struct compact_control cc = {
		.order = -1,
		.sync = true,
		.ignore_skip_hint = true,
	};

	__compact_pgdat(NODE_DATA(nid), &cc);
}

/* Compact all nodes in the system */
static void compact_nodes(void)
{
	int nid;

	/* Flush pending updates to the LRU lists */
	lru_add_drain_all();

	for_each_online_node(nid)
		compact_node(nid);
}

/* The written value is actually unused, all memory is compacted */
int sysctl_compact_memory;

/* This is the entry point for compacting all nodes via /proc/sys/vm */
int sysctl_compaction_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	if (write)
		compact_nodes();

	return 0;
}

int sysctl_extfrag_handler(struct ctl_table *table, int write,
			void __user *buffer, size_t *length, loff_t *ppos)
{
	proc_dointvec_minmax(table, write, buffer, length, ppos);

	return 0;
}

#if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
static ssize_t sysfs_compact_node(struct device *dev,
			struct device_attribute *attr,
			const char *buf, size_t count)
{
	int nid = dev->id;

	if (nid >= 0 && nid < nr_node_ids && node_online(nid)) {
		/* Flush pending updates to the LRU lists */
		lru_add_drain_all();

		compact_node(nid);
	}

	return count;
}
static DEVICE_ATTR(compact, S_IWUSR, NULL, sysfs_compact_node);

int compaction_register_node(struct node *node)
{
	return device_create_file(&node->dev, &dev_attr_compact);
}

void compaction_unregister_node(struct node *node)
{
	return device_remove_file(&node->dev, &dev_attr_compact);
}
#endif /* CONFIG_SYSFS && CONFIG_NUMA */

#endif /* CONFIG_COMPACTION */