// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/page_alloc.c
 *
 *  Manages the free list, the system allocates free pages here.
 *  Note that kmalloc() lives in slab.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *  Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *  Reshaped it to be a zoned allocator, Ingo Molnar, Red Hat, 1999
 *  Discontiguous memory support, Kanoj Sarcar, SGI, Nov 1999
 *  Zone balancing, Kanoj Sarcar, SGI, Jan 2000
 *  Per cpu hot/cold page lists, bulk allocation, Martin J. Bligh, Sept 2002
 *          (lots of bits borrowed from Ingo Molnar & Andrew Morton)
 */

#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/kernel.h>
#include <linux/kasan.h>
#include <linux/module.h>
#include <linux/suspend.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/oom.h>
#include <linux/topology.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/memory_hotplug.h>
#include <linux/nodemask.h>
#include <linux/vmalloc.h>
#include <linux/vmstat.h>
#include <linux/mempolicy.h>
#include <linux/memremap.h>
#include <linux/stop_machine.h>
#include <linux/random.h>
#include <linux/sort.h>
#include <linux/pfn.h>
#include <linux/backing-dev.h>
#include <linux/fault-inject.h>
#include <linux/page-isolation.h>
#include <linux/debugobjects.h>
#include <linux/kmemleak.h>
#include <linux/compaction.h>
#include <trace/events/kmem.h>
#include <trace/events/oom.h>
#include <linux/prefetch.h>
#include <linux/mm_inline.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>
#include <linux/hugetlb.h>
#include <linux/sched/rt.h>
#include <linux/sched/mm.h>
#include <linux/page_owner.h>
#include <linux/kthread.h>
#include <linux/memcontrol.h>
#include <linux/ftrace.h>
#include <linux/lockdep.h>
#include <linux/nmi.h>
#include <linux/psi.h>
#include <linux/padata.h>
#include <linux/khugepaged.h>
#include <linux/buffer_head.h>

#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/div64.h>

#include "internal.h"
#include "shuffle.h"
#include "page_reporting.h"

/* Free Page Internal flags: for internal, non-pcp variants of free_pages(). */
typedef int __bitwise fpi_t;

/* No special request */
#define FPI_NONE		((__force fpi_t)0)

/*
 * Skip free page reporting notification for the (possibly merged) page.
 * This does not hinder free page reporting from grabbing the page,
 * reporting it and marking it "reported" - it only skips notifying
 * the free page reporting infrastructure about a newly freed page. For
 * example, used when temporarily pulling a page from a freelist and
 * putting it back unmodified.
 */
#define FPI_SKIP_REPORT_NOTIFY	((__force fpi_t)BIT(0))

/*
 * Place the (possibly merged) page to the tail of the freelist. Will ignore
 * page shuffling (relevant code - e.g., memory onlining - is expected to
 * shuffle the whole zone).
 *
 * Note: No code should rely on this flag for correctness - it's purely
 *       to allow for optimizations when handing back either fresh pages
 *       (memory onlining) or untouched pages (page isolation, free page
 *       reporting).
 */
#define FPI_TO_TAIL		((__force fpi_t)BIT(1))

/* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
static DEFINE_MUTEX(pcp_batch_high_lock);
#define MIN_PERCPU_PAGELIST_FRACTION	(8)

#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
DEFINE_PER_CPU(int, numa_node);
EXPORT_PER_CPU_SYMBOL(numa_node);
#endif

DEFINE_STATIC_KEY_TRUE(vm_numa_stat_key);

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
/*
 * N.B., Do NOT reference the '_numa_mem_' per cpu variable directly.
 * It will not be defined when CONFIG_HAVE_MEMORYLESS_NODES is not defined.
 * Use the accessor functions set_numa_mem(), numa_mem_id() and cpu_to_mem()
 * defined in <linux/topology.h>.
 */
DEFINE_PER_CPU(int, _numa_mem_);		/* Kernel "local memory" node */
EXPORT_PER_CPU_SYMBOL(_numa_mem_);
#endif

/* work_structs for global per-cpu drains */
struct pcpu_drain {
	struct zone *zone;
	struct work_struct work;
};
static DEFINE_MUTEX(pcpu_drain_mutex);
static DEFINE_PER_CPU(struct pcpu_drain, pcpu_drain);

#ifdef CONFIG_GCC_PLUGIN_LATENT_ENTROPY
volatile unsigned long latent_entropy __latent_entropy;
EXPORT_SYMBOL(latent_entropy);
#endif

/*
 * Array of node states.
 */
nodemask_t node_states[NR_NODE_STATES] __read_mostly = {
	[N_POSSIBLE] = NODE_MASK_ALL,
	[N_ONLINE] = { { [0] = 1UL } },
#ifndef CONFIG_NUMA
	[N_NORMAL_MEMORY] = { { [0] = 1UL } },
#ifdef CONFIG_HIGHMEM
	[N_HIGH_MEMORY] = { { [0] = 1UL } },
#endif
	[N_MEMORY] = { { [0] = 1UL } },
	[N_CPU] = { { [0] = 1UL } },
#endif	/* NUMA */
};
EXPORT_SYMBOL(node_states);

atomic_long_t _totalram_pages __read_mostly;
EXPORT_SYMBOL(_totalram_pages);
unsigned long totalreserve_pages __read_mostly;
unsigned long totalcma_pages __read_mostly;

int percpu_pagelist_fraction;
gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
DEFINE_STATIC_KEY_FALSE(init_on_alloc);
EXPORT_SYMBOL(init_on_alloc);

DEFINE_STATIC_KEY_FALSE(init_on_free);
EXPORT_SYMBOL(init_on_free);

static bool _init_on_alloc_enabled_early __read_mostly
				= IS_ENABLED(CONFIG_INIT_ON_ALLOC_DEFAULT_ON);
static int __init early_init_on_alloc(char *buf)
{
	return kstrtobool(buf, &_init_on_alloc_enabled_early);
}
early_param("init_on_alloc", early_init_on_alloc);

static bool _init_on_free_enabled_early __read_mostly
				= IS_ENABLED(CONFIG_INIT_ON_FREE_DEFAULT_ON);
static int __init early_init_on_free(char *buf)
{
	return kstrtobool(buf, &_init_on_free_enabled_early);
}
early_param("init_on_free", early_init_on_free);

/*
 * A cached value of the page's pageblock's migratetype, used when the page is
 * put on a pcplist. Used to avoid the pageblock migratetype lookup when
 * freeing from pcplists in most cases, at the cost of possibly becoming stale.
 * Also the migratetype set in the page does not necessarily match the pcplist
 * index, e.g. page might have MIGRATE_CMA set but be on a pcplist with any
 * other index - this ensures that it will be put on the correct CMA freelist.
 */
static inline int get_pcppage_migratetype(struct page *page)
{
	return page->index;
}

static inline void set_pcppage_migratetype(struct page *page, int migratetype)
{
	page->index = migratetype;
}
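
/*
 * Illustrative note (added): the freeing path stashes the pageblock's
 * migratetype here (e.g. via free_unref_page_prepare()) before a page
 * goes onto a pcplist; free_pcppages_bulk() then reads it back with
 * get_pcppage_migratetype(), usually avoiding a pageblock bitmap lookup
 * at buddy-free time.
 */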

#ifdef CONFIG_PM_SLEEP
/*
 * The following functions are used by the suspend/hibernate code to temporarily
 * change gfp_allowed_mask in order to avoid using I/O during memory allocations
 * while devices are suspended.  To avoid races with the suspend/hibernate code,
 * they should always be called with system_transition_mutex held
 * (gfp_allowed_mask also should only be modified with system_transition_mutex
 * held, unless the suspend/hibernate code is guaranteed not to run in parallel
 * with that modification).
 */

static gfp_t saved_gfp_mask;

void pm_restore_gfp_mask(void)
{
	WARN_ON(!mutex_is_locked(&system_transition_mutex));
	if (saved_gfp_mask) {
		gfp_allowed_mask = saved_gfp_mask;
		saved_gfp_mask = 0;
	}
}

void pm_restrict_gfp_mask(void)
{
	WARN_ON(!mutex_is_locked(&system_transition_mutex));
	WARN_ON(saved_gfp_mask);
	saved_gfp_mask = gfp_allowed_mask;
	gfp_allowed_mask &= ~(__GFP_IO | __GFP_FS);
}

bool pm_suspended_storage(void)
{
	if ((gfp_allowed_mask & (__GFP_IO | __GFP_FS)) == (__GFP_IO | __GFP_FS))
		return false;
	return true;
}
#endif /* CONFIG_PM_SLEEP */
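
/*
 * Usage note (added): the hibernation core is expected to call
 * pm_restrict_gfp_mask() before suspending devices, so allocations
 * cannot start I/O against them, and pm_restore_gfp_mask() after
 * resume - both while holding system_transition_mutex, as required by
 * the comment above.
 */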

#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
unsigned int pageblock_order __read_mostly;
#endif

static void __free_pages_ok(struct page *page, unsigned int order,
			    fpi_t fpi_flags);

/*
 * results with 256, 32 in the lowmem_reserve sysctl:
 *	1G machine -> (16M dma, 800M-16M normal, 1G-800M high)
 *	1G machine -> (16M dma, 784M normal, 224M high)
 *	NORMAL allocation will leave 784M/256 of ram reserved in the ZONE_DMA
 *	HIGHMEM allocation will leave 224M/32 of ram reserved in ZONE_NORMAL
 *	HIGHMEM allocation will leave (224M+784M)/256 of ram reserved in ZONE_DMA
 *
 * TBD: should special case ZONE_DMA32 machines here - in those we normally
 * don't need any ZONE_NORMAL reservation
 */
int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES] = {
#ifdef CONFIG_ZONE_DMA
	[ZONE_DMA] = 256,
#endif
#ifdef CONFIG_ZONE_DMA32
	[ZONE_DMA32] = 256,
#endif
	[ZONE_NORMAL] = 32,
#ifdef CONFIG_HIGHMEM
	[ZONE_HIGHMEM] = 0,
#endif
	[ZONE_MOVABLE] = 0,
};
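
/*
 * Worked example (added for illustration): with the ratios above, on a
 * machine with 784M of ZONE_NORMAL and 224M of ZONE_HIGHMEM, a
 * HIGHMEM-capable allocation falling back to ZONE_NORMAL must leave
 * roughly 224M/32 = 7M of ZONE_NORMAL free, preserving lowmem for
 * allocations that cannot use HIGHMEM.
 */
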
static char * const zone_names[MAX_NR_ZONES] = {
#ifdef CONFIG_ZONE_DMA
	 "DMA",
#endif
#ifdef CONFIG_ZONE_DMA32
	 "DMA32",
#endif
	 "Normal",
#ifdef CONFIG_HIGHMEM
	 "HighMem",
#endif
	 "Movable",
#ifdef CONFIG_ZONE_DEVICE
	 "Device",
#endif
};

const char * const migratetype_names[MIGRATE_TYPES] = {
	"Unmovable",
	"Movable",
	"Reclaimable",
	"HighAtomic",
#ifdef CONFIG_CMA
	"CMA",
#endif
#ifdef CONFIG_MEMORY_ISOLATION
	"Isolate",
#endif
};

compound_page_dtor * const compound_page_dtors[NR_COMPOUND_DTORS] = {
	[NULL_COMPOUND_DTOR] = NULL,
	[COMPOUND_PAGE_DTOR] = free_compound_page,
#ifdef CONFIG_HUGETLB_PAGE
	[HUGETLB_PAGE_DTOR] = free_huge_page,
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	[TRANSHUGE_PAGE_DTOR] = free_transhuge_page,
#endif
};

int min_free_kbytes = 1024;
int user_min_free_kbytes = -1;
#ifdef CONFIG_DISCONTIGMEM
/*
 * DiscontigMem defines memory ranges as separate pg_data_t even if the ranges
 * are not on separate NUMA nodes. Functionally this works but with
 * watermark_boost_factor, it can reclaim prematurely as the ranges can be
 * quite small. By default, do not boost watermarks on discontigmem as in
 * many cases very high-order allocations like THP are likely to be
 * unsupported and the premature reclaim offsets the advantage of long-term
 * fragmentation avoidance.
 */
int watermark_boost_factor __read_mostly;
#else
int watermark_boost_factor __read_mostly = 15000;
#endif
int watermark_scale_factor = 10;

static unsigned long nr_kernel_pages __initdata;
static unsigned long nr_all_pages __initdata;
static unsigned long dma_reserve __initdata;

static unsigned long arch_zone_lowest_possible_pfn[MAX_NR_ZONES] __initdata;
static unsigned long arch_zone_highest_possible_pfn[MAX_NR_ZONES] __initdata;
static unsigned long required_kernelcore __initdata;
static unsigned long required_kernelcore_percent __initdata;
static unsigned long required_movablecore __initdata;
static unsigned long required_movablecore_percent __initdata;
static unsigned long zone_movable_pfn[MAX_NUMNODES] __initdata;
static bool mirrored_kernelcore __meminitdata;

/* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
int movable_zone;
EXPORT_SYMBOL(movable_zone);

#if MAX_NUMNODES > 1
unsigned int nr_node_ids __read_mostly = MAX_NUMNODES;
unsigned int nr_online_nodes __read_mostly = 1;
EXPORT_SYMBOL(nr_node_ids);
EXPORT_SYMBOL(nr_online_nodes);
#endif

int page_group_by_mobility_disabled __read_mostly;

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
/*
 * During boot we initialize deferred pages on-demand, as needed, but once
 * page_alloc_init_late() has finished, the deferred pages are all initialized,
 * and we can permanently disable that path.
 */
static DEFINE_STATIC_KEY_TRUE(deferred_pages);

/*
 * Call kasan_free_pages() only after deferred memory initialization
 * has completed. Poisoning pages during deferred memory init will greatly
 * lengthen the process and cause problems in large memory systems, as
 * deferred page initialization is done with interrupts disabled.
 *
 * Assuming that there will be no reference to those newly initialized
 * pages before they are ever allocated, this should have no effect on
 * KASAN memory tracking as the poison will be properly inserted at page
 * allocation time. The only corner case is when pages are allocated by
 * on-demand allocation and then freed again before the deferred pages
 * initialization is done, but this is not likely to happen.
 */
static inline void kasan_free_nondeferred_pages(struct page *page, int order)
{
	if (!static_branch_unlikely(&deferred_pages))
		kasan_free_pages(page, order);
}

/* Returns true if the struct page for the pfn is uninitialised */
static inline bool __meminit early_page_uninitialised(unsigned long pfn)
{
	int nid = early_pfn_to_nid(pfn);

	if (node_online(nid) && pfn >= NODE_DATA(nid)->first_deferred_pfn)
		return true;

	return false;
}

/*
 * Returns true when the remaining initialisation should be deferred until
 * later in the boot cycle when it can be parallelised.
 */
static bool __meminit
defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
{
	static unsigned long prev_end_pfn, nr_initialised;

	/*
	 * prev_end_pfn is a static that holds the end of the previous zone.
	 * No need to protect because called very early in boot before smp_init.
	 */
	if (prev_end_pfn != end_pfn) {
		prev_end_pfn = end_pfn;
		nr_initialised = 0;
	}

	/* Always populate low zones for address-constrained allocations */
	if (end_pfn < pgdat_end_pfn(NODE_DATA(nid)))
		return false;

	/*
	 * We start only with one section of pages, more pages are added as
	 * needed until the rest of deferred pages are initialized.
	 */
	nr_initialised++;
	if ((nr_initialised > PAGES_PER_SECTION) &&
	    (pfn & (PAGES_PER_SECTION - 1)) == 0) {
		NODE_DATA(nid)->first_deferred_pfn = pfn;
		return true;
	}
	return false;
}
#else
#define kasan_free_nondeferred_pages(p, o)	kasan_free_pages(p, o)

static inline bool early_page_uninitialised(unsigned long pfn)
{
	return false;
}

static inline bool defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
{
	return false;
}
#endif
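
/*
 * Note (added for context): with deferred init, only the beginning of
 * each node's memory is initialised here; the pfn recorded in
 * first_deferred_pfn marks where page_alloc_init_late() later kicks off
 * per-node threads to initialise the rest in parallel.
 */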

/* Return a pointer to the bitmap storing bits affecting a block of pages */
static inline unsigned long *get_pageblock_bitmap(struct page *page,
							unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
	return section_to_usemap(__pfn_to_section(pfn));
#else
	return page_zone(page)->pageblock_flags;
#endif /* CONFIG_SPARSEMEM */
}

static inline int pfn_to_bitidx(struct page *page, unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
	pfn &= (PAGES_PER_SECTION-1);
#else
	pfn = pfn - round_down(page_zone(page)->zone_start_pfn, pageblock_nr_pages);
#endif /* CONFIG_SPARSEMEM */
	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
}
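
/*
 * Worked example (added; assumes 64-bit longs): NR_PAGEBLOCK_BITS is 4,
 * so each pageblock owns four flag bits (three migratetype bits plus a
 * skip bit) and one unsigned long packs the flags of sixteen
 * consecutive pageblocks - which is why updates below must be atomic
 * read-modify-writes of the shared word.
 */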

static __always_inline
unsigned long __get_pfnblock_flags_mask(struct page *page,
					unsigned long pfn,
					unsigned long mask)
{
	unsigned long *bitmap;
	unsigned long bitidx, word_bitidx;
	unsigned long word;

	bitmap = get_pageblock_bitmap(page, pfn);
	bitidx = pfn_to_bitidx(page, pfn);
	word_bitidx = bitidx / BITS_PER_LONG;
	bitidx &= (BITS_PER_LONG-1);

	word = bitmap[word_bitidx];
	return (word >> bitidx) & mask;
}

/**
 * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @pfn: The target page frame number
 * @mask: mask of bits that the caller is interested in
 *
 * Return: pageblock_bits flags
 */
unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn,
					unsigned long mask)
{
	return __get_pfnblock_flags_mask(page, pfn, mask);
}

static __always_inline int get_pfnblock_migratetype(struct page *page, unsigned long pfn)
{
	return __get_pfnblock_flags_mask(page, pfn, MIGRATETYPE_MASK);
}

/**
 * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
 * @page: The page within the block of interest
 * @flags: The flags to set
 * @pfn: The target page frame number
 * @mask: mask of bits that the caller is interested in
 */
void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
					unsigned long pfn,
					unsigned long mask)
{
	unsigned long *bitmap;
	unsigned long bitidx, word_bitidx;
	unsigned long old_word, word;

	BUILD_BUG_ON(NR_PAGEBLOCK_BITS != 4);
	BUILD_BUG_ON(MIGRATE_TYPES > (1 << PB_migratetype_bits));

	bitmap = get_pageblock_bitmap(page, pfn);
	bitidx = pfn_to_bitidx(page, pfn);
	word_bitidx = bitidx / BITS_PER_LONG;
	bitidx &= (BITS_PER_LONG-1);

	VM_BUG_ON_PAGE(!zone_spans_pfn(page_zone(page), pfn), page);

	mask <<= bitidx;
	flags <<= bitidx;

	word = READ_ONCE(bitmap[word_bitidx]);
	for (;;) {
		old_word = cmpxchg(&bitmap[word_bitidx], word, (word & ~mask) | flags);
		if (word == old_word)
			break;
		word = old_word;
	}
}

void set_pageblock_migratetype(struct page *page, int migratetype)
{
	if (unlikely(page_group_by_mobility_disabled &&
		     migratetype < MIGRATE_PCPTYPES))
		migratetype = MIGRATE_UNMOVABLE;

	set_pfnblock_flags_mask(page, (unsigned long)migratetype,
				page_to_pfn(page), MIGRATETYPE_MASK);
}
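
/*
 * Usage example (added): deferred_free_range() later in this file marks
 * whole pageblocks MIGRATE_MOVABLE this way before handing them to the
 * buddy allocator.
 */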

#ifdef CONFIG_DEBUG_VM
static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
{
	int ret = 0;
	unsigned seq;
	unsigned long pfn = page_to_pfn(page);
	unsigned long sp, start_pfn;

	do {
		seq = zone_span_seqbegin(zone);
		start_pfn = zone->zone_start_pfn;
		sp = zone->spanned_pages;
		if (!zone_spans_pfn(zone, pfn))
			ret = 1;
	} while (zone_span_seqretry(zone, seq));

	if (ret)
		pr_err("page 0x%lx outside node %d zone %s [ 0x%lx - 0x%lx ]\n",
			pfn, zone_to_nid(zone), zone->name,
			start_pfn, start_pfn + sp);

	return ret;
}

static int page_is_consistent(struct zone *zone, struct page *page)
{
	if (!pfn_valid_within(page_to_pfn(page)))
		return 0;
	if (zone != page_zone(page))
		return 0;

	return 1;
}
/*
 * Temporary debugging check for pages not lying within a given zone.
 */
static int __maybe_unused bad_range(struct zone *zone, struct page *page)
{
	if (page_outside_zone_boundaries(zone, page))
		return 1;
	if (!page_is_consistent(zone, page))
		return 1;

	return 0;
}
#else
static inline int __maybe_unused bad_range(struct zone *zone, struct page *page)
{
	return 0;
}
#endif

static void bad_page(struct page *page, const char *reason)
{
	static unsigned long resume;
	static unsigned long nr_shown;
	static unsigned long nr_unshown;

	/*
	 * Allow a burst of 60 reports, then keep quiet for that minute;
	 * or allow a steady drip of one report per second.
	 */
	if (nr_shown == 60) {
		if (time_before(jiffies, resume)) {
			nr_unshown++;
			goto out;
		}
		if (nr_unshown) {
			pr_alert(
			      "BUG: Bad page state: %lu messages suppressed\n",
				nr_unshown);
			nr_unshown = 0;
		}
		nr_shown = 0;
	}
	if (nr_shown++ == 0)
		resume = jiffies + 60 * HZ;

	pr_alert("BUG: Bad page state in process %s  pfn:%05lx\n",
		current->comm, page_to_pfn(page));
	__dump_page(page, reason);
	dump_page_owner(page);

	print_modules();
	dump_stack();
out:
	/* Leave bad fields for debug, except PageBuddy could make trouble */
	page_mapcount_reset(page); /* remove PageBuddy */
	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}

/*
 * Higher-order pages are called "compound pages".  They are structured thusly:
 *
 * The first PAGE_SIZE page is called the "head page" and has PG_head set.
 *
 * The remaining PAGE_SIZE pages are called "tail pages". PageTail() is encoded
 * in bit 0 of page->compound_head. The rest of the bits point to the head page.
 *
 * The first tail page's ->compound_dtor holds the offset in the array of
 * compound page destructors. See compound_page_dtors.
 *
 * The first tail page's ->compound_order holds the order of allocation.
 * This usage means that zero-order pages may not be compound.
 */

void free_compound_page(struct page *page)
{
	mem_cgroup_uncharge(page);
	__free_pages_ok(page, compound_order(page), FPI_NONE);
}

void prep_compound_page(struct page *page, unsigned int order)
{
	int i;
	int nr_pages = 1 << order;

	__SetPageHead(page);
	for (i = 1; i < nr_pages; i++) {
		struct page *p = page + i;
		set_page_count(p, 0);
		p->mapping = TAIL_MAPPING;
		set_compound_head(p, page);
	}

	set_compound_page_dtor(page, COMPOUND_PAGE_DTOR);
	set_compound_order(page, order);
	atomic_set(compound_mapcount_ptr(page), -1);
	if (hpage_pincount_available(page))
		atomic_set(compound_pincount_ptr(page), 0);
}
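
/*
 * Worked example (added for illustration): for order = 2,
 * prep_compound_page() produces one head page with PG_head set followed
 * by three tail pages whose compound_head fields point back at the head
 * with bit 0 set; compound_order() then reads the order (2) stored in
 * the first tail page.
 */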

#ifdef CONFIG_DEBUG_PAGEALLOC
unsigned int _debug_guardpage_minorder;

bool _debug_pagealloc_enabled_early __read_mostly
			= IS_ENABLED(CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT);
EXPORT_SYMBOL(_debug_pagealloc_enabled_early);
DEFINE_STATIC_KEY_FALSE(_debug_pagealloc_enabled);
EXPORT_SYMBOL(_debug_pagealloc_enabled);

DEFINE_STATIC_KEY_FALSE(_debug_guardpage_enabled);

static int __init early_debug_pagealloc(char *buf)
{
	return kstrtobool(buf, &_debug_pagealloc_enabled_early);
}
early_param("debug_pagealloc", early_debug_pagealloc);

static int __init debug_guardpage_minorder_setup(char *buf)
{
	unsigned long res;

	if (kstrtoul(buf, 10, &res) < 0 ||  res > MAX_ORDER / 2) {
		pr_err("Bad debug_guardpage_minorder value\n");
		return 0;
	}
	_debug_guardpage_minorder = res;
	pr_info("Setting debug_guardpage_minorder to %lu\n", res);
	return 0;
}
early_param("debug_guardpage_minorder", debug_guardpage_minorder_setup);

static inline bool set_page_guard(struct zone *zone, struct page *page,
				unsigned int order, int migratetype)
{
	if (!debug_guardpage_enabled())
		return false;

	if (order >= debug_guardpage_minorder())
		return false;

	__SetPageGuard(page);
	INIT_LIST_HEAD(&page->lru);
	set_page_private(page, order);
	/* Guard pages are not available for any usage */
	__mod_zone_freepage_state(zone, -(1 << order), migratetype);

	return true;
}

static inline void clear_page_guard(struct zone *zone, struct page *page,
				unsigned int order, int migratetype)
{
	if (!debug_guardpage_enabled())
		return;

	__ClearPageGuard(page);

	set_page_private(page, 0);
	if (!is_migrate_isolate(migratetype))
		__mod_zone_freepage_state(zone, (1 << order), migratetype);
}
#else
static inline bool set_page_guard(struct zone *zone, struct page *page,
			unsigned int order, int migratetype) { return false; }
static inline void clear_page_guard(struct zone *zone, struct page *page,
				unsigned int order, int migratetype) {}
#endif

/*
 * Enable static keys related to various memory debugging and hardening options.
 * Some override others, and depend on early params that are evaluated in the
 * order of appearance. So we need to first gather the full picture of what was
 * enabled, and then make decisions.
 */
void init_mem_debugging_and_hardening(void)
{
	if (_init_on_alloc_enabled_early) {
		if (page_poisoning_enabled())
			pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, "
				"will take precedence over init_on_alloc\n");
		else
			static_branch_enable(&init_on_alloc);
	}
	if (_init_on_free_enabled_early) {
		if (page_poisoning_enabled())
			pr_info("mem auto-init: CONFIG_PAGE_POISONING is on, "
				"will take precedence over init_on_free\n");
		else
			static_branch_enable(&init_on_free);
	}

#ifdef CONFIG_PAGE_POISONING
	/*
	 * Page poisoning is debug page alloc for some arches. If
	 * either of those options are enabled, enable poisoning.
	 */
	if (page_poisoning_enabled() ||
	     (!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) &&
	      debug_pagealloc_enabled()))
		static_branch_enable(&_page_poisoning_enabled);
#endif

#ifdef CONFIG_DEBUG_PAGEALLOC
	if (!debug_pagealloc_enabled())
		return;

	static_branch_enable(&_debug_pagealloc_enabled);

	if (!debug_guardpage_minorder())
		return;

	static_branch_enable(&_debug_guardpage_enabled);
#endif
}
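
/*
 * Note (added): the ordering above makes the options interact; e.g. a
 * kernel booted with init_on_alloc=1 while page poisoning is enabled
 * keeps poisoning and only logs that it takes precedence, rather than
 * initialising pages twice.
 */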

static inline void set_buddy_order(struct page *page, unsigned int order)
{
	set_page_private(page, order);
	__SetPageBuddy(page);
}

/*
 * This function checks whether a page is free && is the buddy.
 * We can coalesce a page and its buddy if
 * (a) the buddy is not in a hole (check before calling!) &&
 * (b) the buddy is in the buddy system &&
 * (c) a page and its buddy have the same order &&
 * (d) a page and its buddy are in the same zone.
 *
 * For recording whether a page is in the buddy system, we set PageBuddy.
 * Setting, clearing, and testing PageBuddy is serialized by zone->lock.
 *
 * For recording page's order, we use page_private(page).
 */
static inline bool page_is_buddy(struct page *page, struct page *buddy,
							unsigned int order)
{
	if (!page_is_guard(buddy) && !PageBuddy(buddy))
		return false;

	if (buddy_order(buddy) != order)
		return false;

	/*
	 * zone check is done late to avoid uselessly calculating
	 * zone/node ids for pages that could never merge.
	 */
	if (page_zone_id(page) != page_zone_id(buddy))
		return false;

	VM_BUG_ON_PAGE(page_count(buddy) != 0, buddy);

	return true;
}
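
/*
 * Worked example (added; __find_buddy_pfn() in internal.h computes
 * buddy_pfn = page_pfn ^ (1 << order)): at order 2 the buddy of pfn 8
 * is 12 and vice versa, and a successful merge yields the order-3 block
 * at combined_pfn = buddy_pfn & pfn = 8.
 */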

#ifdef CONFIG_COMPACTION
static inline struct capture_control *task_capc(struct zone *zone)
{
	struct capture_control *capc = current->capture_control;

	return unlikely(capc) &&
		!(current->flags & PF_KTHREAD) &&
		!capc->page &&
		capc->cc->zone == zone ? capc : NULL;
}

static inline bool
compaction_capture(struct capture_control *capc, struct page *page,
		   int order, int migratetype)
{
	if (!capc || order != capc->cc->order)
		return false;

	/* Do not accidentally pollute CMA or isolated regions */
	if (is_migrate_cma(migratetype) ||
	    is_migrate_isolate(migratetype))
		return false;

	/*
	 * Do not let lower order allocations pollute a movable pageblock.
	 * This might let an unmovable request use a reclaimable pageblock
	 * and vice-versa but no more than normal fallback logic which can
	 * have trouble finding a high-order free page.
	 */
	if (order < pageblock_order && migratetype == MIGRATE_MOVABLE)
		return false;

	capc->page = page;
	return true;
}

#else
static inline struct capture_control *task_capc(struct zone *zone)
{
	return NULL;
}

static inline bool
compaction_capture(struct capture_control *capc, struct page *page,
		   int order, int migratetype)
{
	return false;
}
#endif /* CONFIG_COMPACTION */

/* Used for pages not on another list */
static inline void add_to_free_list(struct page *page, struct zone *zone,
				    unsigned int order, int migratetype)
{
	struct free_area *area = &zone->free_area[order];

	list_add(&page->lru, &area->free_list[migratetype]);
	area->nr_free++;
}

/* Used for pages not on another list */
static inline void add_to_free_list_tail(struct page *page, struct zone *zone,
					 unsigned int order, int migratetype)
{
	struct free_area *area = &zone->free_area[order];

	list_add_tail(&page->lru, &area->free_list[migratetype]);
	area->nr_free++;
}

/*
 * Used for pages which are on another list. Move the pages to the tail
 * of the list - so the moved pages won't immediately be considered for
 * allocation again (e.g., optimization for memory onlining).
 */
static inline void move_to_free_list(struct page *page, struct zone *zone,
				     unsigned int order, int migratetype)
{
	struct free_area *area = &zone->free_area[order];

	list_move_tail(&page->lru, &area->free_list[migratetype]);
}

static inline void del_page_from_free_list(struct page *page, struct zone *zone,
					   unsigned int order)
{
	/* clear reported state and update reported page count */
	if (page_reported(page))
		__ClearPageReported(page);

	list_del(&page->lru);
	__ClearPageBuddy(page);
	set_page_private(page, 0);
	zone->free_area[order].nr_free--;
}

/*
 * If this is not the largest possible page, check if the buddy
 * of the next-highest order is free. If it is, it's possible
 * that pages are being freed that will coalesce soon. In case that is
 * happening, add the free page to the tail of the list
 * so it's less likely to be used soon and more likely to be merged
 * as a higher order page.
 */
static inline bool
buddy_merge_likely(unsigned long pfn, unsigned long buddy_pfn,
		   struct page *page, unsigned int order)
{
	struct page *higher_page, *higher_buddy;
	unsigned long combined_pfn;

	if (order >= MAX_ORDER - 2)
		return false;

	if (!pfn_valid_within(buddy_pfn))
		return false;

	combined_pfn = buddy_pfn & pfn;
	higher_page = page + (combined_pfn - pfn);
	buddy_pfn = __find_buddy_pfn(combined_pfn, order + 1);
	higher_buddy = higher_page + (buddy_pfn - combined_pfn);

	return pfn_valid_within(buddy_pfn) &&
	       page_is_buddy(higher_page, higher_buddy, order + 1);
}

/*
 * Freeing function for a buddy system allocator.
 *
 * The concept of a buddy system is to maintain direct-mapped table
 * (containing bit values) for memory blocks of various "orders".
 * The bottom level table contains the map for the smallest allocatable
 * units of memory (here, pages), and each level above it describes
 * pairs of units from the levels below, hence, "buddies".
 * At a high level, all that happens here is marking the table entry
 * at the bottom level available, and propagating the changes upward
 * as necessary, plus some accounting needed to play nicely with other
 * parts of the VM system.
 * At each level, we keep a list of pages, which are heads of continuous
 * free pages of length of (1 << order) and marked with PageBuddy.
 * Page's order is recorded in page_private(page) field.
 * So when we are allocating or freeing one, we can derive the state of the
 * other.  That is, if we allocate a small block, and both were
 * free, the remainder of the region must be split into blocks.
 * If a block is freed, and its buddy is also free, then this
 * triggers coalescing into a block of larger size.
 *
 * -- nyc
 */

static inline void __free_one_page(struct page *page,
		unsigned long pfn,
		struct zone *zone, unsigned int order,
		int migratetype, fpi_t fpi_flags)
{
	struct capture_control *capc = task_capc(zone);
	unsigned long buddy_pfn;
	unsigned long combined_pfn;
	unsigned int max_order;
	struct page *buddy;
	bool to_tail;

	max_order = min_t(unsigned int, MAX_ORDER - 1, pageblock_order);

	VM_BUG_ON(!zone_is_initialized(zone));
	VM_BUG_ON_PAGE(page->flags & PAGE_FLAGS_CHECK_AT_PREP, page);

	VM_BUG_ON(migratetype == -1);
	if (likely(!is_migrate_isolate(migratetype)))
		__mod_zone_freepage_state(zone, 1 << order, migratetype);

	VM_BUG_ON_PAGE(pfn & ((1 << order) - 1), page);
	VM_BUG_ON_PAGE(bad_range(zone, page), page);

continue_merging:
	while (order < max_order) {
		if (compaction_capture(capc, page, order, migratetype)) {
			__mod_zone_freepage_state(zone, -(1 << order),
								migratetype);
			return;
		}
		buddy_pfn = __find_buddy_pfn(pfn, order);
		buddy = page + (buddy_pfn - pfn);

		if (!pfn_valid_within(buddy_pfn))
			goto done_merging;
		if (!page_is_buddy(page, buddy, order))
			goto done_merging;
		/*
		 * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
		 * merge with it and move up one order.
		 */
		if (page_is_guard(buddy))
			clear_page_guard(zone, buddy, order, migratetype);
		else
			del_page_from_free_list(buddy, zone, order);
		combined_pfn = buddy_pfn & pfn;
		page = page + (combined_pfn - pfn);
		pfn = combined_pfn;
		order++;
	}
	if (order < MAX_ORDER - 1) {
		/* If we are here, it means order is >= pageblock_order.
		 * We want to prevent merge between freepages on isolate
		 * pageblock and normal pageblock. Without this, pageblock
		 * isolation could cause incorrect freepage or CMA accounting.
		 *
		 * We don't want to hit this code for the more frequent
		 * low-order merging.
		 */
		if (unlikely(has_isolate_pageblock(zone))) {
			int buddy_mt;

			buddy_pfn = __find_buddy_pfn(pfn, order);
			buddy = page + (buddy_pfn - pfn);
			buddy_mt = get_pageblock_migratetype(buddy);

			if (migratetype != buddy_mt
					&& (is_migrate_isolate(migratetype) ||
						is_migrate_isolate(buddy_mt)))
				goto done_merging;
		}
		max_order = order + 1;
		goto continue_merging;
	}

done_merging:
	set_buddy_order(page, order);

	if (fpi_flags & FPI_TO_TAIL)
		to_tail = true;
	else if (is_shuffle_order(order))
		to_tail = shuffle_pick_tail();
	else
		to_tail = buddy_merge_likely(pfn, buddy_pfn, page, order);

	if (to_tail)
		add_to_free_list_tail(page, zone, order, migratetype);
	else
		add_to_free_list(page, zone, order, migratetype);

	/* Notify page reporting subsystem of freed page */
	if (!(fpi_flags & FPI_SKIP_REPORT_NOTIFY))
		page_reporting_notify_free(order);
}

/*
 * A bad page could be due to a number of fields. Instead of multiple branches,
 * try and check multiple fields with one check. The caller must do a detailed
 * check if necessary.
 */
static inline bool page_expected_state(struct page *page,
					unsigned long check_flags)
{
	if (unlikely(atomic_read(&page->_mapcount) != -1))
		return false;

	if (unlikely((unsigned long)page->mapping |
			page_ref_count(page) |
#ifdef CONFIG_MEMCG
			(unsigned long)page_memcg(page) |
#endif
			(page->flags & check_flags)))
		return false;

	return true;
}

static const char *page_bad_reason(struct page *page, unsigned long flags)
{
	const char *bad_reason = NULL;

	if (unlikely(atomic_read(&page->_mapcount) != -1))
		bad_reason = "nonzero mapcount";
	if (unlikely(page->mapping != NULL))
		bad_reason = "non-NULL mapping";
	if (unlikely(page_ref_count(page) != 0))
		bad_reason = "nonzero _refcount";
	if (unlikely(page->flags & flags)) {
		if (flags == PAGE_FLAGS_CHECK_AT_PREP)
			bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag(s) set";
		else
			bad_reason = "PAGE_FLAGS_CHECK_AT_FREE flag(s) set";
	}
#ifdef CONFIG_MEMCG
	if (unlikely(page_memcg(page)))
		bad_reason = "page still charged to cgroup";
#endif
	return bad_reason;
}

static void check_free_page_bad(struct page *page)
{
	bad_page(page,
		 page_bad_reason(page, PAGE_FLAGS_CHECK_AT_FREE));
}

static inline int check_free_page(struct page *page)
{
	if (likely(page_expected_state(page, PAGE_FLAGS_CHECK_AT_FREE)))
		return 0;

	/* Something has gone sideways, find it */
	check_free_page_bad(page);
	return 1;
}

static int free_tail_pages_check(struct page *head_page, struct page *page)
{
	int ret = 1;

	/*
	 * We rely on page->lru.next never having bit 0 set, unless the page
	 * is PageTail(). Let's make sure that's true even for poisoned ->lru.
	 */
	BUILD_BUG_ON((unsigned long)LIST_POISON1 & 1);

	if (!IS_ENABLED(CONFIG_DEBUG_VM)) {
		ret = 0;
		goto out;
	}
	switch (page - head_page) {
	case 1:
		/* the first tail page: ->mapping may be compound_mapcount() */
		if (unlikely(compound_mapcount(page))) {
			bad_page(page, "nonzero compound_mapcount");
			goto out;
		}
		break;
	case 2:
		/*
		 * the second tail page: ->mapping is
		 * deferred_list.next -- ignore value.
		 */
		break;
	default:
		if (page->mapping != TAIL_MAPPING) {
			bad_page(page, "corrupted mapping in tail page");
			goto out;
		}
		break;
	}
	if (unlikely(!PageTail(page))) {
		bad_page(page, "PageTail not set");
		goto out;
	}
	if (unlikely(compound_head(page) != head_page)) {
		bad_page(page, "compound_head not consistent");
		goto out;
	}
	ret = 0;
out:
	page->mapping = NULL;
	clear_compound_head(page);
	return ret;
}

static void kernel_init_free_pages(struct page *page, int numpages)
{
	int i;

	/* s390's use of memset() could override KASAN redzones. */
	kasan_disable_current();
	for (i = 0; i < numpages; i++) {
		page_kasan_tag_reset(page + i);
		clear_highpage(page + i);
	}
	kasan_enable_current();
}

static __always_inline bool free_pages_prepare(struct page *page,
					unsigned int order, bool check_free)
{
	int bad = 0;

	VM_BUG_ON_PAGE(PageTail(page), page);

	trace_mm_page_free(page, order);

	if (unlikely(PageHWPoison(page)) && !order) {
		/*
		 * Do not let hwpoison pages hit pcplists/buddy
		 * Untie memcg state and reset page's owner
		 */
		if (memcg_kmem_enabled() && PageMemcgKmem(page))
			__memcg_kmem_uncharge_page(page, order);
		reset_page_owner(page, order);
		return false;
	}

	/*
	 * Check tail pages before head page information is cleared to
	 * avoid checking PageCompound for order-0 pages.
	 */
	if (unlikely(order)) {
		bool compound = PageCompound(page);
		int i;

		VM_BUG_ON_PAGE(compound && compound_order(page) != order, page);

		if (compound)
			ClearPageDoubleMap(page);
		for (i = 1; i < (1 << order); i++) {
			if (compound)
				bad += free_tail_pages_check(page, page + i);
			if (unlikely(check_free_page(page + i))) {
				bad++;
				continue;
			}
			(page + i)->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
		}
	}
	if (PageMappingFlags(page))
		page->mapping = NULL;
	if (memcg_kmem_enabled() && PageMemcgKmem(page))
		__memcg_kmem_uncharge_page(page, order);
	if (check_free)
		bad += check_free_page(page);
	if (bad)
		return false;

	page_cpupid_reset_last(page);
	page->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
	reset_page_owner(page, order);

	if (!PageHighMem(page)) {
		debug_check_no_locks_freed(page_address(page),
					   PAGE_SIZE << order);
		debug_check_no_obj_freed(page_address(page),
					   PAGE_SIZE << order);
	}
	if (want_init_on_free())
		kernel_init_free_pages(page, 1 << order);

	kernel_poison_pages(page, 1 << order);

	/*
	 * arch_free_page() can make the page's contents inaccessible.  s390
	 * does this.  So nothing which can access the page's contents should
	 * happen after this.
	 */
	arch_free_page(page, order);

	debug_pagealloc_unmap_pages(page, 1 << order);

	kasan_free_nondeferred_pages(page, order);

	return true;
}

#ifdef CONFIG_DEBUG_VM
/*
 * With DEBUG_VM enabled, order-0 pages are checked immediately when being freed
 * to pcp lists. With debug_pagealloc also enabled, they are also rechecked when
 * moved from pcp lists to free lists.
 */
static bool free_pcp_prepare(struct page *page)
{
	return free_pages_prepare(page, 0, true);
}

static bool bulkfree_pcp_prepare(struct page *page)
{
	if (debug_pagealloc_enabled_static())
		return check_free_page(page);
	else
		return false;
}
#else
/*
 * With DEBUG_VM disabled, order-0 pages being freed are checked only when
 * moving from pcp lists to free list in order to reduce overhead. With
 * debug_pagealloc enabled, they are checked also immediately when being freed
 * to the pcp lists.
 */
static bool free_pcp_prepare(struct page *page)
{
	if (debug_pagealloc_enabled_static())
		return free_pages_prepare(page, 0, true);
	else
		return free_pages_prepare(page, 0, false);
}

static bool bulkfree_pcp_prepare(struct page *page)
{
	return check_free_page(page);
}
#endif /* CONFIG_DEBUG_VM */

static inline void prefetch_buddy(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);
	unsigned long buddy_pfn = __find_buddy_pfn(pfn, 0);
	struct page *buddy = page + (buddy_pfn - pfn);

	prefetch(buddy);
}

/*
 * Frees a number of pages from the PCP lists
 * Assumes all pages on list are in same zone, and of same order.
 * count is the number of pages to free.
 *
 * If the zone was previously in an "all pages pinned" state then look to
 * see if this freeing clears that state.
 *
 * And clear the zone's pages_scanned counter, to hold off the "all pages are
 * pinned" detection logic.
 */
static void free_pcppages_bulk(struct zone *zone, int count,
					struct per_cpu_pages *pcp)
{
	int migratetype = 0;
	int batch_free = 0;
	int prefetch_nr = READ_ONCE(pcp->batch);
	bool isolated_pageblocks;
	struct page *page, *tmp;
	LIST_HEAD(head);

	/*
	 * Ensure a proper count is passed which otherwise would get stuck
	 * in the below while (list_empty(list)) loop.
	 */
	count = min(pcp->count, count);
	while (count) {
		struct list_head *list;

		/*
		 * Remove pages from lists in a round-robin fashion. A
		 * batch_free count is maintained that is incremented when an
		 * empty list is encountered.  This is so more pages are freed
		 * off fuller lists instead of spinning excessively around empty
		 * lists.
		 */
		do {
			batch_free++;
			if (++migratetype == MIGRATE_PCPTYPES)
				migratetype = 0;
			list = &pcp->lists[migratetype];
		} while (list_empty(list));

		/* This is the only non-empty list. Free them all. */
		if (batch_free == MIGRATE_PCPTYPES)
			batch_free = count;

		do {
			page = list_last_entry(list, struct page, lru);
			/* must delete to avoid corrupting pcp list */
			list_del(&page->lru);
			pcp->count--;

			if (bulkfree_pcp_prepare(page))
				continue;

			list_add_tail(&page->lru, &head);

			/*
			 * We are going to put the page back to the global
			 * pool, prefetch its buddy to speed up later access
			 * under zone->lock. It is believed the overhead of
			 * an additional test and calculating buddy_pfn here
			 * can be offset by reduced memory latency later. To
			 * avoid excessive prefetching due to large count, only
			 * prefetch buddy for the first pcp->batch nr of pages.
			 */
			if (prefetch_nr) {
				prefetch_buddy(page);
				prefetch_nr--;
			}
		} while (--count && --batch_free && !list_empty(list));
	}

	spin_lock(&zone->lock);
	isolated_pageblocks = has_isolate_pageblock(zone);

	/*
	 * Use safe version since after __free_one_page(),
	 * page->lru.next will not point to original list.
	 */
	list_for_each_entry_safe(page, tmp, &head, lru) {
		int mt = get_pcppage_migratetype(page);
		/* MIGRATE_ISOLATE page should not go to pcplists */
		VM_BUG_ON_PAGE(is_migrate_isolate(mt), page);
		/* Pageblock could have been isolated meanwhile */
		if (unlikely(isolated_pageblocks))
			mt = get_pageblock_migratetype(page);

		__free_one_page(page, page_to_pfn(page), zone, 0, mt, FPI_NONE);
		trace_mm_page_pcpu_drain(page, 0, mt);
	}
	spin_unlock(&zone->lock);
}
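
/*
 * Design note (added): free_pcppages_bulk() works in two phases - pages
 * are first gathered onto a local list (the caller has already disabled
 * IRQs), and only then is zone->lock taken to return them to the buddy
 * allocator, keeping the zone->lock hold time short.
 */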

static void free_one_page(struct zone *zone,
				struct page *page, unsigned long pfn,
				unsigned int order,
				int migratetype, fpi_t fpi_flags)
{
	spin_lock(&zone->lock);
	if (unlikely(has_isolate_pageblock(zone) ||
		is_migrate_isolate(migratetype))) {
		migratetype = get_pfnblock_migratetype(page, pfn);
	}
	__free_one_page(page, pfn, zone, order, migratetype, fpi_flags);
	spin_unlock(&zone->lock);
}

static void __meminit __init_single_page(struct page *page, unsigned long pfn,
				unsigned long zone, int nid)
{
	mm_zero_struct_page(page);
	set_page_links(page, zone, nid, pfn);
	init_page_count(page);
	page_mapcount_reset(page);
	page_cpupid_reset_last(page);
	page_kasan_tag_reset(page);

	INIT_LIST_HEAD(&page->lru);
#ifdef WANT_PAGE_VIRTUAL
	/* The shift won't overflow because ZONE_NORMAL is below 4G. */
	if (!is_highmem_idx(zone))
		set_page_address(page, __va(pfn << PAGE_SHIFT));
#endif
}

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
static void __meminit init_reserved_page(unsigned long pfn)
{
	pg_data_t *pgdat;
	int nid, zid;

	if (!early_page_uninitialised(pfn))
		return;

	nid = early_pfn_to_nid(pfn);
	pgdat = NODE_DATA(nid);

	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
		struct zone *zone = &pgdat->node_zones[zid];

		if (pfn >= zone->zone_start_pfn && pfn < zone_end_pfn(zone))
			break;
	}
	__init_single_page(pfn_to_page(pfn), pfn, zid, nid);
}
#else
static inline void init_reserved_page(unsigned long pfn)
{
}
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

/*
 * Initialised pages do not have PageReserved set. This function is
 * called for each range allocated by the bootmem allocator and
 * marks the pages PageReserved. The remaining valid pages are later
 * sent to the buddy page allocator.
 */
void __meminit reserve_bootmem_region(phys_addr_t start, phys_addr_t end)
{
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long end_pfn = PFN_UP(end);

	for (; start_pfn < end_pfn; start_pfn++) {
		if (pfn_valid(start_pfn)) {
			struct page *page = pfn_to_page(start_pfn);

			init_reserved_page(start_pfn);

			/* Avoid false-positive PageTail() */
			INIT_LIST_HEAD(&page->lru);

			/*
			 * no need for atomic set_bit because the struct
			 * page is not visible yet so nobody should
			 * access it yet.
			 */
			__SetPageReserved(page);
		}
	}
}

static void __free_pages_ok(struct page *page, unsigned int order,
			    fpi_t fpi_flags)
{
	unsigned long flags;
	int migratetype;
	unsigned long pfn = page_to_pfn(page);

	if (!free_pages_prepare(page, order, true))
		return;

	migratetype = get_pfnblock_migratetype(page, pfn);
	local_irq_save(flags);
	__count_vm_events(PGFREE, 1 << order);
	free_one_page(page_zone(page), page, pfn, order, migratetype,
		      fpi_flags);
	local_irq_restore(flags);
}

void __free_pages_core(struct page *page, unsigned int order)
{
	unsigned int nr_pages = 1 << order;
	struct page *p = page;
	unsigned int loop;

	/*
	 * When initializing the memmap, __init_single_page() sets the refcount
	 * of all pages to 1 ("allocated"/"not free"). We have to set the
	 * refcount of all involved pages to 0.
	 */
	prefetchw(p);
	for (loop = 0; loop < (nr_pages - 1); loop++, p++) {
		prefetchw(p + 1);
		__ClearPageReserved(p);
		set_page_count(p, 0);
	}
	__ClearPageReserved(p);
	set_page_count(p, 0);

	atomic_long_add(nr_pages, &page_zone(page)->managed_pages);

	/*
	 * Bypass PCP and place fresh pages right to the tail, primarily
	 * relevant for memory onlining.
	 */
	__free_pages_ok(page, order, FPI_TO_TAIL);
}

#ifdef CONFIG_NEED_MULTIPLE_NODES

/*
 * During memory init memblocks map pfns to nids. The search is expensive and
 * this caches recent lookups. The implementation of __early_pfn_to_nid
 * treats start/end as pfns.
 */
struct mminit_pfnnid_cache {
	unsigned long last_start;
	unsigned long last_end;
	int last_nid;
};

static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata;

/*
 * Required by SPARSEMEM. Given a PFN, return what node the PFN is on.
 */
static int __meminit __early_pfn_to_nid(unsigned long pfn,
					struct mminit_pfnnid_cache *state)
{
	unsigned long start_pfn, end_pfn;
	int nid;

	if (state->last_start <= pfn && pfn < state->last_end)
		return state->last_nid;

	nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);
	if (nid != NUMA_NO_NODE) {
		state->last_start = start_pfn;
		state->last_end = end_pfn;
		state->last_nid = nid;
	}

	return nid;
}

int __meminit early_pfn_to_nid(unsigned long pfn)
{
	static DEFINE_SPINLOCK(early_pfn_lock);
	int nid;

	spin_lock(&early_pfn_lock);
	nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
	if (nid < 0)
		nid = first_online_node;
	spin_unlock(&early_pfn_lock);

	return nid;
}
#endif /* CONFIG_NEED_MULTIPLE_NODES */

void __init memblock_free_pages(struct page *page, unsigned long pfn,
							unsigned int order)
{
	if (early_page_uninitialised(pfn))
		return;
	__free_pages_core(page, order);
}

/*
 * Check that the whole (or a subset of a) pageblock given by the interval of
 * [start_pfn, end_pfn) is valid and within the same zone, before scanning it
 * with the migration or free compaction scanner. The scanners then need to
 * use only pfn_valid_within() check for arches that allow holes within
 * pageblocks.
 *
 * Return struct page pointer of start_pfn, or NULL if checks were not passed.
 *
 * It's possible on some configurations to have a setup like node0 node1 node0
 * i.e. it's possible that all pages within a zone's range of pages do not
 * belong to a single zone. We assume that a border between node0 and node1
 * can occur within a single pageblock, but not a node0 node1 node0
 * interleaving within a single pageblock. It is therefore sufficient to check
 * the first and last page of a pageblock and avoid checking each individual
 * page in a pageblock.
 */
struct page *__pageblock_pfn_to_page(unsigned long start_pfn,
				     unsigned long end_pfn, struct zone *zone)
{
	struct page *start_page;
	struct page *end_page;

	/* end_pfn is one past the range we are checking */
	end_pfn--;

	if (!pfn_valid(start_pfn) || !pfn_valid(end_pfn))
		return NULL;

	start_page = pfn_to_online_page(start_pfn);
	if (!start_page)
		return NULL;

	if (page_zone(start_page) != zone)
		return NULL;

	end_page = pfn_to_page(end_pfn);

	/* This gives a shorter code than deriving page_zone(end_page) */
	if (page_zone_id(start_page) != page_zone_id(end_page))
		return NULL;

	return start_page;
}

void set_zone_contiguous(struct zone *zone)
{
	unsigned long block_start_pfn = zone->zone_start_pfn;
	unsigned long block_end_pfn;

	block_end_pfn = ALIGN(block_start_pfn + 1, pageblock_nr_pages);
	for (; block_start_pfn < zone_end_pfn(zone);
			block_start_pfn = block_end_pfn,
			 block_end_pfn += pageblock_nr_pages) {

		block_end_pfn = min(block_end_pfn, zone_end_pfn(zone));

		if (!__pageblock_pfn_to_page(block_start_pfn,
					     block_end_pfn, zone))
			return;
		cond_resched();
	}

	/* We confirm that there is no hole */
	zone->contiguous = true;
}
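
/*
 * The flag set above is consumed on the hot path. A minimal sketch of the
 * wrapper used by the compaction code (it lives outside this file; shown
 * here for illustration only):
 *
 *	static struct page *pageblock_pfn_to_page(unsigned long start_pfn,
 *			unsigned long end_pfn, struct zone *zone)
 *	{
 *		if (zone->contiguous)
 *			return pfn_to_page(start_pfn);
 *		return __pageblock_pfn_to_page(start_pfn, end_pfn, zone);
 *	}
 */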

void clear_zone_contiguous(struct zone *zone)
{
	zone->contiguous = false;
}

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
static void __init deferred_free_range(unsigned long pfn,
				       unsigned long nr_pages)
{
	struct page *page;
	unsigned long i;

	if (!nr_pages)
		return;

	page = pfn_to_page(pfn);

	/* Free a large naturally-aligned chunk if possible */
	if (nr_pages == pageblock_nr_pages &&
	    (pfn & (pageblock_nr_pages - 1)) == 0) {
		set_pageblock_migratetype(page, MIGRATE_MOVABLE);
		__free_pages_core(page, pageblock_order);
		return;
	}

	for (i = 0; i < nr_pages; i++, page++, pfn++) {
		if ((pfn & (pageblock_nr_pages - 1)) == 0)
			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
		__free_pages_core(page, 0);
	}
}
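
/*
 * Worked example (assuming pageblock_nr_pages == 512): a call such as
 *
 *	deferred_free_range(pfn, 512);	with (pfn & 511) == 0
 *
 * frees the whole block with a single __free_pages_core(page, 9), while
 * an unaligned or partial range is freed as individual order-0 pages.
 */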

/* Completion tracking for deferred_init_memmap() threads */
static atomic_t pgdat_init_n_undone __initdata;
static __initdata DECLARE_COMPLETION(pgdat_init_all_done_comp);

static inline void __init pgdat_init_report_one_done(void)
{
	if (atomic_dec_and_test(&pgdat_init_n_undone))
		complete(&pgdat_init_all_done_comp);
}

/*
 * Returns true if page needs to be initialized or freed to buddy allocator.
 *
 * First we check if pfn is valid on architectures where it is possible to have
 * holes within pageblock_nr_pages. On systems where it is not possible, this
 * function is optimized out.
 *
 * Then, we check if a current large page is valid by only checking the validity
 * of the head pfn.
 */
static inline bool __init deferred_pfn_valid(unsigned long pfn)
{
	if (!pfn_valid_within(pfn))
		return false;
	if (!(pfn & (pageblock_nr_pages - 1)) && !pfn_valid(pfn))
		return false;
	return true;
}
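
/*
 * Worked example (again assuming pageblock_nr_pages == 512): only pfns at
 * a block head pay for the memmap lookup,
 *
 *	deferred_pfn_valid(0x40000);	head pfn, calls pfn_valid()
 *	deferred_pfn_valid(0x40001);	covered by the head, no lookup
 *
 * which is what lets the callers below scan large ranges cheaply.
 */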

/*
 * Free pages to buddy allocator. Try to free aligned pages in
 * pageblock_nr_pages sizes.
 */
static void __init deferred_free_pages(unsigned long pfn,
				       unsigned long end_pfn)
{
	unsigned long nr_pgmask = pageblock_nr_pages - 1;
	unsigned long nr_free = 0;

	for (; pfn < end_pfn; pfn++) {
		if (!deferred_pfn_valid(pfn)) {
			deferred_free_range(pfn - nr_free, nr_free);
			nr_free = 0;
		} else if (!(pfn & nr_pgmask)) {
			deferred_free_range(pfn - nr_free, nr_free);
			nr_free = 1;
		} else {
			nr_free++;
		}
	}
	/* Free the last block of pages to the allocator */
	deferred_free_range(pfn - nr_free, nr_free);
}
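
/*
 * A short trace of the run-length logic above, assuming a hole at pfn 1000
 * (pageblock_nr_pages == 512 as before):
 *
 *	pfn  999: valid, mid-block -> nr_free++
 *	pfn 1000: invalid          -> deferred_free_range(1000 - nr_free, nr_free)
 *	pfn 1024: block head       -> flush the run, restart with nr_free = 1
 */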

/*
 * Initialize struct pages.  We minimize pfn page lookups and scheduler checks
 * by performing it only once every pageblock_nr_pages.
 * Return number of pages initialized.
 */
static unsigned long  __init deferred_init_pages(struct zone *zone,
						 unsigned long pfn,
						 unsigned long end_pfn)
{
	unsigned long nr_pgmask = pageblock_nr_pages - 1;
	int nid = zone_to_nid(zone);
	unsigned long nr_pages = 0;
	int zid = zone_idx(zone);
	struct page *page = NULL;

	for (; pfn < end_pfn; pfn++) {
		if (!deferred_pfn_valid(pfn)) {
			page = NULL;
			continue;
		} else if (!page || !(pfn & nr_pgmask)) {
			page = pfn_to_page(pfn);
		} else {
			page++;
		}
		__init_single_page(page, pfn, zid, nid);
		nr_pages++;
	}
	return nr_pages;
}

/*
 * This function is meant to pre-load the iterator for the zone init.
 * Specifically it walks through the ranges until we are caught up to the
 * first_init_pfn value and exits there. If we never encounter the value we
 * return false indicating there are no valid ranges left.
 */
static bool __init
deferred_init_mem_pfn_range_in_zone(u64 *i, struct zone *zone,
				    unsigned long *spfn, unsigned long *epfn,
				    unsigned long first_init_pfn)
{
	u64 j;

	/*
	 * Start out by walking through the ranges in this zone that have
	 * already been initialized. We don't need to do anything with them
	 * so we just need to flush them out of the system.
	 */
	for_each_free_mem_pfn_range_in_zone(j, zone, spfn, epfn) {
		if (*epfn <= first_init_pfn)
			continue;
		if (*spfn < first_init_pfn)
			*spfn = first_init_pfn;
		*i = j;
		return true;
	}

	return false;
}

/*
 * Initialize and free pages. We do it in two loops: first we initialize
 * struct page, then free to buddy allocator, because while we are
 * freeing pages we can access pages that are ahead (computing buddy
 * page in __free_one_page()).
 *
 * In order to try and keep some memory in the cache we have the loop
 * broken along max page order boundaries. This way we will not cause
 * any issues with the buddy page computation.
 */
static unsigned long __init
deferred_init_maxorder(u64 *i, struct zone *zone, unsigned long *start_pfn,
		       unsigned long *end_pfn)
{
	unsigned long mo_pfn = ALIGN(*start_pfn + 1, MAX_ORDER_NR_PAGES);
	unsigned long spfn = *start_pfn, epfn = *end_pfn;
	unsigned long nr_pages = 0;
	u64 j = *i;

	/* First we loop through and initialize the page values */
	for_each_free_mem_pfn_range_in_zone_from(j, zone, start_pfn, end_pfn) {
		unsigned long t;

		if (mo_pfn <= *start_pfn)
			break;

		t = min(mo_pfn, *end_pfn);
		nr_pages += deferred_init_pages(zone, *start_pfn, t);

		if (mo_pfn < *end_pfn) {
			*start_pfn = mo_pfn;
			break;
		}
	}

	/* Reset values and now loop through freeing pages as needed */
	swap(j, *i);

	for_each_free_mem_pfn_range_in_zone_from(j, zone, &spfn, &epfn) {
		unsigned long t;

		if (mo_pfn <= spfn)
			break;

		t = min(mo_pfn, epfn);
		deferred_free_pages(spfn, t);

		if (mo_pfn <= epfn)
			break;
	}

	return nr_pages;
}
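
/*
 * Each call above advances at most one MAX_ORDER block, so a caller simply
 * loops until its range is exhausted (this mirrors
 * deferred_init_memmap_chunk() below):
 *
 *	while (spfn < end_pfn) {
 *		nr_pages += deferred_init_maxorder(&i, zone, &spfn, &epfn);
 *		cond_resched();
 *	}
 */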

static void __init
deferred_init_memmap_chunk(unsigned long start_pfn, unsigned long end_pfn,
			   void *arg)
{
	unsigned long spfn, epfn;
	struct zone *zone = arg;
	u64 i;

	deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn, start_pfn);

	/*
	 * Initialize and free pages in MAX_ORDER sized increments so that we
	 * can avoid introducing any issues with the buddy allocator.
	 */
	while (spfn < end_pfn) {
		deferred_init_maxorder(&i, zone, &spfn, &epfn);
		cond_resched();
	}
}

/* An arch may override for more concurrency. */
__weak int __init
deferred_page_init_max_threads(const struct cpumask *node_cpumask)
{
	return 1;
}
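
/*
 * An arch override is a one-liner; x86, for instance, scales the thread
 * count to the CPUs local to the node (a sketch of the arch side, shown
 * here for illustration only):
 *
 *	int __init deferred_page_init_max_threads(const struct cpumask *node_cpumask)
 *	{
 *		return max_t(int, cpumask_weight(node_cpumask), 1);
 *	}
 */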

/* Initialise remaining memory on a node */
static int __init deferred_init_memmap(void *data)
{
	pg_data_t *pgdat = data;
	const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
	unsigned long spfn = 0, epfn = 0;
	unsigned long first_init_pfn, flags;
	unsigned long start = jiffies;
	struct zone *zone;
	int zid, max_threads;
	u64 i;

	/* Bind memory initialisation thread to a local node if possible */
	if (!cpumask_empty(cpumask))
		set_cpus_allowed_ptr(current, cpumask);

	pgdat_resize_lock(pgdat, &flags);
	first_init_pfn = pgdat->first_deferred_pfn;
	if (first_init_pfn == ULONG_MAX) {
		pgdat_resize_unlock(pgdat, &flags);
		pgdat_init_report_one_done();
		return 0;
	}

	/* Sanity check boundaries */
	BUG_ON(pgdat->first_deferred_pfn < pgdat->node_start_pfn);
	BUG_ON(pgdat->first_deferred_pfn > pgdat_end_pfn(pgdat));
	pgdat->first_deferred_pfn = ULONG_MAX;

	/*
	 * Once we unlock here, the zone cannot be grown anymore, thus if an
	 * interrupt thread must allocate this early in boot, zone must be
	 * pre-grown prior to start of deferred page initialization.
	 */
	pgdat_resize_unlock(pgdat, &flags);

	/* Only the highest zone is deferred so find it */
	for (zid = 0; zid < MAX_NR_ZONES; zid++) {
		zone = pgdat->node_zones + zid;
		if (first_init_pfn < zone_end_pfn(zone))
			break;
	}

	/* If the zone is empty somebody else may have cleared out the zone */
	if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
						 first_init_pfn))
		goto zone_empty;

	max_threads = deferred_page_init_max_threads(cpumask);

	while (spfn < epfn) {
		unsigned long epfn_align = ALIGN(epfn, PAGES_PER_SECTION);
		struct padata_mt_job job = {
			.thread_fn   = deferred_init_memmap_chunk,
			.fn_arg      = zone,
			.start       = spfn,
			.size        = epfn_align - spfn,
			.align       = PAGES_PER_SECTION,
			.min_chunk   = PAGES_PER_SECTION,
			.max_threads = max_threads,
		};

		padata_do_multithreaded(&job);
		deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
						    epfn_align);
	}
zone_empty:
	/* Sanity check that the next zone really is unpopulated */
	WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone));

	pr_info("node %d deferred pages initialised in %ums\n",
		pgdat->node_id, jiffies_to_msecs(jiffies - start));

	pgdat_init_report_one_done();
	return 0;
}

/*
 * If this zone has deferred pages, try to grow it by initializing enough
 * deferred pages to satisfy the allocation specified by order, rounded up to
 * the nearest PAGES_PER_SECTION boundary.  So we're adding memory in increments
 * of SECTION_SIZE bytes by initializing struct pages in increments of
 * PAGES_PER_SECTION * sizeof(struct page) bytes.
 *
 * Return true when zone was grown, otherwise return false. We return true even
 * when we grow less than requested, to let the caller decide if there are
 * enough pages to satisfy the allocation.
 *
 * Note: We use noinline because this function is needed only during boot, and
 * it is called from a __ref function _deferred_grow_zone(). This way we make
 * sure that it is not inlined into the permanent text section.
 */
static noinline bool __init
deferred_grow_zone(struct zone *zone, unsigned int order)
{
	unsigned long nr_pages_needed = ALIGN(1 << order, PAGES_PER_SECTION);
	pg_data_t *pgdat = zone->zone_pgdat;
	unsigned long first_deferred_pfn = pgdat->first_deferred_pfn;
	unsigned long spfn, epfn, flags;
	unsigned long nr_pages = 0;
	u64 i;

	/* Only the last zone may have deferred pages */
	if (zone_end_pfn(zone) != pgdat_end_pfn(pgdat))
		return false;

	pgdat_resize_lock(pgdat, &flags);

	/*
	 * If someone grew this zone while we were waiting for spinlock, return
	 * true, as there might be enough pages already.
	 */
	if (first_deferred_pfn != pgdat->first_deferred_pfn) {
		pgdat_resize_unlock(pgdat, &flags);
		return true;
	}

	/* If the zone is empty somebody else may have cleared out the zone */
	if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
						 first_deferred_pfn)) {
		pgdat->first_deferred_pfn = ULONG_MAX;
		pgdat_resize_unlock(pgdat, &flags);
		/* Retry only once. */
		return first_deferred_pfn != ULONG_MAX;
	}

	/*
	 * Initialize and free pages in MAX_ORDER sized increments so
	 * that we can avoid introducing any issues with the buddy
	 * allocator.
	 */
	while (spfn < epfn) {
		/* update our first deferred PFN for this section */
		first_deferred_pfn = spfn;

		nr_pages += deferred_init_maxorder(&i, zone, &spfn, &epfn);
		touch_nmi_watchdog();

		/* We should only stop along section boundaries */
		if ((first_deferred_pfn ^ spfn) < PAGES_PER_SECTION)
			continue;

		/* If our quota has been met we can stop here */
		if (nr_pages >= nr_pages_needed)
			break;
	}

	pgdat->first_deferred_pfn = spfn;
	pgdat_resize_unlock(pgdat, &flags);

	return nr_pages > 0;
}
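
/*
 * Worked example: with 4K pages and 128MB sections (PAGES_PER_SECTION ==
 * 32768, typical for x86_64), an order-3 request still grows the zone by a
 * full section:
 *
 *	nr_pages_needed = ALIGN(1 << 3, 32768) = 32768 pages (128MB)
 */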

/*
 * deferred_grow_zone() is __init, but it is called from
 * get_page_from_freelist() during early boot until deferred_pages permanently
 * disables this call. This is why we have the __ref wrapper, to avoid the
 * warning and to ensure that the function body gets unloaded.
 */
static bool __ref
_deferred_grow_zone(struct zone *zone, unsigned int order)
{
	return deferred_grow_zone(zone, order);
}

#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

void __init page_alloc_init_late(void)
{
	struct zone *zone;
	int nid;

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT

	/* There will be num_node_state(N_MEMORY) threads */
	atomic_set(&pgdat_init_n_undone, num_node_state(N_MEMORY));
	for_each_node_state(nid, N_MEMORY) {
		kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid);
	}

	/* Block until all are initialised */
	wait_for_completion(&pgdat_init_all_done_comp);

	/*
	 * The number of managed pages has changed due to the initialisation
	 * so the pcpu batch and high limits need to be updated or the limits
	 * will be artificially small.
	 */
	for_each_populated_zone(zone)
		zone_pcp_update(zone);

	/*
	 * We initialized the rest of the deferred pages.  Permanently disable
	 * on-demand struct page initialization.
	 */
	static_branch_disable(&deferred_pages);

	/* Reinit limits that are based on free pages after the kernel is up */
	files_maxfiles_init();
#endif

	buffer_init();

	/* Discard memblock private memory */
	memblock_discard();

	for_each_node_state(nid, N_MEMORY)
		shuffle_free_memory(NODE_DATA(nid));

	for_each_populated_zone(zone)
		set_zone_contiguous(zone);
}

#ifdef CONFIG_CMA
/* Free whole pageblock and set its migration type to MIGRATE_CMA. */
void __init init_cma_reserved_pageblock(struct page *page)
{
	unsigned i = pageblock_nr_pages;
	struct page *p = page;

	do {
		__ClearPageReserved(p);
		set_page_count(p, 0);
	} while (++p, --i);

	set_pageblock_migratetype(page, MIGRATE_CMA);

	if (pageblock_order >= MAX_ORDER) {
		i = pageblock_nr_pages;
		p = page;
		do {
			set_page_refcounted(p);
			__free_pages(p, MAX_ORDER - 1);
			p += MAX_ORDER_NR_PAGES;
		} while (i -= MAX_ORDER_NR_PAGES);
	} else {
		set_page_refcounted(page);
		__free_pages(page, pageblock_order);
	}

	adjust_managed_page_count(page, pageblock_nr_pages);
}
#endif
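
/*
 * Worked example for the MAX_ORDER branch above (hypothetical config with
 * pageblock_order == 11 and MAX_ORDER == 11): a 2048-page pageblock cannot
 * be freed as one buddy, so it is freed as two order-10 halves:
 *
 *	__free_pages(page, 10);		pages [0, 1024)
 *	__free_pages(page + 1024, 10);	pages [1024, 2048)
 */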

/*
 * The order of subdivision here is critical for the IO subsystem.
 * Please do not alter this order without good reasons and regression
 * testing. Specifically, as large blocks of memory are subdivided,
 * the order in which smaller blocks are delivered depends on the order
 * they're subdivided in this function. This is the primary factor
 * influencing the order in which pages are delivered to the IO
 * subsystem according to empirical testing, and this is also justified
 * by considering the behavior of a buddy system containing a single
 * large block of memory acted on by a series of small allocations.
 * This behavior is a critical factor in sglist merging's success.
 *
 * -- nyc
 */
static inline void expand(struct zone *zone, struct page *page,
	int low, int high, int migratetype)
{
	unsigned long size = 1 << high;

	while (high > low) {
		high--;
		size >>= 1;
		VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);

		/*
		 * Mark as guard pages (or page), so that they can be merged
		 * back into the allocator when the buddy is freed. The
		 * corresponding page table entries are not touched; the
		 * pages stay not present in the virtual address space.
		 */
		if (set_page_guard(zone, &page[size], high, migratetype))
			continue;

		add_to_free_list(&page[size], zone, high, migratetype);
		set_buddy_order(&page[size], high);
	}
}
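
/*
 * Worked example: expand(zone, page, 0, 3, mt), i.e. carving an order-0
 * page out of an order-3 block, leaves:
 *
 *	page[0]      returned to the caller
 *	page[1]      freed at order 0
 *	page[2..3]   freed at order 1
 *	page[4..7]   freed at order 2
 */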

static void check_new_page_bad(struct page *page)
{
	if (unlikely(page->flags & __PG_HWPOISON)) {
		/* Don't complain about hwpoisoned pages */
		page_mapcount_reset(page); /* remove PageBuddy */
		return;
	}

	bad_page(page,
		 page_bad_reason(page, PAGE_FLAGS_CHECK_AT_PREP));
}

/*
 * This page is about to be returned from the page allocator
 */
static inline int check_new_page(struct page *page)
{
	if (likely(page_expected_state(page,
				PAGE_FLAGS_CHECK_AT_PREP|__PG_HWPOISON)))
		return 0;

	check_new_page_bad(page);
	return 1;
}

#ifdef CONFIG_DEBUG_VM
/*
 * With DEBUG_VM enabled, order-0 pages are checked for expected state when
 * being allocated from pcp lists. With debug_pagealloc also enabled, they are
 * also checked when pcp lists are refilled from the free lists.
 */
static inline bool check_pcp_refill(struct page *page)
{
	if (debug_pagealloc_enabled_static())
		return check_new_page(page);
	else
		return false;
}

static inline bool check_new_pcp(struct page *page)
{
	return check_new_page(page);
}
#else
/*
 * With DEBUG_VM disabled, free order-0 pages are checked for expected state
 * when pcp lists are being refilled from the free lists. With debug_pagealloc
 * enabled, they are also checked when being allocated from the pcp lists.
 */
static inline bool check_pcp_refill(struct page *page)
{
	return check_new_page(page);
}
static inline bool check_new_pcp(struct page *page)
{
	if (debug_pagealloc_enabled_static())
		return check_new_page(page);
	else
		return false;
}
#endif /* CONFIG_DEBUG_VM */

static bool check_new_pages(struct page *page, unsigned int order)
{
	int i;
	for (i = 0; i < (1 << order); i++) {
		struct page *p = page + i;

		if (unlikely(check_new_page(p)))
			return true;
	}

	return false;
}

inline void post_alloc_hook(struct page *page, unsigned int order,
				gfp_t gfp_flags)
{
	set_page_private(page, 0);
	set_page_refcounted(page);

	arch_alloc_page(page, order);
	debug_pagealloc_map_pages(page, 1 << order);
	kasan_alloc_pages(page, order);
	kernel_unpoison_pages(page, 1 << order);
	set_page_owner(page, order, gfp_flags);

	if (!want_init_on_free() && want_init_on_alloc(gfp_flags))
		kernel_init_free_pages(page, 1 << order);
}

static void prep_new_page(struct page *page, unsigned int order, gfp_t gfp_flags,
							unsigned int alloc_flags)
{
	post_alloc_hook(page, order, gfp_flags);

	if (order && (gfp_flags & __GFP_COMP))
		prep_compound_page(page, order);

	/*
	 * page is set pfmemalloc when ALLOC_NO_WATERMARKS was necessary to
	 * allocate the page. The expectation is that the caller is taking
	 * steps that will free more memory. The caller should avoid the page
	 * being used for !PFMEMALLOC purposes.
	 */
	if (alloc_flags & ALLOC_NO_WATERMARKS)
		set_page_pfmemalloc(page);
	else
		clear_page_pfmemalloc(page);
}

/*
 * Go through the free lists for the given migratetype and remove
 * the smallest available page from the freelists
 */
static __always_inline
struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
						int migratetype)
{
	unsigned int current_order;
	struct free_area *area;
	struct page *page;

	/* Find a page of the appropriate size in the preferred list */
	for (current_order = order; current_order < MAX_ORDER; ++current_order) {
		area = &(zone->free_area[current_order]);
		page = get_page_from_free_area(area, migratetype);
		if (!page)
			continue;
		del_page_from_free_list(page, zone, current_order);
		expand(zone, page, order, current_order, migratetype);
		set_pcppage_migratetype(page, migratetype);
		return page;
	}

	return NULL;
}


/*
 * This array describes the order in which lists are fallen back to when
 * the free lists for the desirable migrate type are depleted
 */
static int fallbacks[MIGRATE_TYPES][3] = {
	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_TYPES },
	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_TYPES },
	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_TYPES },
#ifdef CONFIG_CMA
	[MIGRATE_CMA]         = { MIGRATE_TYPES }, /* Never used */
#endif
#ifdef CONFIG_MEMORY_ISOLATION
	[MIGRATE_ISOLATE]     = { MIGRATE_TYPES }, /* Never used */
#endif
};

#ifdef CONFIG_CMA
static __always_inline struct page *__rmqueue_cma_fallback(struct zone *zone,
					unsigned int order)
{
	return __rmqueue_smallest(zone, order, MIGRATE_CMA);
}
#else
static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
					unsigned int order) { return NULL; }
#endif

/*
 * Move the free pages in a range to the freelist tail of the requested type.
 * Note that start_page and end_page are not aligned on a pageblock
 * boundary. If alignment is required, use move_freepages_block()
 */
static int move_freepages(struct zone *zone,
			  struct page *start_page, struct page *end_page,
			  int migratetype, int *num_movable)
{
	struct page *page;
	unsigned int order;
	int pages_moved = 0;

	for (page = start_page; page <= end_page;) {
		if (!pfn_valid_within(page_to_pfn(page))) {
			page++;
			continue;
		}

		if (!PageBuddy(page)) {
			/*
			 * We assume that pages that could be isolated for
			 * migration are movable. But we don't actually try
			 * isolating, as that would be expensive.
			 */
			if (num_movable &&
					(PageLRU(page) || __PageMovable(page)))
				(*num_movable)++;

			page++;
			continue;
		}

		/* Make sure we are not inadvertently changing nodes */
		VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
		VM_BUG_ON_PAGE(page_zone(page) != zone, page);

		order = buddy_order(page);
		move_to_free_list(page, zone, order, migratetype);
		page += 1 << order;
		pages_moved += 1 << order;
	}

	return pages_moved;
}

int move_freepages_block(struct zone *zone, struct page *page,
				int migratetype, int *num_movable)
{
	unsigned long start_pfn, end_pfn;
	struct page *start_page, *end_page;

	if (num_movable)
		*num_movable = 0;

	start_pfn = page_to_pfn(page);
	start_pfn = start_pfn & ~(pageblock_nr_pages-1);
	start_page = pfn_to_page(start_pfn);
	end_page = start_page + pageblock_nr_pages - 1;
	end_pfn = start_pfn + pageblock_nr_pages - 1;

	/* Do not cross zone boundaries */
	if (!zone_spans_pfn(zone, start_pfn))
		start_page = page;
	if (!zone_spans_pfn(zone, end_pfn))
		return 0;

	return move_freepages(zone, start_page, end_page, migratetype,
								num_movable);
}
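
/*
 * Example of the rounding above (pageblock_nr_pages == 512): for a page at
 * pfn 0x12345,
 *
 *	start_pfn = 0x12345 & ~511UL = 0x12200
 *	end_pfn   = 0x12200 + 511   = 0x123ff
 *
 * so the whole 2MB pageblock containing the page is moved in one go.
 */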

static void change_pageblock_range(struct page *pageblock_page,
					int start_order, int migratetype)
{
	int nr_pageblocks = 1 << (start_order - pageblock_order);

	while (nr_pageblocks--) {
		set_pageblock_migratetype(pageblock_page, migratetype);
		pageblock_page += pageblock_nr_pages;
	}
}

/*
 * When we are falling back to another migratetype during allocation, try to
 * steal extra free pages from the same pageblocks to satisfy further
 * allocations, instead of polluting multiple pageblocks.
 *
 * If we are stealing a relatively large buddy page, it is likely there will
 * be more free pages in the pageblock, so try to steal them all. For
 * reclaimable and unmovable allocations, we steal regardless of page size,
 * as fragmentation caused by those allocations polluting movable pageblocks
 * is worse than movable allocations stealing from unmovable and reclaimable
 * pageblocks.
 */
static bool can_steal_fallback(unsigned int order, int start_mt)
{
	/*
	 * This order check is intentionally kept even though the next
	 * check uses a more relaxed one. The reason is that we can steal
	 * the whole pageblock if this condition is met, while the check
	 * below doesn't guarantee that; it is merely a heuristic and
	 * could be changed at any time.
	 */
	if (order >= pageblock_order)
		return true;

	if (order >= pageblock_order / 2 ||
		start_mt == MIGRATE_RECLAIMABLE ||
		start_mt == MIGRATE_UNMOVABLE ||
		page_group_by_mobility_disabled)
		return true;

	return false;
}

static inline bool boost_watermark(struct zone *zone)
{
	unsigned long max_boost;

	if (!watermark_boost_factor)
		return false;
	/*
	 * Don't bother in zones that are unlikely to produce results.
	 * On small machines, including kdump capture kernels running
	 * in a small area, boosting the watermark can cause an out of
	 * memory situation immediately.
	 */
	if ((pageblock_nr_pages * 4) > zone_managed_pages(zone))
		return false;

	max_boost = mult_frac(zone->_watermark[WMARK_HIGH],
			watermark_boost_factor, 10000);

	/*
	 * high watermark may be uninitialised if fragmentation occurs
	 * very early in boot so do not boost. We do not fall
	 * through and boost by pageblock_nr_pages as failing
	 * allocations that early means that reclaim is not going
	 * to help and it may even be impossible to reclaim the
	 * boosted watermark resulting in a hang.
	 */
	if (!max_boost)
		return false;

	max_boost = max(pageblock_nr_pages, max_boost);

	zone->watermark_boost = min(zone->watermark_boost + pageblock_nr_pages,
		max_boost);

	return true;
}
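
/*
 * Worked example (hypothetical zone): with the default
 * watermark_boost_factor of 15000 and a high watermark of 10000 pages,
 *
 *	max_boost = mult_frac(10000, 15000, 10000) = 15000 pages
 *
 * so each fallback event raises watermark_boost by pageblock_nr_pages,
 * saturating at 150% of the high watermark.
 */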

/*
 * This function implements the actual steal behaviour. If order is large
 * enough, we can steal the whole pageblock. If not, we first move the free
 * pages in this pageblock to our migratetype and determine how many
 * already-allocated pages with a compatible migratetype are in the
 * pageblock. If at least half of the pages are free or compatible, we can
 * change the migratetype of the pageblock itself, so pages freed in the
 * future will be put on the correct free list.
 */
static void steal_suitable_fallback(struct zone *zone, struct page *page,
		unsigned int alloc_flags, int start_type, bool whole_block)
{
	unsigned int current_order = buddy_order(page);
	int free_pages, movable_pages, alike_pages;
	int old_block_type;

	old_block_type = get_pageblock_migratetype(page);

	/*
	 * This can happen due to races and we want to prevent broken
	 * highatomic accounting.
	 */
	if (is_migrate_highatomic(old_block_type))
		goto single_page;

	/* Take ownership for orders >= pageblock_order */
	if (current_order >= pageblock_order) {
		change_pageblock_range(page, current_order, start_type);
		goto single_page;
	}

	/*
	 * Boost watermarks to increase reclaim pressure to reduce the
	 * likelihood of future fallbacks. Wake kswapd now as the node
	 * may be balanced overall and kswapd will not wake naturally.
	 */
	if (boost_watermark(zone) && (alloc_flags & ALLOC_KSWAPD))
		set_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);

	/* We are not allowed to try stealing from the whole block */
	if (!whole_block)
		goto single_page;

	free_pages = move_freepages_block(zone, page, start_type,
						&movable_pages);
	/*
	 * Determine how many pages are compatible with our allocation.
	 * For movable allocation, it's the number of movable pages which
	 * we just obtained. For other types it's a bit more tricky.
	 */
	if (start_type == MIGRATE_MOVABLE) {
		alike_pages = movable_pages;
	} else {
		/*
		 * If we are falling back a RECLAIMABLE or UNMOVABLE allocation
		 * to MOVABLE pageblock, consider all non-movable pages as
		 * compatible. If it's UNMOVABLE falling back to RECLAIMABLE or
		 * vice versa, be conservative since we can't distinguish the
		 * exact migratetype of non-movable pages.
		 */
		if (old_block_type == MIGRATE_MOVABLE)
			alike_pages = pageblock_nr_pages
						- (free_pages + movable_pages);
		else
			alike_pages = 0;
	}

	/* moving whole block can fail due to zone boundary conditions */
	if (!free_pages)
		goto single_page;

	/*
	 * If a sufficient number of pages in the block are either free or of
	 * comparable migratability as our allocation, claim the whole block.
	 */
	if (free_pages + alike_pages >= (1 << (pageblock_order-1)) ||
			page_group_by_mobility_disabled)
		set_pageblock_migratetype(page, start_type);

	return;

single_page:
	move_to_free_list(page, zone, current_order, start_type);
}
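
/*
 * Worked example (pageblock_nr_pages == 512, so pageblock_order == 9): an
 * unmovable allocation falls back into a movable pageblock and
 * move_freepages_block() reports free_pages == 200, movable_pages == 40:
 *
 *	alike_pages = 512 - (200 + 40) = 272
 *	200 + 272 = 472 >= 256 == 1 << (pageblock_order - 1)
 *
 * so the whole pageblock is claimed for MIGRATE_UNMOVABLE.
 */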

/*
 * Check whether there is a suitable fallback freepage with requested order.
 * If only_stealable is true, this function returns fallback_mt only if
 * we can steal the other freepages altogether. This would help to reduce
 * fragmentation due to mixed migratetype pages in one pageblock.
 */
int find_suitable_fallback(struct free_area *area, unsigned int order,
			int migratetype, bool only_stealable, bool *can_steal)
{
	int i;
	int fallback_mt;

	if (area->nr_free == 0)
		return -1;

	*can_steal = false;
	for (i = 0;; i++) {
		fallback_mt = fallbacks[migratetype][i];
		if (fallback_mt == MIGRATE_TYPES)
			break;

		if (free_area_empty(area, fallback_mt))
			continue;

		if (can_steal_fallback(order, migratetype))
			*can_steal = true;

		if (!only_stealable)
			return fallback_mt;

		if (*can_steal)
			return fallback_mt;
	}

	return -1;
}
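
/*
 * Usage sketch (simplified from __rmqueue_fallback() below): the candidates
 * are tried in fallbacks[] order, e.g. MIGRATE_RECLAIMABLE before
 * MIGRATE_MOVABLE for an unmovable request:
 *
 *	area = &zone->free_area[current_order];
 *	fallback_mt = find_suitable_fallback(area, current_order,
 *				MIGRATE_UNMOVABLE, false, &can_steal);
 *	if (fallback_mt != -1)
 *		page = get_page_from_free_area(area, fallback_mt);
 */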

/*
 * Reserve a pageblock for exclusive use of high-order atomic allocations if
 * there are no empty page blocks that contain a page with a suitable order
 */
static void reserve_highatomic_pageblock(struct page *page, struct zone *zone,
				unsigned int alloc_order)
{
	int mt;
	unsigned long max_managed, flags;

	/*
	 * Limit the number reserved to 1 pageblock or roughly 1% of a zone.
	 * Check is race-prone but harmless.
	 */
	max_managed = (zone_managed_pages(zone) / 100) + pageblock_nr_pages;
	if (zone->nr_reserved_highatomic >= max_managed)
		return;

	spin_lock_irqsave(&zone->lock, flags);

	/* Recheck the nr_reserved_highatomic limit under the lock */
	if (zone->nr_reserved_highatomic >= max_managed)
		goto out_unlock;

	/* Yoink! */
	mt = get_pageblock_migratetype(page);
	if (!is_migrate_highatomic(mt) && !is_migrate_isolate(mt)
	    && !is_migrate_cma(mt)) {
		zone->nr_reserved_highatomic += pageblock_nr_pages;
		set_pageblock_migratetype(page, MIGRATE_HIGHATOMIC);
		move_freepages_block(zone, page, MIGRATE_HIGHATOMIC, NULL);
	}

out_unlock:
	spin_unlock_irqrestore(&zone->lock, flags);
}
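
/*
 * Worked example: a zone with 4GB managed (1048576 4K pages) and 512-page
 * pageblocks caps the reservation at
 *
 *	max_managed = 1048576 / 100 + 512 = 10997 pages
 *
 * i.e. roughly 1% of the zone, or about 21 highatomic pageblocks.
 */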

/*
 * Used when an allocation is about to fail under memory pressure. This
 * potentially hurts the reliability of high-order allocations when under
 * intense memory pressure but failed atomic allocations should be easier
 * to recover from than an OOM.
 *
 * If @force is true, try to unreserve a pageblock even though highatomic
 * pageblock is exhausted.
 */
static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
						bool force)
{
	struct zonelist *zonelist = ac->zonelist;
	unsigned long flags;
	struct zoneref *z;
	struct zone *zone;
	struct page *page;
	int order;
	bool ret;

	for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->highest_zoneidx,
								ac->nodemask) {
		/*
		 * Preserve at least one pageblock unless memory pressure
		 * is really high.
		 */
		if (!force && zone->nr_reserved_highatomic <=
					pageblock_nr_pages)
			continue;

		spin_lock_irqsave(&zone->lock, flags);
		for (order = 0; order < MAX_ORDER; order++) {
			struct free_area *area = &(zone->free_area[order]);

			page = get_page_from_free_area(area, MIGRATE_HIGHATOMIC);
			if (!page)
				continue;

			/*
			 * In the page freeing path, migratetype changes are
			 * racy so we can encounter several free pages in a
			 * pageblock in this loop although we changed the
			 * pageblock type from highatomic to ac->migratetype.
			 * So we should adjust the count once.
			 */
			if (is_migrate_highatomic_page(page)) {
				/*
				 * It should never happen but changes to
				 * locking could inadvertently allow a per-cpu
				 * drain to add pages to MIGRATE_HIGHATOMIC
				 * while unreserving so be safe and watch for
				 * underflows.
				 */
				zone->nr_reserved_highatomic -= min(
						pageblock_nr_pages,
						zone->nr_reserved_highatomic);
			}

			/*
			 * Convert to ac->migratetype and avoid the normal
			 * pageblock stealing heuristics. Minimally, the caller
			 * is doing the work and needs the pages. More
			 * importantly, if the block was always converted to
			 * MIGRATE_UNMOVABLE or another type then the number
			 * of pageblocks that cannot be completely freed
			 * may increase.
			 */
			set_pageblock_migratetype(page, ac->migratetype);
			ret = move_freepages_block(zone, page, ac->migratetype,
									NULL);
			if (ret) {
				spin_unlock_irqrestore(&zone->lock, flags);
				return ret;
			}
		}
		spin_unlock_irqrestore(&zone->lock, flags);
	}

	return false;
}

/*
 * Try finding a free buddy page on the fallback list and put it on the free
 * list of requested migratetype, possibly along with other pages from the same
 * block, depending on fragmentation avoidance heuristics. Returns true if
 * fallback was found so that __rmqueue_smallest() can grab it.
 *
 * The use of signed ints for order and current_order is a deliberate
 * deviation from the rest of this file, to make the for loop
 * condition simpler.
 */
static __always_inline bool
__rmqueue_fallback(struct zone *zone, int order, int start_migratetype,
						unsigned int alloc_flags)
{
	struct free_area *area;
	int current_order;
	int min_order = order;
	struct page *page;
	int fallback_mt;
	bool can_steal;

	/*
	 * Do not steal pages from freelists belonging to other pageblocks
	 * i.e. orders < pageblock_order. If there are no local zones free,
	 * the zonelists will be reiterated without ALLOC_NOFRAGMENT.
	 */
	if (alloc_flags & ALLOC_NOFRAGMENT)
		min_order = pageblock_order;

	/*
	 * Find the largest available free page in the other list. This roughly
	 * approximates finding the pageblock with the most free pages, which
	 * would be too costly to do exactly.
	 */
	for (current_order = MAX_ORDER - 1; current_order >= min_order;
				--current_order) {
		area = &(zone->free_area[current_order]);
		fallback_mt = find_suitable_fallback(area, current_order,
				start_migratetype, false, &can_steal);
		if (fallback_mt == -1)
			continue;

		/*
		 * We cannot steal all free pages from the pageblock and the
		 * requested migratetype is movable. In that case it's better to
		 * steal and split the smallest available page instead of the
		 * largest available page, because even if the next movable
		 * allocation falls back into a different pageblock than this
		 * one, it won't cause permanent fragmentation.
		 */
		if (!can_steal && start_migratetype == MIGRATE_MOVABLE
					&& current_order > order)
			goto find_smallest;

		goto do_steal;
	}

	return false;

find_smallest:
	for (current_order = order; current_order < MAX_ORDER;
							current_order++) {
		area = &(zone->free_area[current_order]);
		fallback_mt = find_suitable_fallback(area, current_order,
				start_migratetype, false, &can_steal);
		if (fallback_mt != -1)
			break;
	}

	/*
	 * This should not happen - we already found a suitable fallback
	 * when looking for the largest page.
	 */
	VM_BUG_ON(current_order == MAX_ORDER);

do_steal:
	page = get_page_from_free_area(area, fallback_mt);

	steal_suitable_fallback(zone, page, alloc_flags, start_migratetype,
								can_steal);

	trace_mm_page_alloc_extfrag(page, order, current_order,
		start_migratetype, fallback_mt);

	return true;
}

/*
 * Do the hard work of removing an element from the buddy allocator.
 * Call me with the zone->lock already held.
 */
static __always_inline struct page *
__rmqueue(struct zone *zone, unsigned int order, int migratetype,
						unsigned int alloc_flags)
{
	struct page *page;

#ifdef CONFIG_CMA
	/*
	 * Balance movable allocations between regular and CMA areas by
	 * allocating from CMA when over half of the zone's free memory
	 * is in the CMA area.
	 */
	if (alloc_flags & ALLOC_CMA &&
	    zone_page_state(zone, NR_FREE_CMA_PAGES) >
	    zone_page_state(zone, NR_FREE_PAGES) / 2) {
		page = __rmqueue_cma_fallback(zone, order);
		if (page)
			return page;
	}
#endif
retry:
	page = __rmqueue_smallest(zone, order, migratetype);
	if (unlikely(!page)) {
		if (alloc_flags & ALLOC_CMA)
			page = __rmqueue_cma_fallback(zone, order);

		if (!page && __rmqueue_fallback(zone, order, migratetype,
								alloc_flags))
			goto retry;
	}

	trace_mm_page_alloc_zone_locked(page, order, migratetype);
	return page;
}
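
/*
 * Worked example for the CMA balancing above: with 1000 free pages of
 * which 600 are CMA, 600 > 1000 / 2 holds, so a movable request with
 * ALLOC_CMA is served from CMA first; this keeps the regular area from
 * being exhausted while the CMA area sits mostly idle.
 */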

/*
 * Obtain a specified number of elements from the buddy allocator, all under
 * a single hold of the lock, for efficiency.  Add them to the supplied list.
 * Returns the number of new pages which were placed at *list.
 */
static int rmqueue_bulk(struct zone *zone, unsigned int order,
			unsigned long count, struct list_head *list,
			int migratetype, unsigned int alloc_flags)
{
	int i, alloced = 0;

	spin_lock(&zone->lock);
	for (i = 0; i < count; ++i) {
		struct page *page = __rmqueue(zone, order, migratetype,
								alloc_flags);
		if (unlikely(page == NULL))
			break;

		if (unlikely(check_pcp_refill(page)))
			continue;

		/*
		 * Split buddy pages returned by expand() are received here in
		 * physical page order. The page is added to the tail of
		 * caller's list. From the caller's perspective, the linked list
		 * is ordered by page number under some conditions. This is
		 * useful for IO devices that can forward direction from the
		 * head, thus also in the physical page order. This is useful
		 * for IO devices that can merge IO requests if the physical
		 * pages are ordered properly.
		 */
		list_add_tail(&page->lru, list);
		alloced++;
		if (is_migrate_cma(get_pcppage_migratetype(page)))
			__mod_zone_page_state(zone, NR_FREE_CMA_PAGES,
					      -(1 << order));
	}

	/*
	 * i pages were removed from the buddy list even if some leak due
	 * to check_pcp_refill failing so adjust NR_FREE_PAGES based
	 * on i. Do not confuse with 'alloced' which is the number of
	 * pages added to the pcp list.
	 */
	__mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
	spin_unlock(&zone->lock);
	return alloced;
}

#ifdef CONFIG_NUMA
/*
 * Called from the vmstat counter updater to drain pagesets of this
 * currently executing processor on remote nodes after they have
 * expired.
 *
 * Note that this function must be called with the thread pinned to
 * a single processor.
 */
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
{
	unsigned long flags;
	int to_drain, batch;

	local_irq_save(flags);
	batch = READ_ONCE(pcp->batch);
	to_drain = min(pcp->count, batch);
	if (to_drain > 0)
		free_pcppages_bulk(zone, to_drain, pcp);
	local_irq_restore(flags);
}
#endif

/*
 * Drain pcplists of the indicated processor and zone.
 *
 * The processor must either be the current processor and the
 * thread pinned to the current processor or a processor that
 * is not online.
 */
static void drain_pages_zone(unsigned int cpu, struct zone *zone)
{
	unsigned long flags;
	struct per_cpu_pageset *pset;
	struct per_cpu_pages *pcp;

	local_irq_save(flags);
	pset = per_cpu_ptr(zone->pageset, cpu);

	pcp = &pset->pcp;
	if (pcp->count)
		free_pcppages_bulk(zone, pcp->count, pcp);
	local_irq_restore(flags);
}

/*
 * Drain pcplists of all zones on the indicated processor.
 *
 * The processor must either be the current processor and the
 * thread pinned to the current processor or a processor that
 * is not online.
 */
static void drain_pages(unsigned int cpu)
{
	struct zone *zone;

	for_each_populated_zone(zone) {
		drain_pages_zone(cpu, zone);
	}
}

/*
 * Spill all of this CPU's per-cpu pages back into the buddy allocator.
 *
 * The CPU has to be pinned. When zone parameter is non-NULL, spill just
 * the single zone's pages.
 */
void drain_local_pages(struct zone *zone)
{
	int cpu = smp_processor_id();

	if (zone)
		drain_pages_zone(cpu, zone);
	else
		drain_pages(cpu);
}

static void drain_local_pages_wq(struct work_struct *work)
{
	struct pcpu_drain *drain;

	drain = container_of(work, struct pcpu_drain, work);

	/*
	 * drain_all_pages doesn't use proper cpu hotplug protection so
	 * we can race with cpu offline when the WQ can move this from
	 * a cpu pinned worker to an unbound one. We can operate on a
	 * different cpu which is all right but we also have to make sure
	 * to not move to a different one.
	 */
	preempt_disable();
	drain_local_pages(drain->zone);
	preempt_enable();
}

/*
 * The implementation of drain_all_pages(), exposing an extra parameter to
 * drain on all cpus.
 *
 * drain_all_pages() is optimized to only execute on cpus where pcplists are
 * not empty. The check for non-emptiness can however race with a free to
 * pcplist that has not yet increased the pcp->count from 0 to 1. Callers
 * that need the guarantee that every CPU has drained can disable the
 * optimizing racy check.
 */
static void __drain_all_pages(struct zone *zone, bool force_all_cpus)
{
	int cpu;

	/*
	 * Allocate in the BSS so we won't require allocation in
	 * direct reclaim path for CONFIG_CPUMASK_OFFSTACK=y
	 */
	static cpumask_t cpus_with_pcps;

	/*
	 * Make sure nobody triggers this path before mm_percpu_wq is fully
	 * initialized.
	 */
	if (WARN_ON_ONCE(!mm_percpu_wq))
		return;

	/*
	 * Do not drain if one is already in progress unless it's specific to
	 * a zone. Such callers are primarily CMA and memory hotplug and need
	 * the drain to be complete when the call returns.
	 */
	if (unlikely(!mutex_trylock(&pcpu_drain_mutex))) {
		if (!zone)
			return;
		mutex_lock(&pcpu_drain_mutex);
	}

	/*
	 * We don't care about racing with CPU hotplug event
	 * as offline notification will cause the notified
	 * cpu to drain that CPU pcps and on_each_cpu_mask
	 * disables preemption as part of its processing
	 */
	for_each_online_cpu(cpu) {
		struct per_cpu_pageset *pcp;
		struct zone *z;
		bool has_pcps = false;

		if (force_all_cpus) {
			/*
			 * The pcp.count check is racy, some callers need a
			 * guarantee that no cpu is missed.
			 */
			has_pcps = true;
		} else if (zone) {
			pcp = per_cpu_ptr(zone->pageset, cpu);
			if (pcp->pcp.count)
				has_pcps = true;
		} else {
			for_each_populated_zone(z) {
				pcp = per_cpu_ptr(z->pageset, cpu);
				if (pcp->pcp.count) {
					has_pcps = true;
					break;
				}
			}
		}

		if (has_pcps)
			cpumask_set_cpu(cpu, &cpus_with_pcps);
		else
			cpumask_clear_cpu(cpu, &cpus_with_pcps);
	}

	for_each_cpu(cpu, &cpus_with_pcps) {
		struct pcpu_drain *drain = per_cpu_ptr(&pcpu_drain, cpu);

		drain->zone = zone;
		INIT_WORK(&drain->work, drain_local_pages_wq);
		queue_work_on(cpu, mm_percpu_wq, &drain->work);
	}
	for_each_cpu(cpu, &cpus_with_pcps)
		flush_work(&per_cpu_ptr(&pcpu_drain, cpu)->work);

	mutex_unlock(&pcpu_drain_mutex);
}

/*
 * Spill all the per-cpu pages from all CPUs back into the buddy allocator.
 *
 * When zone parameter is non-NULL, spill just the single zone's pages.
 *
 * Note that this can be extremely slow as the draining happens in a workqueue.
 */
void drain_all_pages(struct zone *zone)
{
	__drain_all_pages(zone, false);
}

#ifdef CONFIG_HIBERNATION

/*
 * Touch the watchdog for every WD_PAGE_COUNT pages.
 */
#define WD_PAGE_COUNT	(128*1024)

void mark_free_pages(struct zone *zone)
{
	unsigned long pfn, max_zone_pfn, page_count = WD_PAGE_COUNT;
	unsigned long flags;
	unsigned int order, t;
	struct page *page;

	if (zone_is_empty(zone))
		return;

	spin_lock_irqsave(&zone->lock, flags);

	max_zone_pfn = zone_end_pfn(zone);
	for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
		if (pfn_valid(pfn)) {
			page = pfn_to_page(pfn);

			if (!--page_count) {
				touch_nmi_watchdog();
				page_count = WD_PAGE_COUNT;
			}

			if (page_zone(page) != zone)
				continue;

			if (!swsusp_page_is_forbidden(page))
				swsusp_unset_page_free(page);
		}

	for_each_migratetype_order(order, t) {
		list_for_each_entry(page,
				&zone->free_area[order].free_list[t], lru) {
			unsigned long i;

			pfn = page_to_pfn(page);
			for (i = 0; i < (1UL << order); i++) {
				if (!--page_count) {
					touch_nmi_watchdog();
					page_count = WD_PAGE_COUNT;
				}
				swsusp_set_page_free(pfn_to_page(pfn + i));
			}
		}
	}
	spin_unlock_irqrestore(&zone->lock, flags);
}
#endif /* CONFIG_HIBERNATION */

static bool free_unref_page_prepare(struct page *page, unsigned long pfn)
{
	int migratetype;

	if (!free_pcp_prepare(page))
		return false;

	migratetype = get_pfnblock_migratetype(page, pfn);
	set_pcppage_migratetype(page, migratetype);
	return true;
}

static void free_unref_page_commit(struct page *page, unsigned long pfn)
{
	struct zone *zone = page_zone(page);
	struct per_cpu_pages *pcp;
	int migratetype;

	migratetype = get_pcppage_migratetype(page);
	__count_vm_event(PGFREE);

	/*
	 * We only track unmovable, reclaimable and movable on pcp lists.
	 * Free ISOLATE pages back to the allocator because they are being
	 * offlined but treat HIGHATOMIC as movable pages so we can get those
	 * areas back if necessary. Otherwise, we may have to free
	 * excessively into the page allocator
	 */
	if (migratetype >= MIGRATE_PCPTYPES) {
		if (unlikely(is_migrate_isolate(migratetype))) {
			free_one_page(zone, page, pfn, 0, migratetype,
				      FPI_NONE);
			return;
		}
		migratetype = MIGRATE_MOVABLE;
	}

	pcp = &this_cpu_ptr(zone->pageset)->pcp;
	list_add(&page->lru, &pcp->lists[migratetype]);
	pcp->count++;
	if (pcp->count >= READ_ONCE(pcp->high))
		free_pcppages_bulk(zone, READ_ONCE(pcp->batch), pcp);
}

/*
 * Free a 0-order page
 */
void free_unref_page(struct page *page)
{
	unsigned long flags;
	unsigned long pfn = page_to_pfn(page);

	if (!free_unref_page_prepare(page, pfn))
		return;

	local_irq_save(flags);
	free_unref_page_commit(page, pfn);
	local_irq_restore(flags);
}

/*
 * Free a list of 0-order pages
 */
void free_unref_page_list(struct list_head *list)
{
	struct page *page, *next;
	unsigned long flags, pfn;
	int batch_count = 0;

	/* Prepare pages for freeing */
	list_for_each_entry_safe(page, next, list, lru) {
		pfn = page_to_pfn(page);
		if (!free_unref_page_prepare(page, pfn))
			list_del(&page->lru);
		set_page_private(page, pfn);
	}

	local_irq_save(flags);
	list_for_each_entry_safe(page, next, list, lru) {
		unsigned long pfn = page_private(page);

		set_page_private(page, 0);
		trace_mm_page_free_batched(page);
		free_unref_page_commit(page, pfn);

		/*
		 * Guard against excessive IRQ disabled times when we get
		 * a large list of pages to free.
		 */
		if (++batch_count == SWAP_CLUSTER_MAX) {
			local_irq_restore(flags);
			batch_count = 0;
			local_irq_save(flags);
		}
	}
	local_irq_restore(flags);
}

/*
 * split_page takes a non-compound higher-order page, and splits it into
 * n (1<<order) sub-pages: page[0..n]
 * Each sub-page must be freed individually.
 *
 * Note: this is probably too low level an operation for use in drivers.
 * Please consult with lkml before using this in your driver.
 */
void split_page(struct page *page, unsigned int order)
{
	int i;

	VM_BUG_ON_PAGE(PageCompound(page), page);
	VM_BUG_ON_PAGE(!page_count(page), page);

	for (i = 1; i < (1 << order); i++)
		set_page_refcounted(page + i);
	split_page_owner(page, 1 << order);
}
EXPORT_SYMBOL_GPL(split_page);

int __isolate_free_page(struct page *page, unsigned int order)
{
	unsigned long watermark;
	struct zone *zone;
	int mt;

	BUG_ON(!PageBuddy(page));

	zone = page_zone(page);
	mt = get_pageblock_migratetype(page);

	if (!is_migrate_isolate(mt)) {
		/*
		 * Obey watermarks as if the page was being allocated. We can
		 * emulate a high-order watermark check with a raised order-0
		 * watermark, because we already know our high-order page
		 * exists.
		 */
		watermark = zone->_watermark[WMARK_MIN] + (1UL << order);
		if (!zone_watermark_ok(zone, 0, watermark, 0, ALLOC_CMA))
			return 0;

		__mod_zone_freepage_state(zone, -(1UL << order), mt);
	}

	/* Remove page from free list */
	del_page_from_free_list(page, zone, order);

	/*
	 * Set the pageblock if the isolated page is at least half of a
	 * pageblock
	 */
	if (order >= pageblock_order - 1) {
		struct page *endpage = page + (1 << order) - 1;
		for (; page < endpage; page += pageblock_nr_pages) {
			int mt = get_pageblock_migratetype(page);

			if (!is_migrate_isolate(mt) && !is_migrate_cma(mt)
			    && !is_migrate_highatomic(mt))
				set_pageblock_migratetype(page,
							  MIGRATE_MOVABLE);
		}
	}

	return 1UL << order;
}

/**
 * __putback_isolated_page - Return a now-isolated page back where we got it
 * @page: Page that was isolated
 * @order: Order of the isolated page
 * @mt: The page's pageblock's migratetype
 *
 * This function is meant to return a page pulled from the free lists via
 * __isolate_free_page back to the free lists they were pulled from.
 */
void __putback_isolated_page(struct page *page, unsigned int order, int mt)
{
	struct zone *zone = page_zone(page);

	/* zone lock should be held when this function is called */
	lockdep_assert_held(&zone->lock);

	/* Return isolated page to tail of freelist. */
	__free_one_page(page, page_to_pfn(page), zone, order, mt,
			FPI_SKIP_REPORT_NOTIFY | FPI_TO_TAIL);
}

/*
 * Update NUMA hit/miss statistics
 *
 * Must be called with interrupts disabled.
 */
static inline void zone_statistics(struct zone *preferred_zone, struct zone *z)
{
#ifdef CONFIG_NUMA
	enum numa_stat_item local_stat = NUMA_LOCAL;

	/* skip numa counters update if numa stats is disabled */
	if (!static_branch_likely(&vm_numa_stat_key))
		return;

	if (zone_to_nid(z) != numa_node_id())
		local_stat = NUMA_OTHER;

	if (zone_to_nid(z) == zone_to_nid(preferred_zone))
		__inc_numa_state(z, NUMA_HIT);
	else {
		__inc_numa_state(z, NUMA_MISS);
		__inc_numa_state(preferred_zone, NUMA_FOREIGN);
	}
	__inc_numa_state(z, local_stat);
#endif
}

/* Remove page from the per-cpu list, caller must protect the list */
static struct page *__rmqueue_pcplist(struct zone *zone, int migratetype,
			unsigned int alloc_flags,
			struct per_cpu_pages *pcp,
			struct list_head *list)
{
	struct page *page;

	do {
		if (list_empty(list)) {
			pcp->count += rmqueue_bulk(zone, 0,
					READ_ONCE(pcp->batch), list,
					migratetype, alloc_flags);
			if (unlikely(list_empty(list)))
				return NULL;
		}

		page = list_first_entry(list, struct page, lru);
		list_del(&page->lru);
		pcp->count--;
	} while (check_new_pcp(page));

	return page;
}

/* Lock and remove page from the per-cpu list */
static struct page *rmqueue_pcplist(struct zone *preferred_zone,
			struct zone *zone, gfp_t gfp_flags,
			int migratetype, unsigned int alloc_flags)
{
	struct per_cpu_pages *pcp;
	struct list_head *list;
	struct page *page;
	unsigned long flags;

	local_irq_save(flags);
	pcp = &this_cpu_ptr(zone->pageset)->pcp;
	list = &pcp->lists[migratetype];
	page = __rmqueue_pcplist(zone,  migratetype, alloc_flags, pcp, list);
	if (page) {
		__count_zid_vm_events(PGALLOC, page_zonenum(page), 1);
		zone_statistics(preferred_zone, zone);
	}
	local_irq_restore(flags);
	return page;
}

/*
 * Allocate a page from the given zone. Use pcplists for order-0 allocations.
 */
static inline
struct page *rmqueue(struct zone *preferred_zone,
			struct zone *zone, unsigned int order,
			gfp_t gfp_flags, unsigned int alloc_flags,
			int migratetype)
{
	unsigned long flags;
	struct page *page;

	if (likely(order == 0)) {
		/*
		 * MIGRATE_MOVABLE pcplist could have the pages on CMA area and
		 * we need to skip it when CMA area isn't allowed.
		 */
		if (!IS_ENABLED(CONFIG_CMA) || alloc_flags & ALLOC_CMA ||
				migratetype != MIGRATE_MOVABLE) {
			page = rmqueue_pcplist(preferred_zone, zone, gfp_flags,
					migratetype, alloc_flags);
			goto out;
		}
	}

	/*
	 * We most definitely don't want callers attempting to
	 * allocate greater than order-1 page units with __GFP_NOFAIL.
	 */
	WARN_ON_ONCE((gfp_flags & __GFP_NOFAIL) && (order > 1));
	spin_lock_irqsave(&zone->lock, flags);

	do {
		page = NULL;
		/*
		 * order-0 request can reach here when the pcplist is skipped
		 * due to non-CMA allocation context. HIGHATOMIC area is
		 * reserved for high-order atomic allocation, so order-0
		 * request should skip it.
		 */
		if (order > 0 && alloc_flags & ALLOC_HARDER) {
			page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
			if (page)
				trace_mm_page_alloc_zone_locked(page, order, migratetype);
		}
		if (!page)
			page = __rmqueue(zone, order, migratetype, alloc_flags);
	} while (page && check_new_pages(page, order));
	spin_unlock(&zone->lock);
	if (!page)
		goto failed;
	__mod_zone_freepage_state(zone, -(1 << order),
				  get_pcppage_migratetype(page));

	__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
	zone_statistics(preferred_zone, zone);
	local_irq_restore(flags);

out:
	/* Separate test+clear to avoid unnecessary atomics */
	if (test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags)) {
		clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
		wakeup_kswapd(zone, 0, 0, zone_idx(zone));
	}

	VM_BUG_ON_PAGE(page && bad_range(zone, page), page);
	return page;

failed:
	local_irq_restore(flags);
	return NULL;
}

#ifdef CONFIG_FAIL_PAGE_ALLOC

static struct {
	struct fault_attr attr;

	bool ignore_gfp_highmem;
	bool ignore_gfp_reclaim;
	u32 min_order;
} fail_page_alloc = {
	.attr = FAULT_ATTR_INITIALIZER,
	.ignore_gfp_reclaim = true,
	.ignore_gfp_highmem = true,
	.min_order = 1,
};

static int __init setup_fail_page_alloc(char *str)
{
	return setup_fault_attr(&fail_page_alloc.attr, str);
}
__setup("fail_page_alloc=", setup_fail_page_alloc);
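/*
 * For reference, the boot parameter registered above takes the common
 * fault_attr form documented in Documentation/fault-injection, i.e.
 *
 *	fail_page_alloc=<interval>,<probability>,<space>,<times>
 *
 * so e.g. "fail_page_alloc=1,10,0,-1" (values illustrative) asks for
 * roughly 10% of eligible allocations to fail with no limit on the
 * failure count. The booleans and min_order above are adjusted at
 * runtime via debugfs below.
 */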

static bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
{
	if (order < fail_page_alloc.min_order)
		return false;
	if (gfp_mask & __GFP_NOFAIL)
		return false;
	if (fail_page_alloc.ignore_gfp_highmem && (gfp_mask & __GFP_HIGHMEM))
		return false;
	if (fail_page_alloc.ignore_gfp_reclaim &&
			(gfp_mask & __GFP_DIRECT_RECLAIM))
		return false;

	return should_fail(&fail_page_alloc.attr, 1 << order);
}

#ifdef CONFIG_FAULT_INJECTION_DEBUG_FS

static int __init fail_page_alloc_debugfs(void)
{
	umode_t mode = S_IFREG | 0600;
	struct dentry *dir;

	dir = fault_create_debugfs_attr("fail_page_alloc", NULL,
					&fail_page_alloc.attr);

	debugfs_create_bool("ignore-gfp-wait", mode, dir,
			    &fail_page_alloc.ignore_gfp_reclaim);
	debugfs_create_bool("ignore-gfp-highmem", mode, dir,
			    &fail_page_alloc.ignore_gfp_highmem);
	debugfs_create_u32("min-order", mode, dir, &fail_page_alloc.min_order);

	return 0;
}

late_initcall(fail_page_alloc_debugfs);

#endif /* CONFIG_FAULT_INJECTION_DEBUG_FS */

#else /* CONFIG_FAIL_PAGE_ALLOC */

static inline bool __should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
{
	return false;
}

#endif /* CONFIG_FAIL_PAGE_ALLOC */

noinline bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order)
{
	return __should_fail_alloc_page(gfp_mask, order);
}
ALLOW_ERROR_INJECTION(should_fail_alloc_page, TRUE);

static inline long __zone_watermark_unusable_free(struct zone *z,
				unsigned int order, unsigned int alloc_flags)
{
	const bool alloc_harder = (alloc_flags & (ALLOC_HARDER|ALLOC_OOM));
	long unusable_free = (1 << order) - 1;

	/*
	 * If the caller does not have rights to ALLOC_HARDER then subtract
	 * the high-atomic reserves. This will over-estimate the size of the
	 * atomic reserve but it avoids a search.
	 */
	if (likely(!alloc_harder))
		unusable_free += z->nr_reserved_highatomic;

#ifdef CONFIG_CMA
	/* If allocation can't use CMA areas don't use free CMA pages */
	if (!(alloc_flags & ALLOC_CMA))
		unusable_free += zone_page_state(z, NR_FREE_CMA_PAGES);
#endif

	return unusable_free;
}

/*
 * Return true if free base pages are above 'mark'. For high-order checks it
 * will return true if the order-0 watermark is reached and there is at least
 * one free page of a suitable size. Checking now avoids taking the zone lock
 * to check in the allocation paths if no pages are free.
 */
bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
			 int highest_zoneidx, unsigned int alloc_flags,
			 long free_pages)
{
	long min = mark;
	int o;
	const bool alloc_harder = (alloc_flags & (ALLOC_HARDER|ALLOC_OOM));

	/* free_pages may go negative - that's OK */
	free_pages -= __zone_watermark_unusable_free(z, order, alloc_flags);

	if (alloc_flags & ALLOC_HIGH)
		min -= min / 2;

	if (unlikely(alloc_harder)) {
		/*
		 * OOM victims can try even harder than normal ALLOC_HARDER
		 * users on the grounds that it's definitely going to be in
		 * the exit path shortly and free memory. Any allocation it
		 * makes during the free path will be small and short-lived.
		 */
		if (alloc_flags & ALLOC_OOM)
			min -= min / 2;
		else
			min -= min / 4;
	}

	/*
	 * Check watermarks for an order-0 allocation request. If these
	 * are not met, then a high-order request also cannot go ahead
	 * even if a suitable page happened to be free.
	 */
	if (free_pages <= min + z->lowmem_reserve[highest_zoneidx])
		return false;

	/* If this is an order-0 request then the watermark is fine */
	if (!order)
		return true;

	/* For a high-order request, check at least one suitable page is free */
	for (o = order; o < MAX_ORDER; o++) {
		struct free_area *area = &z->free_area[o];
		int mt;

		if (!area->nr_free)
			continue;

		for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) {
			if (!free_area_empty(area, mt))
				return true;
		}

#ifdef CONFIG_CMA
		if ((alloc_flags & ALLOC_CMA) &&
		    !free_area_empty(area, MIGRATE_CMA)) {
			return true;
		}
#endif
		if (alloc_harder && !free_area_empty(area, MIGRATE_HIGHATOMIC))
			return true;
	}
	return false;
}
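/*
 * Worked example for the checks above (numbers invented): with
 * mark = 1024 pages, ALLOC_HIGH halves min to 512 and a non-OOM
 * ALLOC_HARDER request takes off another quarter, leaving 384. An
 * order-3 request with free_pages = 2000 first subtracts the
 * (1 << 3) - 1 = 7 unusable pages plus any reserves it may not touch,
 * must keep the remainder above 384 + lowmem_reserve[highest_zoneidx],
 * and finally needs one free block of order >= 3 on an acceptable
 * migratetype freelist.
 */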

bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
		      int highest_zoneidx, unsigned int alloc_flags)
{
	return __zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags,
					zone_page_state(z, NR_FREE_PAGES));
}

static inline bool zone_watermark_fast(struct zone *z, unsigned int order,
				unsigned long mark, int highest_zoneidx,
				unsigned int alloc_flags, gfp_t gfp_mask)
{
	long free_pages;

	free_pages = zone_page_state(z, NR_FREE_PAGES);

	/*
	 * Fast check for order-0 only. If this fails then the reserves
	 * need to be calculated.
	 */
	if (!order) {
		long fast_free;

		fast_free = free_pages;
		fast_free -= __zone_watermark_unusable_free(z, 0, alloc_flags);
		if (fast_free > mark + z->lowmem_reserve[highest_zoneidx])
			return true;
	}

	if (__zone_watermark_ok(z, order, mark, highest_zoneidx, alloc_flags,
					free_pages))
		return true;
	/*
	 * Ignore watermark boosting for GFP_ATOMIC order-0 allocations
	 * when checking the min watermark. The min watermark is the
	 * point where boosting is ignored so that kswapd is woken up
	 * when below the low watermark.
	 */
	if (unlikely(!order && (gfp_mask & __GFP_ATOMIC) && z->watermark_boost
		&& ((alloc_flags & ALLOC_WMARK_MASK) == WMARK_MIN))) {
		mark = z->_watermark[WMARK_MIN];
		return __zone_watermark_ok(z, order, mark, highest_zoneidx,
					alloc_flags, free_pages);
	}

	return false;
}

bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
			unsigned long mark, int highest_zoneidx)
{
	long free_pages = zone_page_state(z, NR_FREE_PAGES);

	if (z->percpu_drift_mark && free_pages < z->percpu_drift_mark)
		free_pages = zone_page_state_snapshot(z, NR_FREE_PAGES);

	return __zone_watermark_ok(z, order, mark, highest_zoneidx, 0,
								free_pages);
}

#ifdef CONFIG_NUMA
static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
{
	return node_distance(zone_to_nid(local_zone), zone_to_nid(zone)) <=
				node_reclaim_distance;
}
#else	/* CONFIG_NUMA */
static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
{
	return true;
}
#endif	/* CONFIG_NUMA */

/*
 * The restriction on ZONE_DMA32 as being a suitable zone to use to avoid
 * fragmentation is subtle. If the preferred zone was HIGHMEM then
 * premature use of a lower zone may cause lowmem pressure problems that
 * are worse than fragmentation. If the next zone is ZONE_DMA then it is
 * probably too small. It only makes sense to spread allocations to avoid
 * fragmentation between the Normal and DMA32 zones.
 */
static inline unsigned int
alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask)
{
	unsigned int alloc_flags;

	/*
	 * __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD
	 * to save a branch.
	 */
	alloc_flags = (__force int) (gfp_mask & __GFP_KSWAPD_RECLAIM);

#ifdef CONFIG_ZONE_DMA32
	if (!zone)
		return alloc_flags;

	if (zone_idx(zone) != ZONE_NORMAL)
		return alloc_flags;

	/*
	 * If ZONE_DMA32 exists, assume it is the one after ZONE_NORMAL and
	 * the pointer is within zone->zone_pgdat->node_zones[]. Also assume
	 * on UMA that if Normal is populated then so is DMA32.
	 */
	BUILD_BUG_ON(ZONE_NORMAL - ZONE_DMA32 != 1);
	if (nr_online_nodes > 1 && !populated_zone(--zone))
		return alloc_flags;

	alloc_flags |= ALLOC_NOFRAGMENT;
#endif /* CONFIG_ZONE_DMA32 */
	return alloc_flags;
}

static inline unsigned int current_alloc_flags(gfp_t gfp_mask,
					unsigned int alloc_flags)
{
#ifdef CONFIG_CMA
	unsigned int pflags = current->flags;

	if (!(pflags & PF_MEMALLOC_NOCMA) &&
			gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE)
		alloc_flags |= ALLOC_CMA;

#endif
	return alloc_flags;
}

/*
 * get_page_from_freelist goes through the zonelist trying to allocate
 * a page.
 */
static struct page *
get_page_from_freelist(gfp_t gfp_mask, unsigned int order, int alloc_flags,
						const struct alloc_context *ac)
{
	struct zoneref *z;
	struct zone *zone;
	struct pglist_data *last_pgdat_dirty_limit = NULL;
	bool no_fallback;

retry:
	/*
	 * Scan zonelist, looking for a zone with enough free.
	 * See also __cpuset_node_allowed() comment in kernel/cpuset.c.
	 */
	no_fallback = alloc_flags & ALLOC_NOFRAGMENT;
	z = ac->preferred_zoneref;
	for_next_zone_zonelist_nodemask(zone, z, ac->highest_zoneidx,
					ac->nodemask) {
		struct page *page;
		unsigned long mark;

		if (cpusets_enabled() &&
			(alloc_flags & ALLOC_CPUSET) &&
			!__cpuset_zone_allowed(zone, gfp_mask))
				continue;
		/*
		 * When allocating a page cache page for writing, we
		 * want to get it from a node that is within its dirty
		 * limit, such that no single node holds more than its
		 * proportional share of globally allowed dirty pages.
		 * The dirty limits take into account the node's
		 * lowmem reserves and high watermark so that kswapd
		 * should be able to balance it without having to
		 * write pages from its LRU list.
		 *
		 * XXX: For now, allow allocations to potentially
		 * exceed the per-node dirty limit in the slowpath
		 * (spread_dirty_pages unset) before going into reclaim,
		 * which is important when on a NUMA setup the allowed
		 * nodes are together not big enough to reach the
		 * global limit.  The proper fix for these situations
		 * will require awareness of nodes in the
		 * dirty-throttling and the flusher threads.
		 */
		if (ac->spread_dirty_pages) {
			if (last_pgdat_dirty_limit == zone->zone_pgdat)
				continue;

			if (!node_dirty_ok(zone->zone_pgdat)) {
				last_pgdat_dirty_limit = zone->zone_pgdat;
				continue;
			}
		}

		if (no_fallback && nr_online_nodes > 1 &&
		    zone != ac->preferred_zoneref->zone) {
			int local_nid;

			/*
			 * If moving to a remote node, retry but allow
			 * fragmenting fallbacks. Locality is more important
			 * than fragmentation avoidance.
			 */
			local_nid = zone_to_nid(ac->preferred_zoneref->zone);
			if (zone_to_nid(zone) != local_nid) {
				alloc_flags &= ~ALLOC_NOFRAGMENT;
				goto retry;
			}
		}

		mark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
		if (!zone_watermark_fast(zone, order, mark,
				       ac->highest_zoneidx, alloc_flags,
				       gfp_mask)) {
			int ret;

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
			/*
			 * Watermark failed for this zone, but see if we can
			 * grow this zone if it contains deferred pages.
			 */
			if (static_branch_unlikely(&deferred_pages)) {
				if (_deferred_grow_zone(zone, order))
					goto try_this_zone;
			}
#endif
			/* Checked here to keep the fast path fast */
			BUILD_BUG_ON(ALLOC_NO_WATERMARKS < NR_WMARK);
			if (alloc_flags & ALLOC_NO_WATERMARKS)
				goto try_this_zone;

			if (node_reclaim_mode == 0 ||
			    !zone_allows_reclaim(ac->preferred_zoneref->zone, zone))
				continue;

			ret = node_reclaim(zone->zone_pgdat, gfp_mask, order);
			switch (ret) {
			case NODE_RECLAIM_NOSCAN:
				/* did not scan */
				continue;
			case NODE_RECLAIM_FULL:
				/* scanned but unreclaimable */
				continue;
			default:
				/* did we reclaim enough */
				if (zone_watermark_ok(zone, order, mark,
					ac->highest_zoneidx, alloc_flags))
					goto try_this_zone;

				continue;
			}
		}

try_this_zone:
		page = rmqueue(ac->preferred_zoneref->zone, zone, order,
				gfp_mask, alloc_flags, ac->migratetype);
		if (page) {
			prep_new_page(page, order, gfp_mask, alloc_flags);

			/*
			 * If this is a high-order atomic allocation then check
			 * if the pageblock should be reserved for the future
			 */
			if (unlikely(order && (alloc_flags & ALLOC_HARDER)))
				reserve_highatomic_pageblock(page, zone, order);

			return page;
		} else {
#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
			/* Try again if zone has deferred pages */
			if (static_branch_unlikely(&deferred_pages)) {
				if (_deferred_grow_zone(zone, order))
					goto try_this_zone;
			}
#endif
		}
	}

	/*
	 * It's possible on a UMA machine to get through all zones that are
	 * fragmented. If avoiding fragmentation, reset and try again.
	 */
	if (no_fallback) {
		alloc_flags &= ~ALLOC_NOFRAGMENT;
		goto retry;
	}

	return NULL;
}

static void warn_alloc_show_mem(gfp_t gfp_mask, nodemask_t *nodemask)
{
	unsigned int filter = SHOW_MEM_FILTER_NODES;

	/*
	 * This documents exceptions given to allocations in certain
	 * contexts that are allowed to allocate outside current's set
	 * of allowed nodes.
	 */
	if (!(gfp_mask & __GFP_NOMEMALLOC))
		if (tsk_is_oom_victim(current) ||
		    (current->flags & (PF_MEMALLOC | PF_EXITING)))
			filter &= ~SHOW_MEM_FILTER_NODES;
	if (in_interrupt() || !(gfp_mask & __GFP_DIRECT_RECLAIM))
		filter &= ~SHOW_MEM_FILTER_NODES;

	show_mem(filter, nodemask);
}

void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;
	static DEFINE_RATELIMIT_STATE(nopage_rs, 10*HZ, 1);

	if ((gfp_mask & __GFP_NOWARN) || !__ratelimit(&nopage_rs))
		return;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	pr_warn("%s: %pV, mode:%#x(%pGg), nodemask=%*pbl",
			current->comm, &vaf, gfp_mask, &gfp_mask,
			nodemask_pr_args(nodemask));
	va_end(args);

	cpuset_print_current_mems_allowed();
	pr_cont("\n");
	dump_stack();
	warn_alloc_show_mem(gfp_mask, nodemask);
}
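/*
 * With the format above, a report begins with a line shaped roughly like
 * (values invented for illustration):
 *
 *	kworker/0:1: page allocation failure: order:5, mode:0xcc0(GFP_KERNEL), nodemask=(null)
 *
 * followed by the current mems_allowed, a stack trace and the filtered
 * show_mem() dump.
 */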

static inline struct page *
__alloc_pages_cpuset_fallback(gfp_t gfp_mask, unsigned int order,
			      unsigned int alloc_flags,
			      const struct alloc_context *ac)
{
	struct page *page;

	page = get_page_from_freelist(gfp_mask, order,
			alloc_flags|ALLOC_CPUSET, ac);
	/*
	 * fallback to ignore cpuset restriction if our nodes
	 * are depleted
	 */
	if (!page)
		page = get_page_from_freelist(gfp_mask, order,
				alloc_flags, ac);

	return page;
}

static inline struct page *
__alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
	const struct alloc_context *ac, unsigned long *did_some_progress)
{
	struct oom_control oc = {
		.zonelist = ac->zonelist,
		.nodemask = ac->nodemask,
		.memcg = NULL,
		.gfp_mask = gfp_mask,
		.order = order,
	};
	struct page *page;

	*did_some_progress = 0;

	/*
	 * Acquire the oom lock.  If that fails, somebody else is
	 * making progress for us.
	 */
	if (!mutex_trylock(&oom_lock)) {
		*did_some_progress = 1;
		schedule_timeout_uninterruptible(1);
		return NULL;
	}

	/*
	 * Go through the zonelist yet one more time, keep very high watermark
	 * here, this is only to catch a parallel oom killing, we must fail if
	 * we're still under heavy pressure. But make sure that this reclaim
	 * attempt shall not depend on __GFP_DIRECT_RECLAIM && !__GFP_NORETRY
	 * allocation which will never fail due to oom_lock already held.
	 */
	page = get_page_from_freelist((gfp_mask | __GFP_HARDWALL) &
				      ~__GFP_DIRECT_RECLAIM, order,
				      ALLOC_WMARK_HIGH|ALLOC_CPUSET, ac);
	if (page)
		goto out;

	/* Coredumps can quickly deplete all memory reserves */
	if (current->flags & PF_DUMPCORE)
		goto out;
	/* The OOM killer will not help higher order allocs */
	if (order > PAGE_ALLOC_COSTLY_ORDER)
		goto out;
	/*
	 * We have already exhausted all our reclaim opportunities without any
	 * success so it is time to admit defeat. We will skip the OOM killer
	 * because it is very likely that the caller has a more reasonable
	 * fallback than shooting a random task.
	 *
	 * The OOM killer may not free memory on a specific node.
	 */
	if (gfp_mask & (__GFP_RETRY_MAYFAIL | __GFP_THISNODE))
		goto out;
	/* The OOM killer does not needlessly kill tasks for lowmem */
	if (ac->highest_zoneidx < ZONE_NORMAL)
		goto out;
	if (pm_suspended_storage())
		goto out;
	/*
	 * XXX: GFP_NOFS allocations should rather fail than rely on
	 * other request to make a forward progress.
	 * We are in an unfortunate situation where out_of_memory cannot
	 * do much for this context but let's try it to at least get
	 * access to memory reserved if the current task is killed (see
	 * out_of_memory). Once filesystems are ready to handle allocation
	 * failures more gracefully we should just bail out here.
	 */

	/* Exhausted what can be done so it's blame time */
	if (out_of_memory(&oc) || WARN_ON_ONCE(gfp_mask & __GFP_NOFAIL)) {
		*did_some_progress = 1;

		/*
		 * Help non-failing allocations by giving them access to memory
		 * reserves
		 */
		if (gfp_mask & __GFP_NOFAIL)
			page = __alloc_pages_cpuset_fallback(gfp_mask, order,
					ALLOC_NO_WATERMARKS, ac);
	}
out:
	mutex_unlock(&oom_lock);
	return page;
}

/*
 * Maximum number of compaction retries with a progress before OOM
 * killer is considered as the only way to move forward.
 */
#define MAX_COMPACT_RETRIES 16

#ifdef CONFIG_COMPACTION
/* Try memory compaction for high-order allocations before reclaim */
static struct page *
__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
		unsigned int alloc_flags, const struct alloc_context *ac,
		enum compact_priority prio, enum compact_result *compact_result)
{
	struct page *page = NULL;
	unsigned long pflags;
	unsigned int noreclaim_flag;

	if (!order)
		return NULL;

	psi_memstall_enter(&pflags);
	noreclaim_flag = memalloc_noreclaim_save();

	*compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
								prio, &page);

	memalloc_noreclaim_restore(noreclaim_flag);
	psi_memstall_leave(&pflags);

	/*
	 * At least in one zone compaction wasn't deferred or skipped, so let's
	 * count a compaction stall
	 */
	count_vm_event(COMPACTSTALL);

	/* Prep a captured page if available */
	if (page)
		prep_new_page(page, order, gfp_mask, alloc_flags);

	/* Try get a page from the freelist if available */
	if (!page)
		page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);

	if (page) {
		struct zone *zone = page_zone(page);

		zone->compact_blockskip_flush = false;
		compaction_defer_reset(zone, order, true);
		count_vm_event(COMPACTSUCCESS);
		return page;
	}

	/*
	 * It's bad if compaction run occurs and fails. The most likely reason
	 * is that pages exist, but not enough to satisfy watermarks.
	 */
	count_vm_event(COMPACTFAIL);

	cond_resched();

	return NULL;
}

static inline bool
should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
		     enum compact_result compact_result,
		     enum compact_priority *compact_priority,
		     int *compaction_retries)
{
	int max_retries = MAX_COMPACT_RETRIES;
	int min_priority;
	bool ret = false;
	int retries = *compaction_retries;
	enum compact_priority priority = *compact_priority;

	if (!order)
		return false;

	if (compaction_made_progress(compact_result))
		(*compaction_retries)++;

	/*
	 * compaction considers all zones as desperately out of memory
	 * so it doesn't really make much sense to retry except when the
	 * failure could be caused by insufficient priority
	 */
	if (compaction_failed(compact_result))
		goto check_priority;

	/*
	 * compaction was skipped because there are not enough order-0 pages
	 * to work with, so we retry only if it looks like reclaim can help.
	 */
	if (compaction_needs_reclaim(compact_result)) {
		ret = compaction_zonelist_suitable(ac, order, alloc_flags);
		goto out;
	}

	/*
	 * make sure the compaction wasn't deferred or didn't bail out early
	 * due to locks contention before we declare that we should give up.
	 * But the next retry should use a higher priority if allowed, so
	 * we don't just keep bailing out endlessly.
	 */
	if (compaction_withdrawn(compact_result)) {
		goto check_priority;
	}

	/*
	 * !costly requests are much more important than __GFP_RETRY_MAYFAIL
	 * costly ones because they are de facto nofail and invoke OOM
	 * killer to move on while costly can fail and users are ready
	 * to cope with that. 1/4 retries is rather arbitrary but we
	 * would need much more detailed feedback from compaction to
	 * make a better decision.
	 */
	if (order > PAGE_ALLOC_COSTLY_ORDER)
		max_retries /= 4;
	if (*compaction_retries <= max_retries) {
		ret = true;
		goto out;
	}

	/*
	 * Make sure there are attempts at the highest priority if we exhausted
	 * all retries or failed at the lower priorities.
	 */
check_priority:
	min_priority = (order > PAGE_ALLOC_COSTLY_ORDER) ?
			MIN_COMPACT_COSTLY_PRIORITY : MIN_COMPACT_PRIORITY;

	if (*compact_priority > min_priority) {
		(*compact_priority)--;
		*compaction_retries = 0;
		ret = true;
	}
out:
	trace_compact_retry(order, priority, compact_result, retries, max_retries, ret);
	return ret;
}
#else
static inline struct page *
__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
		unsigned int alloc_flags, const struct alloc_context *ac,
		enum compact_priority prio, enum compact_result *compact_result)
{
	*compact_result = COMPACT_SKIPPED;
	return NULL;
}

static inline bool
should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags,
		     enum compact_result compact_result,
		     enum compact_priority *compact_priority,
		     int *compaction_retries)
{
	struct zone *zone;
	struct zoneref *z;

	if (!order || order > PAGE_ALLOC_COSTLY_ORDER)
		return false;

	/*
	 * There are setups with compaction disabled which would prefer to loop
	 * inside the allocator rather than hit the oom killer prematurely.
	 * Let's give them a good hope and keep retrying while the order-0
	 * watermarks are OK.
	 */
	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
				ac->highest_zoneidx, ac->nodemask) {
		if (zone_watermark_ok(zone, 0, min_wmark_pages(zone),
					ac->highest_zoneidx, alloc_flags))
			return true;
	}
	return false;
}
#endif /* CONFIG_COMPACTION */

#ifdef CONFIG_LOCKDEP
static struct lockdep_map __fs_reclaim_map =
	STATIC_LOCKDEP_MAP_INIT("fs_reclaim", &__fs_reclaim_map);

static bool __need_reclaim(gfp_t gfp_mask)
{
	/* no reclaim without waiting on it */
	if (!(gfp_mask & __GFP_DIRECT_RECLAIM))
		return false;

	/* this guy won't enter reclaim */
	if (current->flags & PF_MEMALLOC)
		return false;

	if (gfp_mask & __GFP_NOLOCKDEP)
		return false;

	return true;
}

void __fs_reclaim_acquire(void)
{
	lock_map_acquire(&__fs_reclaim_map);
}

void __fs_reclaim_release(void)
{
	lock_map_release(&__fs_reclaim_map);
}

void fs_reclaim_acquire(gfp_t gfp_mask)
{
	gfp_mask = current_gfp_context(gfp_mask);

	if (__need_reclaim(gfp_mask)) {
		if (gfp_mask & __GFP_FS)
			__fs_reclaim_acquire();

#ifdef CONFIG_MMU_NOTIFIER
		lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
		lock_map_release(&__mmu_notifier_invalidate_range_start_map);
#endif

	}
}
EXPORT_SYMBOL_GPL(fs_reclaim_acquire);

void fs_reclaim_release(gfp_t gfp_mask)
{
	gfp_mask = current_gfp_context(gfp_mask);

	if (__need_reclaim(gfp_mask)) {
		if (gfp_mask & __GFP_FS)
			__fs_reclaim_release();
	}
}
EXPORT_SYMBOL_GPL(fs_reclaim_release);
#endif

/* Perform direct synchronous page reclaim */
static unsigned long
__perform_reclaim(gfp_t gfp_mask, unsigned int order,
					const struct alloc_context *ac)
{
	unsigned int noreclaim_flag;
	unsigned long pflags, progress;

	cond_resched();

	/* We now go into synchronous reclaim */
	cpuset_memory_pressure_bump();
	psi_memstall_enter(&pflags);
	fs_reclaim_acquire(gfp_mask);
	noreclaim_flag = memalloc_noreclaim_save();

	progress = try_to_free_pages(ac->zonelist, order, gfp_mask,
								ac->nodemask);

	memalloc_noreclaim_restore(noreclaim_flag);
	fs_reclaim_release(gfp_mask);
	psi_memstall_leave(&pflags);

	cond_resched();

	return progress;
}

/* The really slow allocator path where we enter direct reclaim */
static inline struct page *
__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
		unsigned int alloc_flags, const struct alloc_context *ac,
		unsigned long *did_some_progress)
{
	struct page *page = NULL;
	bool drained = false;

	*did_some_progress = __perform_reclaim(gfp_mask, order, ac);
	if (unlikely(!(*did_some_progress)))
		return NULL;

retry:
	page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);

	/*
	 * If an allocation failed after direct reclaim, it could be because
	 * pages are pinned on the per-cpu lists or in high alloc reserves.
	 * Shrink them and try again
	 */
	if (!page && !drained) {
		unreserve_highatomic_pageblock(ac, false);
		drain_all_pages(NULL);
		drained = true;
		goto retry;
	}

	return page;
}

static void wake_all_kswapds(unsigned int order, gfp_t gfp_mask,
			     const struct alloc_context *ac)
{
	struct zoneref *z;
	struct zone *zone;
	pg_data_t *last_pgdat = NULL;
	enum zone_type highest_zoneidx = ac->highest_zoneidx;

	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, highest_zoneidx,
					ac->nodemask) {
		if (last_pgdat != zone->zone_pgdat)
			wakeup_kswapd(zone, gfp_mask, order, highest_zoneidx);
		last_pgdat = zone->zone_pgdat;
	}
}

static inline unsigned int
gfp_to_alloc_flags(gfp_t gfp_mask)
{
	unsigned int alloc_flags = ALLOC_WMARK_MIN | ALLOC_CPUSET;

	/*
	 * __GFP_HIGH is assumed to be the same as ALLOC_HIGH
	 * and __GFP_KSWAPD_RECLAIM is assumed to be the same as ALLOC_KSWAPD
	 * to save two branches.
	 */
	BUILD_BUG_ON(__GFP_HIGH != (__force gfp_t) ALLOC_HIGH);
	BUILD_BUG_ON(__GFP_KSWAPD_RECLAIM != (__force gfp_t) ALLOC_KSWAPD);

	/*
	 * The caller may dip into page reserves a bit more if the caller
	 * cannot run direct reclaim, or if the caller has realtime scheduling
	 * policy or is asking for __GFP_HIGH memory.  GFP_ATOMIC requests will
	 * set both ALLOC_HARDER (__GFP_ATOMIC) and ALLOC_HIGH (__GFP_HIGH).
	 */
	alloc_flags |= (__force int)
		(gfp_mask & (__GFP_HIGH | __GFP_KSWAPD_RECLAIM));

	if (gfp_mask & __GFP_ATOMIC) {
		/*
		 * Not worth trying to allocate harder for __GFP_NOMEMALLOC even
		 * if it can't schedule.
		 */
		if (!(gfp_mask & __GFP_NOMEMALLOC))
			alloc_flags |= ALLOC_HARDER;
		/*
		 * Ignore cpuset mems for GFP_ATOMIC rather than fail, see the
		 * comment for __cpuset_node_allowed().
		 */
		alloc_flags &= ~ALLOC_CPUSET;
	} else if (unlikely(rt_task(current)) && !in_interrupt())
		alloc_flags |= ALLOC_HARDER;

	alloc_flags = current_alloc_flags(gfp_mask, alloc_flags);

	return alloc_flags;
}
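/*
 * Example mapping for gfp_to_alloc_flags() above: GFP_ATOMIC is
 * __GFP_HIGH | __GFP_ATOMIC | __GFP_KSWAPD_RECLAIM, so such a request
 * (absent __GFP_NOMEMALLOC) leaves here with ALLOC_WMARK_MIN |
 * ALLOC_HIGH | ALLOC_HARDER | ALLOC_KSWAPD set and ALLOC_CPUSET
 * cleared, before current_alloc_flags() considers ALLOC_CMA.
 */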

static bool oom_reserves_allowed(struct task_struct *tsk)
{
	if (!tsk_is_oom_victim(tsk))
		return false;

	/*
	 * !MMU doesn't have oom reaper so give access to memory reserves
	 * only to the thread with TIF_MEMDIE set
	 */
	if (!IS_ENABLED(CONFIG_MMU) && !test_thread_flag(TIF_MEMDIE))
		return false;

	return true;
}

/*
 * Distinguish requests which really need access to full memory
 * reserves from oom victims which can live with a portion of it
 */
static inline int __gfp_pfmemalloc_flags(gfp_t gfp_mask)
{
	if (unlikely(gfp_mask & __GFP_NOMEMALLOC))
		return 0;
	if (gfp_mask & __GFP_MEMALLOC)
		return ALLOC_NO_WATERMARKS;
	if (in_serving_softirq() && (current->flags & PF_MEMALLOC))
		return ALLOC_NO_WATERMARKS;
	if (!in_interrupt()) {
		if (current->flags & PF_MEMALLOC)
			return ALLOC_NO_WATERMARKS;
		else if (oom_reserves_allowed(current))
			return ALLOC_OOM;
	}

	return 0;
}

bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
{
	return !!__gfp_pfmemalloc_flags(gfp_mask);
}

/*
 * Checks whether it makes sense to retry the reclaim to make a forward progress
 * for the given allocation request.
 *
 * We give up when we either have tried MAX_RECLAIM_RETRIES in a row
 * without success, or when we couldn't even meet the watermark if we
 * reclaimed all remaining pages on the LRU lists.
 *
 * Returns true if a retry is viable or false to enter the oom path.
 */
static inline bool
should_reclaim_retry(gfp_t gfp_mask, unsigned order,
		     struct alloc_context *ac, int alloc_flags,
		     bool did_some_progress, int *no_progress_loops)
{
	struct zone *zone;
	struct zoneref *z;
	bool ret = false;

	/*
	 * Costly allocations might have made a progress but this doesn't mean
	 * their order will become available due to high fragmentation so
	 * always increment the no progress counter for them
	 */
	if (did_some_progress && order <= PAGE_ALLOC_COSTLY_ORDER)
		*no_progress_loops = 0;
	else
		(*no_progress_loops)++;

	/*
	 * Make sure we converge to OOM if we cannot make any progress
	 * several times in the row.
	 */
	if (*no_progress_loops > MAX_RECLAIM_RETRIES) {
		/* Before OOM, exhaust highatomic_reserve */
		return unreserve_highatomic_pageblock(ac, true);
	}

	/*
	 * Keep reclaiming pages while there is a chance this will lead
	 * somewhere.  If none of the target zones can satisfy our allocation
	 * request even if all reclaimable pages are considered then we are
	 * screwed and have to go OOM.
	 */
	for_each_zone_zonelist_nodemask(zone, z, ac->zonelist,
				ac->highest_zoneidx, ac->nodemask) {
		unsigned long available;
		unsigned long reclaimable;
		unsigned long min_wmark = min_wmark_pages(zone);
		bool wmark;

		available = reclaimable = zone_reclaimable_pages(zone);
		available += zone_page_state_snapshot(zone, NR_FREE_PAGES);

		/*
		 * Would the allocation succeed if we reclaimed all
		 * reclaimable pages?
		 */
		wmark = __zone_watermark_ok(zone, order, min_wmark,
				ac->highest_zoneidx, alloc_flags, available);
		trace_reclaim_retry_zone(z, order, reclaimable,
				available, min_wmark, *no_progress_loops, wmark);
		if (wmark) {
			/*
			 * If we didn't make any progress and have a lot of
			 * dirty + writeback pages then we should wait for
			 * an IO to complete to slow down the reclaim and
			 * prevent premature OOM
			 */
			if (!did_some_progress) {
				unsigned long write_pending;

				write_pending = zone_page_state_snapshot(zone,
							NR_ZONE_WRITE_PENDING);

				if (2 * write_pending > reclaimable) {
					congestion_wait(BLK_RW_ASYNC, HZ/10);
					return true;
				}
			}

			ret = true;
			goto out;
		}
	}

out:
	/*
	 * Memory allocation/reclaim might be called from a WQ context and the
	 * current implementation of the WQ concurrency control doesn't
	 * recognize that a particular WQ is congested if the worker thread is
	 * looping without ever sleeping. Therefore we have to do a short sleep
	 * here rather than calling cond_resched().
	 */
	if (current->flags & PF_WQ_WORKER)
		schedule_timeout_uninterruptible(1);
	else
		cond_resched();
	return ret;
}

static inline bool
check_retry_cpuset(int cpuset_mems_cookie, struct alloc_context *ac)
{
	/*
	 * It's possible that cpuset's mems_allowed and the nodemask from
	 * mempolicy don't intersect. This should be normally dealt with by
	 * policy_nodemask(), but it's possible to race with cpuset update in
	 * such a way the check therein was true, and then it became false
	 * before we got our cpuset_mems_cookie here.
	 * This assumes that for all allocations, ac->nodemask can come only
	 * from MPOL_BIND mempolicy (whose documented semantics is to be ignored
	 * when it does not intersect with the cpuset restrictions) or the
	 * caller can deal with a violated nodemask.
	 */
	if (cpusets_enabled() && ac->nodemask &&
			!cpuset_nodemask_valid_mems_allowed(ac->nodemask)) {
		ac->nodemask = NULL;
		return true;
	}

	/*
	 * When updating a task's mems_allowed or mempolicy nodemask, it is
	 * possible to race with parallel threads in such a way that our
	 * allocation can fail while the mask is being updated. If we are about
	 * to fail, check if the cpuset changed during allocation and if so,
	 * retry.
	 */
	if (read_mems_allowed_retry(cpuset_mems_cookie))
		return true;

	return false;
}

static inline struct page *
__alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
						struct alloc_context *ac)
{
	bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM;
	const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER;
	struct page *page = NULL;
	unsigned int alloc_flags;
	unsigned long did_some_progress;
	enum compact_priority compact_priority;
	enum compact_result compact_result;
	int compaction_retries;
	int no_progress_loops;
	unsigned int cpuset_mems_cookie;
	int reserve_flags;

	/*
	 * We also sanity check to catch abuse of atomic reserves being used by
	 * callers that are not in atomic context.
	 */
	if (WARN_ON_ONCE((gfp_mask & (__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)) ==
				(__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)))
		gfp_mask &= ~__GFP_ATOMIC;

retry_cpuset:
	compaction_retries = 0;
	no_progress_loops = 0;
	compact_priority = DEF_COMPACT_PRIORITY;
	cpuset_mems_cookie = read_mems_allowed_begin();

	/*
	 * The fast path uses conservative alloc_flags to succeed only until
	 * kswapd needs to be woken up, and to avoid the cost of setting up
	 * alloc_flags precisely. So we do that now.
	 */
	alloc_flags = gfp_to_alloc_flags(gfp_mask);

	/*
	 * We need to recalculate the starting point for the zonelist iterator
	 * because we might have used different nodemask in the fast path, or
	 * there was a cpuset modification and we are retrying - otherwise we
	 * could end up iterating over non-eligible zones endlessly.
	 */
	ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
					ac->highest_zoneidx, ac->nodemask);
	if (!ac->preferred_zoneref->zone)
		goto nopage;

	if (alloc_flags & ALLOC_KSWAPD)
		wake_all_kswapds(order, gfp_mask, ac);

	/*
	 * The adjusted alloc_flags might result in immediate success, so try
	 * that first
	 */
	page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
	if (page)
		goto got_pg;

	/*
	 * For costly allocations, try direct compaction first, as it's likely
	 * that we have enough base pages and don't need to reclaim. For non-
	 * movable high-order allocations, do that as well, as compaction will
	 * try to prevent permanent fragmentation by migrating from blocks of
	 * the same migratetype.
	 * Don't try this for allocations that are allowed to ignore
	 * watermarks, as the ALLOC_NO_WATERMARKS attempt didn't yet happen.
	 */
	if (can_direct_reclaim &&
			(costly_order ||
			   (order > 0 && ac->migratetype != MIGRATE_MOVABLE))
			&& !gfp_pfmemalloc_allowed(gfp_mask)) {
		page = __alloc_pages_direct_compact(gfp_mask, order,
						alloc_flags, ac,
						INIT_COMPACT_PRIORITY,
						&compact_result);
		if (page)
			goto got_pg;

		/*
		 * Checks for costly allocations with __GFP_NORETRY, which
		 * includes some THP page fault allocations
		 */
		if (costly_order && (gfp_mask & __GFP_NORETRY)) {
			/*
			 * If allocating entire pageblock(s) and compaction
			 * failed because all zones are below low watermarks
			 * or is prohibited because it recently failed at this
			 * order, fail immediately unless the allocator has
			 * requested compaction and reclaim retry.
			 *
			 * Reclaim is
			 *  - potentially very expensive because zones are far
			 *    below their low watermarks or this is part of very
			 *    bursty high order allocations,
			 *  - not guaranteed to help because isolate_freepages()
			 *    may not iterate over freed pages as part of its
			 *    linear scan, and
			 *  - unlikely to make entire pageblocks free on its
			 *    own.
			 */
			if (compact_result == COMPACT_SKIPPED ||
			    compact_result == COMPACT_DEFERRED)
				goto nopage;

			/*
			 * Looks like reclaim/compaction is worth trying, but
			 * sync compaction could be very expensive, so keep
			 * using async compaction.
			 */
			compact_priority = INIT_COMPACT_PRIORITY;
		}
	}

retry:
	/* Ensure kswapd doesn't accidentally go to sleep as long as we loop */
	if (alloc_flags & ALLOC_KSWAPD)
		wake_all_kswapds(order, gfp_mask, ac);

	reserve_flags = __gfp_pfmemalloc_flags(gfp_mask);
	if (reserve_flags)
		alloc_flags = current_alloc_flags(gfp_mask, reserve_flags);

	/*
	 * Reset the nodemask and zonelist iterators if memory policies can be
	 * ignored. These allocations are high priority and system rather than
	 * user oriented.
	 */
	if (!(alloc_flags & ALLOC_CPUSET) || reserve_flags) {
		ac->nodemask = NULL;
		ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
					ac->highest_zoneidx, ac->nodemask);
	}

	/* Attempt with potentially adjusted zonelist and alloc_flags */
	page = get_page_from_freelist(gfp_mask, order, alloc_flags, ac);
	if (page)
		goto got_pg;

	/* Caller is not willing to reclaim, we can't balance anything */
	if (!can_direct_reclaim)
		goto nopage;

	/* Avoid recursion of direct reclaim */
	if (current->flags & PF_MEMALLOC)
		goto nopage;

	/* Try direct reclaim and then allocating */
	page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
							&did_some_progress);
	if (page)
		goto got_pg;

	/* Try direct compaction and then allocating */
	page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac,
					compact_priority, &compact_result);
	if (page)
		goto got_pg;

	/* Do not loop if specifically requested */
	if (gfp_mask & __GFP_NORETRY)
		goto nopage;

	/*
	 * Do not retry costly high order allocations unless they are
	 * __GFP_RETRY_MAYFAIL
	 */
	if (costly_order && !(gfp_mask & __GFP_RETRY_MAYFAIL))
		goto nopage;

	if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags,
				 did_some_progress > 0, &no_progress_loops))
		goto retry;

	/*
	 * It doesn't make any sense to retry for the compaction if the order-0
	 * reclaim is not able to make any progress because the current
	 * implementation of the compaction depends on the sufficient amount
	 * of free memory (see __compaction_suitable)
	 */
	if (did_some_progress > 0 &&
			should_compact_retry(ac, order, alloc_flags,
				compact_result, &compact_priority,
				&compaction_retries))
		goto retry;

	/* Deal with possible cpuset update races before we start OOM killing */
	if (check_retry_cpuset(cpuset_mems_cookie, ac))
		goto retry_cpuset;

	/* Reclaim has failed us, start killing things */
	page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
	if (page)
		goto got_pg;

	/* Avoid allocations with no watermarks from looping endlessly */
	if (tsk_is_oom_victim(current) &&
	    (alloc_flags & ALLOC_OOM ||
	     (gfp_mask & __GFP_NOMEMALLOC)))
		goto nopage;

	/* Retry as long as the OOM killer is making progress */
	if (did_some_progress) {
		no_progress_loops = 0;
		goto retry;
	}

nopage:
	/* Deal with possible cpuset update races before we fail */
	if (check_retry_cpuset(cpuset_mems_cookie, ac))
		goto retry_cpuset;

	/*
	 * Make sure that __GFP_NOFAIL request doesn't leak out and make sure
	 * we always retry
	 */
	if (gfp_mask & __GFP_NOFAIL) {
		/*
		 * All existing users of the __GFP_NOFAIL are blockable, so warn
		 * of any new users that actually require GFP_NOWAIT
		 */
		if (WARN_ON_ONCE(!can_direct_reclaim))
			goto fail;

		/*
		 * PF_MEMALLOC request from this context is rather bizarre
		 * because we cannot reclaim anything and only can loop waiting
		 * for somebody to do a work for us
		 */
		WARN_ON_ONCE(current->flags & PF_MEMALLOC);

		/*
		 * non failing costly orders are a hard requirement which we
		 * are not prepared for much so let's warn about these users
		 * so that we can identify them and convert them to something
		 * else.
		 */
		WARN_ON_ONCE(order > PAGE_ALLOC_COSTLY_ORDER);

		/*
		 * Help non-failing allocations by giving them access to memory
		 * reserves but do not use ALLOC_NO_WATERMARKS because this
		 * could deplete whole memory reserves which would just make
		 * the situation worse
		 */
		page = __alloc_pages_cpuset_fallback(gfp_mask, order, ALLOC_HARDER, ac);
		if (page)
			goto got_pg;

		cond_resched();
		goto retry;
	}
fail:
	warn_alloc(gfp_mask, ac->nodemask,
			"page allocation failure: order:%u", order);
got_pg:
	return page;
}

static inline bool prepare_alloc_pages(gfp_t gfp_mask, unsigned int order,
		int preferred_nid, nodemask_t *nodemask,
		struct alloc_context *ac, gfp_t *alloc_mask,
		unsigned int *alloc_flags)
{
	ac->highest_zoneidx = gfp_zone(gfp_mask);
	ac->zonelist = node_zonelist(preferred_nid, gfp_mask);
	ac->nodemask = nodemask;
	ac->migratetype = gfp_migratetype(gfp_mask);

	if (cpusets_enabled()) {
		*alloc_mask |= __GFP_HARDWALL;
		/*
		 * When we are in the interrupt context, it is irrelevant
		 * to the current task context. It means that any node is OK.
		 */
		if (!in_interrupt() && !ac->nodemask)
			ac->nodemask = &cpuset_current_mems_allowed;
		else
			*alloc_flags |= ALLOC_CPUSET;
	}

	fs_reclaim_acquire(gfp_mask);
	fs_reclaim_release(gfp_mask);

	might_sleep_if(gfp_mask & __GFP_DIRECT_RECLAIM);

	if (should_fail_alloc_page(gfp_mask, order))
		return false;

	*alloc_flags = current_alloc_flags(gfp_mask, *alloc_flags);

	/* Dirty zone balancing only done in the fast path */
	ac->spread_dirty_pages = (gfp_mask & __GFP_WRITE);

	/*
	 * The preferred zone is used for statistics but crucially it is
	 * also used as the starting point for the zonelist iterator. It
	 * may get reset for allocations that ignore memory policies.
	 */
	ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
					ac->highest_zoneidx, ac->nodemask);

	return true;
}

/*
 * This is the 'heart' of the zoned buddy allocator.
 */
struct page *
__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, int preferred_nid,
							nodemask_t *nodemask)
{
	struct page *page;
	unsigned int alloc_flags = ALLOC_WMARK_LOW;
	gfp_t alloc_mask; /* The gfp_t that was actually used for allocation */
	struct alloc_context ac = { };

	/*
	 * There are several places where we assume that the order value is sane
	 * so bail out early if the request is out of bound.
	 */
	if (unlikely(order >= MAX_ORDER)) {
		WARN_ON_ONCE(!(gfp_mask & __GFP_NOWARN));
		return NULL;
	}

	gfp_mask &= gfp_allowed_mask;
	alloc_mask = gfp_mask;
	if (!prepare_alloc_pages(gfp_mask, order, preferred_nid, nodemask, &ac, &alloc_mask, &alloc_flags))
		return NULL;

	/*
	 * Forbid the first pass from falling back to types that fragment
	 * memory until all local zones are considered.
	 */
	alloc_flags |= alloc_flags_nofragment(ac.preferred_zoneref->zone, gfp_mask);

	/* First allocation attempt */
	page = get_page_from_freelist(alloc_mask, order, alloc_flags, &ac);
	if (likely(page))
		goto out;

	/*
	 * Apply scoped allocation constraints. This is mainly about GFP_NOFS
	 * resp. GFP_NOIO which has to be inherited for all allocation requests
	 * from a particular context which has been marked by
	 * memalloc_no{fs,io}_{save,restore}.
	 */
	alloc_mask = current_gfp_context(gfp_mask);
	ac.spread_dirty_pages = false;

	/*
	 * Restore the original nodemask if it was potentially replaced with
	 * &cpuset_current_mems_allowed to optimize the fast-path attempt.
	 */
	ac.nodemask = nodemask;

	page = __alloc_pages_slowpath(alloc_mask, order, &ac);

out:
	if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && page &&
	    unlikely(__memcg_kmem_charge_page(page, gfp_mask, order) != 0)) {
		__free_pages(page, order);
		page = NULL;
	}

	trace_mm_page_alloc(page, order, alloc_mask, ac.migratetype);

	return page;
}
EXPORT_SYMBOL(__alloc_pages_nodemask);
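/*
 * A minimal sketch of a caller (illustrative; most code should use the
 * alloc_pages()/alloc_pages_node() wrappers instead of calling this
 * directly):
 *
 *	struct page *page;
 *
 *	page = __alloc_pages_nodemask(GFP_KERNEL | __GFP_ZERO, 1,
 *				      numa_node_id(), NULL);
 *	if (!page)
 *		return -ENOMEM;
 *	...
 *	__free_pages(page, 1);
 */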

/*
 * Common helper functions. Never use with __GFP_HIGHMEM because the returned
 * address cannot represent highmem pages. Use alloc_pages and then kmap if
 * you need to access high mem.
 */
unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
{
	struct page *page;

	page = alloc_pages(gfp_mask & ~__GFP_HIGHMEM, order);
	if (!page)
		return 0;
	return (unsigned long) page_address(page);
}
EXPORT_SYMBOL(__get_free_pages);

unsigned long get_zeroed_page(gfp_t gfp_mask)
{
	return __get_free_pages(gfp_mask | __GFP_ZERO, 0);
}
EXPORT_SYMBOL(get_zeroed_page);
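/*
 * Example pairing for the two helpers above (illustrative): the returned
 * address is released with free_pages() using the same order.
 *
 *	unsigned long buf = __get_free_pages(GFP_KERNEL, 1);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	free_pages(buf, 1);
 */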

static inline void free_the_page(struct page *page, unsigned int order)
{
	if (order == 0)		/* Via pcp? */
		free_unref_page(page);
	else
		__free_pages_ok(page, order, FPI_NONE);
}

/**
 * __free_pages - Free pages allocated with alloc_pages().
 * @page: The page pointer returned from alloc_pages().
 * @order: The order of the allocation.
 *
 * This function can free multi-page allocations that are not compound
 * pages.  It does not check that the @order passed in matches that of
 * the allocation, so it is easy to leak memory.  Freeing more memory
 * than was allocated will probably emit a warning.
 *
 * If the last reference to this page is speculative, it will be released
 * by put_page() which only frees the first page of a non-compound
 * allocation.  To prevent the remaining pages from being leaked, we free
 * the subsequent pages here.  If you want to use the page's reference
 * count to decide when to free the allocation, you should allocate a
 * compound page, and use put_page() instead of __free_pages().
 *
 * Context: May be called in interrupt context or while holding a normal
 * spinlock, but not in NMI context or while holding a raw spinlock.
 */
void __free_pages(struct page *page, unsigned int order)
{
	if (put_page_testzero(page))
		free_the_page(page, order);
	else if (!PageHead(page))
		while (order-- > 0)
			free_the_page(page + (1 << order), order);
}
EXPORT_SYMBOL(__free_pages);

void free_pages(unsigned long addr, unsigned int order)
{
	if (addr != 0) {
		VM_BUG_ON(!virt_addr_valid((void *)addr));
		__free_pages(virt_to_page((void *)addr), order);
	}
}

EXPORT_SYMBOL(free_pages);

/*
 * Page Fragment:
 *  An arbitrary-length arbitrary-offset area of memory which resides
 *  within a 0 or higher order page.  Multiple fragments within that page
 *  are individually refcounted, in the page's reference counter.
 *
 * The page_frag functions below provide a simple allocation framework for
 * page fragments.  This is used by the network stack and network device
 * drivers to provide a backing region of memory for use as either an
 * sk_buff->head, or to be used in the "frags" portion of skb_shared_info.
 */
static struct page *__page_frag_cache_refill(struct page_frag_cache *nc,
					     gfp_t gfp_mask)
{
	struct page *page = NULL;
	gfp_t gfp = gfp_mask;

#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
	gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY |
		    __GFP_NOMEMALLOC;
	page = alloc_pages_node(NUMA_NO_NODE, gfp_mask,
				PAGE_FRAG_CACHE_MAX_ORDER);
	nc->size = page ? PAGE_FRAG_CACHE_MAX_SIZE : PAGE_SIZE;
#endif
	if (unlikely(!page))
		page = alloc_pages_node(NUMA_NO_NODE, gfp, 0);

	nc->va = page ? page_address(page) : NULL;

	return page;
}

void __page_frag_cache_drain(struct page *page, unsigned int count)
{
	VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);

	if (page_ref_sub_and_test(page, count))
		free_the_page(page, compound_order(page));
}
EXPORT_SYMBOL(__page_frag_cache_drain);

void *page_frag_alloc(struct page_frag_cache *nc,
		      unsigned int fragsz, gfp_t gfp_mask)
{
	unsigned int size = PAGE_SIZE;
	struct page *page;
	int offset;

	if (unlikely(!nc->va)) {
refill:
		page = __page_frag_cache_refill(nc, gfp_mask);
		if (!page)
			return NULL;

#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
		/* if size can vary use size else just use PAGE_SIZE */
		size = nc->size;
#endif
		/* Even if we own the page, we do not use atomic_set().
		 * This would break get_page_unless_zero() users.
		 */
		page_ref_add(page, PAGE_FRAG_CACHE_MAX_SIZE);

		/* reset page count bias and offset to start of new frag */
		nc->pfmemalloc = page_is_pfmemalloc(page);
		nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
		nc->offset = size;
	}

	offset = nc->offset - fragsz;
	if (unlikely(offset < 0)) {
		page = virt_to_page(nc->va);

		if (!page_ref_sub_and_test(page, nc->pagecnt_bias))
			goto refill;

		if (unlikely(nc->pfmemalloc)) {
			free_the_page(page, compound_order(page));
			goto refill;
		}

#if (PAGE_SIZE < PAGE_FRAG_CACHE_MAX_SIZE)
		/* if size can vary use size else just use PAGE_SIZE */
		size = nc->size;
#endif
		/* OK, page count is 0, we can safely set it */
		set_page_count(page, PAGE_FRAG_CACHE_MAX_SIZE + 1);

		/* reset page count bias and offset to start of new frag */
		nc->pagecnt_bias = PAGE_FRAG_CACHE_MAX_SIZE + 1;
		offset = size - fragsz;
	}

	nc->pagecnt_bias--;
	nc->offset = offset;

	return nc->va + offset;
}
EXPORT_SYMBOL(page_frag_alloc);

/*
 * Frees a page fragment allocated out of either a compound or order 0 page.
 */
void page_frag_free(void *addr)
{
	struct page *page = virt_to_head_page(addr);

	if (unlikely(put_page_testzero(page)))
		free_the_page(page, compound_order(page));
}
EXPORT_SYMBOL(page_frag_free);
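
/*
 * Illustrative sketch, hypothetical helper: typical page_frag usage as
 * seen in network drivers. A driver owns a struct page_frag_cache,
 * carves small buffers out of it with page_frag_alloc(), and each
 * buffer is later released individually with page_frag_free().
 */
static inline void *example_frag_alloc(struct page_frag_cache *nc)
{
	/* 256-byte fragment; GFP_ATOMIC since this often runs in softirq */
	void *buf = page_frag_alloc(nc, 256, GFP_ATOMIC);

	/* when the buffer is done: page_frag_free(buf); */
	return buf;
}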

static void *make_alloc_exact(unsigned long addr, unsigned int order,
		size_t size)
{
	if (addr) {
		unsigned long alloc_end = addr + (PAGE_SIZE << order);
		unsigned long used = addr + PAGE_ALIGN(size);

		split_page(virt_to_page((void *)addr), order);
		while (used < alloc_end) {
			free_page(used);
			used += PAGE_SIZE;
		}
	}
	return (void *)addr;
}
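
/*
 * Worked example (illustrative): for size = 5 * PAGE_SIZE the caller
 * passes order = 3 (an 8-page block). split_page() turns it into eight
 * independent order-0 pages and the loop above frees the trailing
 * three, leaving exactly the five pages covering [addr, addr + size).
 */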

/**
 * alloc_pages_exact - allocate an exact number of physically-contiguous pages.
 * @size: the number of bytes to allocate
 * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP
 *
 * This function is similar to alloc_pages(), except that it allocates the
 * minimum number of pages to satisfy the request.  alloc_pages() can only
 * allocate memory in power-of-two pages.
 *
 * This function is also limited by MAX_ORDER.
 *
 * Memory allocated by this function must be released by free_pages_exact().
 *
 * Return: pointer to the allocated area or %NULL in case of error.
 */
void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
{
	unsigned int order = get_order(size);
	unsigned long addr;

	if (WARN_ON_ONCE(gfp_mask & __GFP_COMP))
		gfp_mask &= ~__GFP_COMP;

	addr = __get_free_pages(gfp_mask, order);
	return make_alloc_exact(addr, order, size);
}
EXPORT_SYMBOL(alloc_pages_exact);

/**
 * alloc_pages_exact_nid - allocate an exact number of physically-contiguous
 *			   pages on a node.
 * @nid: the preferred node ID where memory should be allocated
 * @size: the number of bytes to allocate
 * @gfp_mask: GFP flags for the allocation, must not contain __GFP_COMP
 *
 * Like alloc_pages_exact(), but try to allocate on node nid first before falling
 * back.
 *
 * Return: pointer to the allocated area or %NULL in case of error.
 */
void * __meminit alloc_pages_exact_nid(int nid, size_t size, gfp_t gfp_mask)
{
	unsigned int order = get_order(size);
	struct page *p;

	if (WARN_ON_ONCE(gfp_mask & __GFP_COMP))
		gfp_mask &= ~__GFP_COMP;

	p = alloc_pages_node(nid, gfp_mask, order);
	if (!p)
		return NULL;
	return make_alloc_exact((unsigned long)page_address(p), order, size);
}

/**
 * free_pages_exact - release memory allocated via alloc_pages_exact()
 * @virt: the value returned by alloc_pages_exact.
 * @size: size of allocation, same value as passed to alloc_pages_exact().
 *
 * Release the memory allocated by a previous call to alloc_pages_exact.
 */
void free_pages_exact(void *virt, size_t size)
{
	unsigned long addr = (unsigned long)virt;
	unsigned long end = addr + PAGE_ALIGN(size);

	while (addr < end) {
		free_page(addr);
		addr += PAGE_SIZE;
	}
}
EXPORT_SYMBOL(free_pages_exact);
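
/*
 * Illustrative sketch, hypothetical helper: an alloc_pages_exact() /
 * free_pages_exact() pair. Unlike __get_free_pages(), which would round
 * a 5-page request up to a whole order-3 block, only the five pages
 * actually needed stay allocated.
 */
static inline void *example_alloc_exact_five_pages(void)
{
	void *buf = alloc_pages_exact(5 * PAGE_SIZE, GFP_KERNEL);

	/* must later be released with free_pages_exact(buf, 5 * PAGE_SIZE) */
	return buf;
}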

/**
 * nr_free_zone_pages - count number of pages beyond high watermark
 * @offset: The zone index of the highest zone
 *
 * nr_free_zone_pages() counts the number of pages which are beyond the
 * high watermark within all zones at or below a given zone index.  For each
 * zone, the number of pages is calculated as:
 *
 *     nr_free_zone_pages = managed_pages - high_pages
 *
 * Return: number of pages beyond high watermark.
 */
static unsigned long nr_free_zone_pages(int offset)
{
	struct zoneref *z;
	struct zone *zone;

	/* Just pick one node, since fallback list is circular */
	unsigned long sum = 0;

	struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);

	for_each_zone_zonelist(zone, z, zonelist, offset) {
		unsigned long size = zone_managed_pages(zone);
		unsigned long high = high_wmark_pages(zone);
		if (size > high)
			sum += size - high;
	}

	return sum;
}
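
/*
 * Worked example (illustrative): with two populated zones at or below
 * @offset, one with 1000 managed pages and a high watermark of 100 and
 * another with 500 managed pages and a high watermark of 600, the first
 * contributes 900 pages, the second contributes nothing, and the
 * function returns 900.
 */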

/**
 * nr_free_buffer_pages - count number of pages beyond high watermark
 *
 * nr_free_buffer_pages() counts the number of pages which are beyond the high
 * watermark within ZONE_DMA and ZONE_NORMAL.
 *
 * Return: number of pages beyond high watermark within ZONE_DMA and
 * ZONE_NORMAL.
 */
unsigned long nr_free_buffer_pages(void)
{
	return nr_free_zone_pages(gfp_zone(GFP_USER));
}
EXPORT_SYMBOL_GPL(nr_free_buffer_pages);

static inline void show_node(struct zone *zone)
{
	if (IS_ENABLED(CONFIG_NUMA))
		printk("Node %d ", zone_to_nid(zone));
}

long si_mem_available(void)
{
	long available;
	unsigned long pagecache;
	unsigned long wmark_low = 0;
	unsigned long pages[NR_LRU_LISTS];
	unsigned long reclaimable;
	struct zone *zone;
	int lru;

	for (lru = LRU_BASE; lru < NR_LRU_LISTS; lru++)
		pages[lru] = global_node_page_state(NR_LRU_BASE + lru);

	for_each_zone(zone)
		wmark_low += low_wmark_pages(zone);

	/*
	 * Estimate the amount of memory available for userspace allocations,
	 * without causing swapping.
	 */
	available = global_zone_page_state(NR_FREE_PAGES) - totalreserve_pages;

	/*
	 * Not all the page cache can be freed, otherwise the system will
	 * start swapping. Assume at least half of the page cache, or the
	 * low watermark worth of cache, needs to stay.
	 */
	pagecache = pages[LRU_ACTIVE_FILE] + pages[LRU_INACTIVE_FILE];
	pagecache -= min(pagecache / 2, wmark_low);
	available += pagecache;

	/*
	 * Part of the reclaimable slab and other kernel memory consists of
	 * items that are in use, and cannot be freed. Cap this estimate at the
	 * low watermark.
	 */
	reclaimable = global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B) +
		global_node_page_state(NR_KERNEL_MISC_RECLAIMABLE);
	available += reclaimable - min(reclaimable / 2, wmark_low);

	if (available < 0)
		available = 0;
	return available;
}
EXPORT_SYMBOL_GPL(si_mem_available);
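
/*
 * Summarizing the estimate above (illustrative, derived from the code):
 *
 *	available = free - totalreserve
 *		  + pagecache   - min(pagecache / 2,   wmark_low)
 *		  + reclaimable - min(reclaimable / 2, wmark_low)
 *
 * i.e. at least half of the page cache and half of the reclaimable
 * kernel memory (each capped by the low watermark) are assumed to be
 * unavailable without causing swapping.
 */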

void si_meminfo(struct sysinfo *val)
{
	val->totalram = totalram_pages();
	val->sharedram = global_node_page_state(NR_SHMEM);
	val->freeram = global_zone_page_state(NR_FREE_PAGES);
	val->bufferram = nr_blockdev_pages();
	val->totalhigh = totalhigh_pages();
	val->freehigh = nr_free_highpages();
	val->mem_unit = PAGE_SIZE;
}

EXPORT_SYMBOL(si_meminfo);

#ifdef CONFIG_NUMA
void si_meminfo_node(struct sysinfo *val, int nid)
{
	int zone_type;		/* needs to be signed */
	unsigned long managed_pages = 0;
	unsigned long managed_highpages = 0;
	unsigned long free_highpages = 0;
	pg_data_t *pgdat = NODE_DATA(nid);

	for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
		managed_pages += zone_managed_pages(&pgdat->node_zones[zone_type]);
	val->totalram = managed_pages;
	val->sharedram = node_page_state(pgdat, NR_SHMEM);
	val->freeram = sum_zone_node_page_state(nid, NR_FREE_PAGES);
#ifdef CONFIG_HIGHMEM
	for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++) {
		struct zone *zone = &pgdat->node_zones[zone_type];

		if (is_highmem(zone)) {
			managed_highpages += zone_managed_pages(zone);
			free_highpages += zone_page_state(zone, NR_FREE_PAGES);
		}
	}
	val->totalhigh = managed_highpages;
	val->freehigh = free_highpages;
#else
	val->totalhigh = managed_highpages;
	val->freehigh = free_highpages;
#endif
	val->mem_unit = PAGE_SIZE;
}
#endif

/*
 * Determine whether the node should be displayed or not, depending on whether
 * SHOW_MEM_FILTER_NODES was passed to show_free_areas().
 */
static bool show_mem_node_skip(unsigned int flags, int nid, nodemask_t *nodemask)
{
	if (!(flags & SHOW_MEM_FILTER_NODES))
		return false;

	/*
	 * no node mask - aka implicit memory numa policy. Do not bother with
	 * the synchronization - read_mems_allowed_begin - because we do not
	 * have to be precise here.
	 */
	if (!nodemask)
		nodemask = &cpuset_current_mems_allowed;

	return !node_isset(nid, *nodemask);
}

#define K(x) ((x) << (PAGE_SHIFT-10))

static void show_migration_types(unsigned char type)
{
	static const char types[MIGRATE_TYPES] = {
		[MIGRATE_UNMOVABLE]	= 'U',
		[MIGRATE_MOVABLE]	= 'M',
		[MIGRATE_RECLAIMABLE]	= 'E',
		[MIGRATE_HIGHATOMIC]	= 'H',
#ifdef CONFIG_CMA
		[MIGRATE_CMA]		= 'C',
#endif
#ifdef CONFIG_MEMORY_ISOLATION
		[MIGRATE_ISOLATE]	= 'I',
#endif
	};
	char tmp[MIGRATE_TYPES + 1];
	char *p = tmp;
	int i;

	for (i = 0; i < MIGRATE_TYPES; i++) {
		if (type & (1 << i))
			*p++ = types[i];
	}

	*p = '\0';
	printk(KERN_CONT "(%s) ", tmp);
}

/*
 * Show free area list (used inside shift_scroll-lock stuff)
 * We also calculate the percentage fragmentation. We do this by counting the
 * memory on each free list with the exception of the first item on the list.
 *
 * Bits in @filter:
 * SHOW_MEM_FILTER_NODES: suppress nodes that are not allowed by current's
 *   cpuset.
 */
void show_free_areas(unsigned int filter, nodemask_t *nodemask)
{
	unsigned long free_pcp = 0;
	int cpu;
	struct zone *zone;
	pg_data_t *pgdat;

	for_each_populated_zone(zone) {
		if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
			continue;

		for_each_online_cpu(cpu)
			free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count;
	}

	printk("active_anon:%lu inactive_anon:%lu isolated_anon:%lu\n"
		" active_file:%lu inactive_file:%lu isolated_file:%lu\n"
		" unevictable:%lu dirty:%lu writeback:%lu\n"
		" slab_reclaimable:%lu slab_unreclaimable:%lu\n"
		" mapped:%lu shmem:%lu pagetables:%lu bounce:%lu\n"
		" free:%lu free_pcp:%lu free_cma:%lu\n",
		global_node_page_state(NR_ACTIVE_ANON),
		global_node_page_state(NR_INACTIVE_ANON),
		global_node_page_state(NR_ISOLATED_ANON),
		global_node_page_state(NR_ACTIVE_FILE),
		global_node_page_state(NR_INACTIVE_FILE),
		global_node_page_state(NR_ISOLATED_FILE),
		global_node_page_state(NR_UNEVICTABLE),
		global_node_page_state(NR_FILE_DIRTY),
		global_node_page_state(NR_WRITEBACK),
		global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B),
		global_node_page_state_pages(NR_SLAB_UNRECLAIMABLE_B),
		global_node_page_state(NR_FILE_MAPPED),
		global_node_page_state(NR_SHMEM),
		global_node_page_state(NR_PAGETABLE),
		global_zone_page_state(NR_BOUNCE),
		global_zone_page_state(NR_FREE_PAGES),
		free_pcp,
		global_zone_page_state(NR_FREE_CMA_PAGES));

	for_each_online_pgdat(pgdat) {
		if (show_mem_node_skip(filter, pgdat->node_id, nodemask))
			continue;

		printk("Node %d"
			" active_anon:%lukB"
			" inactive_anon:%lukB"
			" active_file:%lukB"
			" inactive_file:%lukB"
			" unevictable:%lukB"
			" isolated(anon):%lukB"
			" isolated(file):%lukB"
			" mapped:%lukB"
			" dirty:%lukB"
			" writeback:%lukB"
			" shmem:%lukB"
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
			" shmem_thp: %lukB"
			" shmem_pmdmapped: %lukB"
			" anon_thp: %lukB"
#endif
			" writeback_tmp:%lukB"
			" kernel_stack:%lukB"
#ifdef CONFIG_SHADOW_CALL_STACK
			" shadow_call_stack:%lukB"
#endif
			" pagetables:%lukB"
			" all_unreclaimable? %s"
			"\n",
			pgdat->node_id,
			K(node_page_state(pgdat, NR_ACTIVE_ANON)),
			K(node_page_state(pgdat, NR_INACTIVE_ANON)),
			K(node_page_state(pgdat, NR_ACTIVE_FILE)),
			K(node_page_state(pgdat, NR_INACTIVE_FILE)),
			K(node_page_state(pgdat, NR_UNEVICTABLE)),
			K(node_page_state(pgdat, NR_ISOLATED_ANON)),
			K(node_page_state(pgdat, NR_ISOLATED_FILE)),
			K(node_page_state(pgdat, NR_FILE_MAPPED)),
			K(node_page_state(pgdat, NR_FILE_DIRTY)),
			K(node_page_state(pgdat, NR_WRITEBACK)),
			K(node_page_state(pgdat, NR_SHMEM)),
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
			K(node_page_state(pgdat, NR_SHMEM_THPS) * HPAGE_PMD_NR),
			K(node_page_state(pgdat, NR_SHMEM_PMDMAPPED)
					* HPAGE_PMD_NR),
			K(node_page_state(pgdat, NR_ANON_THPS) * HPAGE_PMD_NR),
#endif
			K(node_page_state(pgdat, NR_WRITEBACK_TEMP)),
			node_page_state(pgdat, NR_KERNEL_STACK_KB),
#ifdef CONFIG_SHADOW_CALL_STACK
			node_page_state(pgdat, NR_KERNEL_SCS_KB),
#endif
			K(node_page_state(pgdat, NR_PAGETABLE)),
			pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES ?
				"yes" : "no");
	}

	for_each_populated_zone(zone) {
		int i;

		if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
			continue;

		free_pcp = 0;
		for_each_online_cpu(cpu)
			free_pcp += per_cpu_ptr(zone->pageset, cpu)->pcp.count;

		show_node(zone);
		printk(KERN_CONT
			"%s"
			" free:%lukB"
			" min:%lukB"
			" low:%lukB"
			" high:%lukB"
			" reserved_highatomic:%luKB"
			" active_anon:%lukB"
			" inactive_anon:%lukB"
			" active_file:%lukB"
			" inactive_file:%lukB"
			" unevictable:%lukB"
			" writepending:%lukB"
			" present:%lukB"
			" managed:%lukB"
			" mlocked:%lukB"
			" bounce:%lukB"
			" free_pcp:%lukB"
			" local_pcp:%ukB"
			" free_cma:%lukB"
			"\n",
			zone->name,
			K(zone_page_state(zone, NR_FREE_PAGES)),
			K(min_wmark_pages(zone)),
			K(low_wmark_pages(zone)),
			K(high_wmark_pages(zone)),
			K(zone->nr_reserved_highatomic),
			K(zone_page_state(zone, NR_ZONE_ACTIVE_ANON)),
			K(zone_page_state(zone, NR_ZONE_INACTIVE_ANON)),
			K(zone_page_state(zone, NR_ZONE_ACTIVE_FILE)),
			K(zone_page_state(zone, NR_ZONE_INACTIVE_FILE)),
			K(zone_page_state(zone, NR_ZONE_UNEVICTABLE)),
			K(zone_page_state(zone, NR_ZONE_WRITE_PENDING)),
			K(zone->present_pages),
			K(zone_managed_pages(zone)),
			K(zone_page_state(zone, NR_MLOCK)),
			K(zone_page_state(zone, NR_BOUNCE)),
			K(free_pcp),
			K(this_cpu_read(zone->pageset->pcp.count)),
			K(zone_page_state(zone, NR_FREE_CMA_PAGES)));
		printk("lowmem_reserve[]:");
		for (i = 0; i < MAX_NR_ZONES; i++)
			printk(KERN_CONT " %ld", zone->lowmem_reserve[i]);
		printk(KERN_CONT "\n");
	}

	for_each_populated_zone(zone) {
		unsigned int order;
		unsigned long nr[MAX_ORDER], flags, total = 0;
		unsigned char types[MAX_ORDER];

		if (show_mem_node_skip(filter, zone_to_nid(zone), nodemask))
			continue;
		show_node(zone);
		printk(KERN_CONT "%s: ", zone->name);

		spin_lock_irqsave(&zone->lock, flags);
		for (order = 0; order < MAX_ORDER; order++) {
			struct free_area *area = &zone->free_area[order];
			int type;

			nr[order] = area->nr_free;
			total += nr[order] << order;

			types[order] = 0;
			for (type = 0; type < MIGRATE_TYPES; type++) {
				if (!free_area_empty(area, type))
					types[order] |= 1 << type;
			}
		}
		spin_unlock_irqrestore(&zone->lock, flags);
		for (order = 0; order < MAX_ORDER; order++) {
			printk(KERN_CONT "%lu*%lukB ",
			       nr[order], K(1UL) << order);
			if (nr[order])
				show_migration_types(types[order]);
		}
		printk(KERN_CONT "= %lukB\n", K(total));
	}

	hugetlb_show_meminfo();

	printk("%ld total pagecache pages\n", global_node_page_state(NR_FILE_PAGES));

	show_swap_cache_info();
}

static void zoneref_set_zone(struct zone *zone, struct zoneref *zoneref)
{
	zoneref->zone = zone;
	zoneref->zone_idx = zone_idx(zone);
}

/*
 * Builds allocation fallback zone lists.
 *
 * Add all populated zones of a node to the zonelist.
 */
static int build_zonerefs_node(pg_data_t *pgdat, struct zoneref *zonerefs)
{
	struct zone *zone;
	enum zone_type zone_type = MAX_NR_ZONES;
	int nr_zones = 0;

	do {
		zone_type--;
		zone = pgdat->node_zones + zone_type;
		if (managed_zone(zone)) {
			zoneref_set_zone(zone, &zonerefs[nr_zones++]);
			check_highest_zone(zone_type);
		}
	} while (zone_type);

	return nr_zones;
}

#ifdef CONFIG_NUMA

static int __parse_numa_zonelist_order(char *s)
{
	/*
	 * We used to support different zonelist modes but they turned
	 * out to be just not useful. Let's keep the warning in place
	 * if somebody still uses the cmd line parameter so that we do
	 * not fail it silently.
	 */
	if (!(*s == 'd' || *s == 'D' || *s == 'n' || *s == 'N')) {
		pr_warn("Ignoring unsupported numa_zonelist_order value:  %s\n", s);
		return -EINVAL;
	}
	return 0;
}

char numa_zonelist_order[] = "Node";

/*
 * sysctl handler for numa_zonelist_order
 */
int numa_zonelist_order_handler(struct ctl_table *table, int write,
		void *buffer, size_t *length, loff_t *ppos)
{
	if (write)
		return __parse_numa_zonelist_order(buffer);
	return proc_dostring(table, write, buffer, length, ppos);
}


#define MAX_NODE_LOAD (nr_online_nodes)
static int node_load[MAX_NUMNODES];

/**
 * find_next_best_node - find the next node that should appear in a given node's fallback list
 * @node: node whose fallback list we're appending
 * @used_node_mask: nodemask_t of already used nodes
 *
 * We use a number of factors to determine which is the next node that should
 * appear on a given node's fallback list.  The node should not have appeared
 * already in @node's fallback list, and it should be the next closest node
 * according to the distance array (which contains arbitrary distance values
 * from each node to each node in the system), and should also prefer nodes
 * with no CPUs, since presumably they'll have very little allocation pressure
 * on them otherwise.
 *
 * Return: node id of the found node or %NUMA_NO_NODE if no node is found.
 */
static int find_next_best_node(int node, nodemask_t *used_node_mask)
{
	int n, val;
	int min_val = INT_MAX;
	int best_node = NUMA_NO_NODE;

	/* Use the local node if we haven't already */
	if (!node_isset(node, *used_node_mask)) {
		node_set(node, *used_node_mask);
		return node;
	}

	for_each_node_state(n, N_MEMORY) {

		/* Don't want a node to appear more than once */
		if (node_isset(n, *used_node_mask))
			continue;

		/* Use the distance array to find the distance */
		val = node_distance(node, n);

		/* Penalize nodes under us ("prefer the next node") */
		val += (n < node);

		/* Give preference to headless and unused nodes */
		if (!cpumask_empty(cpumask_of_node(n)))
			val += PENALTY_FOR_NODE_WITH_CPUS;

		/* Slight preference for less loaded node */
		val *= (MAX_NODE_LOAD*MAX_NUMNODES);
		val += node_load[n];

		if (val < min_val) {
			min_val = val;
			best_node = n;
		}
	}

	if (best_node >= 0)
		node_set(best_node, *used_node_mask);

	return best_node;
}


/*
 * Build zonelists ordered by node and zones within node.
 * This results in maximum locality--normal zone overflows into local
 * DMA zone, if any--but risks exhausting DMA zone.
 */
static void build_zonelists_in_node_order(pg_data_t *pgdat, int *node_order,
		unsigned nr_nodes)
{
	struct zoneref *zonerefs;
	int i;

	zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs;

	for (i = 0; i < nr_nodes; i++) {
		int nr_zones;

		pg_data_t *node = NODE_DATA(node_order[i]);

		nr_zones = build_zonerefs_node(node, zonerefs);
		zonerefs += nr_zones;
	}
	zonerefs->zone = NULL;
	zonerefs->zone_idx = 0;
}

/*
 * Build gfp_thisnode zonelists
 */
static void build_thisnode_zonelists(pg_data_t *pgdat)
{
	struct zoneref *zonerefs;
	int nr_zones;

	zonerefs = pgdat->node_zonelists[ZONELIST_NOFALLBACK]._zonerefs;
	nr_zones = build_zonerefs_node(pgdat, zonerefs);
	zonerefs += nr_zones;
	zonerefs->zone = NULL;
	zonerefs->zone_idx = 0;
}

/*
 * Build zonelists ordered by zone and nodes within zones.
 * This results in conserving DMA zone[s] until all Normal memory is
 * exhausted, but results in overflowing to remote node while memory
 * may still exist in local DMA zone.
 */

static void build_zonelists(pg_data_t *pgdat)
{
	static int node_order[MAX_NUMNODES];
	int node, load, nr_nodes = 0;
	nodemask_t used_mask = NODE_MASK_NONE;
	int local_node, prev_node;

	/* NUMA-aware ordering of nodes */
	local_node = pgdat->node_id;
	load = nr_online_nodes;
	prev_node = local_node;

	memset(node_order, 0, sizeof(node_order));
	while ((node = find_next_best_node(local_node, &used_mask)) >= 0) {
		/*
		 * We don't want to pressure a particular node.
		 * So adding penalty to the first node in same
		 * distance group to make it round-robin.
		 */
		if (node_distance(local_node, node) !=
		    node_distance(local_node, prev_node))
			node_load[node] = load;

		node_order[nr_nodes++] = node;
		prev_node = node;
		load--;
	}

	build_zonelists_in_node_order(pgdat, node_order, nr_nodes);
	build_thisnode_zonelists(pgdat);
}

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
/*
 * Return node id of node used for "local" allocations.
 * I.e., first node id of first zone in arg node's generic zonelist.
 * Used for initializing percpu 'numa_mem', which is used primarily
 * for kernel allocations, so use GFP_KERNEL flags to locate zonelist.
 */
int local_memory_node(int node)
{
	struct zoneref *z;

	z = first_zones_zonelist(node_zonelist(node, GFP_KERNEL),
				   gfp_zone(GFP_KERNEL),
				   NULL);
	return zone_to_nid(z->zone);
}
#endif

static void setup_min_unmapped_ratio(void);
static void setup_min_slab_ratio(void);
#else	/* CONFIG_NUMA */

static void build_zonelists(pg_data_t *pgdat)
{
	int node, local_node;
	struct zoneref *zonerefs;
	int nr_zones;

	local_node = pgdat->node_id;

	zonerefs = pgdat->node_zonelists[ZONELIST_FALLBACK]._zonerefs;
	nr_zones = build_zonerefs_node(pgdat, zonerefs);
	zonerefs += nr_zones;

	/*
	 * Now we build the zonelist so that it contains the zones
	 * of all the other nodes.
	 * We don't want to pressure a particular node, so when
	 * building the zones for node N, we make sure that the
	 * zones coming right after the local ones are those from
	 * node N+1 (modulo N)
	 */
	for (node = local_node + 1; node < MAX_NUMNODES; node++) {
		if (!node_online(node))
			continue;
		nr_zones = build_zonerefs_node(NODE_DATA(node), zonerefs);
		zonerefs += nr_zones;
	}
	for (node = 0; node < local_node; node++) {
		if (!node_online(node))
			continue;
		nr_zones = build_zonerefs_node(NODE_DATA(node), zonerefs);
		zonerefs += nr_zones;
	}

	zonerefs->zone = NULL;
	zonerefs->zone_idx = 0;
}
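
/*
 * Worked example (illustrative): on a hypothetical 4-node !CONFIG_NUMA
 * build with local_node = 2, the two loops above append the remote
 * nodes in the order 3, 0, 1, so the fallback list visits nodes
 * 2, 3, 0, 1.
 */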

#endif	/* CONFIG_NUMA */

/*
 * Boot pageset table. One per cpu which is going to be used for all
 * zones and all nodes. The parameters will be set in such a way
 * that an item put on a list will immediately be handed over to
 * the buddy list. This is safe since pageset manipulation is done
 * with interrupts disabled.
 *
 * The boot_pagesets must be kept even after bootup is complete for
 * unused processors and/or zones. They do play a role for bootstrapping
 * hotplugged processors.
 *
 * zoneinfo_show() and maybe other functions do
 * not check if the processor is online before following the pageset pointer.
 * Other parts of the kernel may not check if the zone is available.
 */
static void pageset_init(struct per_cpu_pageset *p);
/* These effectively disable the pcplists in the boot pageset completely */
#define BOOT_PAGESET_HIGH	0
#define BOOT_PAGESET_BATCH	1
static DEFINE_PER_CPU(struct per_cpu_pageset, boot_pageset);
static DEFINE_PER_CPU(struct per_cpu_nodestat, boot_nodestats);

static void __build_all_zonelists(void *data)
{
	int nid;
	int __maybe_unused cpu;
	pg_data_t *self = data;
	static DEFINE_SPINLOCK(lock);

	spin_lock(&lock);

#ifdef CONFIG_NUMA
	memset(node_load, 0, sizeof(node_load));
#endif

	/*
	 * This node is hotadded and no memory is yet present.   So just
	 * building zonelists is fine - no need to touch other nodes.
	 */
	if (self && !node_online(self->node_id)) {
		build_zonelists(self);
	} else {
		for_each_online_node(nid) {
			pg_data_t *pgdat = NODE_DATA(nid);

			build_zonelists(pgdat);
		}

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
		/*
		 * We now know the "local memory node" for each node--
		 * i.e., the node of the first zone in the generic zonelist.
		 * Set up numa_mem percpu variable for on-line cpus.  During
		 * boot, only the boot cpu should be on-line;  we'll init the
		 * secondary cpus' numa_mem as they come on-line.  During
		 * node/memory hotplug, we'll fixup all on-line cpus.
		 */
		for_each_online_cpu(cpu)
			set_cpu_numa_mem(cpu, local_memory_node(cpu_to_node(cpu)));
#endif
	}

	spin_unlock(&lock);
}

static noinline void __init
build_all_zonelists_init(void)
{
	int cpu;

	__build_all_zonelists(NULL);

	/*
	 * Initialize the boot_pagesets that are going to be used
	 * for bootstrapping processors. The real pagesets for
	 * each zone will be allocated later when the per cpu
	 * allocator is available.
	 *
	 * boot_pagesets are used also for bootstrapping offline
	 * cpus if the system is already booted because the pagesets
	 * are needed to initialize allocators on a specific cpu too.
	 * E.g. the percpu allocator needs the page allocator, which
	 * needs the percpu allocator in order to allocate its pagesets
	 * (a chicken-and-egg dilemma).
	 */
	for_each_possible_cpu(cpu)
		pageset_init(&per_cpu(boot_pageset, cpu));

	mminit_verify_zonelist();
	cpuset_init_current_mems_allowed();
}

/*
 * Unless system_state == SYSTEM_BOOTING, callers may race; they are
 * serialized by a spinlock inside __build_all_zonelists().
 *
 * __ref due to the call of the __init annotated helper
 * build_all_zonelists_init() [protected by SYSTEM_BOOTING].
 */
void __ref build_all_zonelists(pg_data_t *pgdat)
{
	unsigned long vm_total_pages;

	if (system_state == SYSTEM_BOOTING) {
		build_all_zonelists_init();
	} else {
		__build_all_zonelists(pgdat);
		/* cpuset refresh routine should be here */
	}
	/* Get the number of free pages beyond high watermark in all zones. */
	vm_total_pages = nr_free_zone_pages(gfp_zone(GFP_HIGHUSER_MOVABLE));
	/*
	 * Disable grouping by mobility if the number of pages in the
	 * system is too low to allow the mechanism to work. It would be
	 * more accurate, but expensive to check per-zone. This check is
	 * made on memory-hotadd so a system can start with mobility
	 * disabled and enable it later
	 */
	if (vm_total_pages < (pageblock_nr_pages * MIGRATE_TYPES))
		page_group_by_mobility_disabled = 1;
	else
		page_group_by_mobility_disabled = 0;

	pr_info("Built %u zonelists, mobility grouping %s.  Total pages: %ld\n",
		nr_online_nodes,
		page_group_by_mobility_disabled ? "off" : "on",
		vm_total_pages);
#ifdef CONFIG_NUMA
	pr_info("Policy zone: %s\n", zone_names[policy_zone]);
#endif
}

/* If zone is ZONE_MOVABLE but memory is mirrored, it is an overlapped init */
static bool __meminit
overlap_memmap_init(unsigned long zone, unsigned long *pfn)
{
	static struct memblock_region *r;

	if (mirrored_kernelcore && zone == ZONE_MOVABLE) {
		if (!r || *pfn >= memblock_region_memory_end_pfn(r)) {
			for_each_mem_region(r) {
				if (*pfn < memblock_region_memory_end_pfn(r))
					break;
			}
		}
		if (*pfn >= memblock_region_memory_base_pfn(r) &&
		    memblock_is_mirror(r)) {
			*pfn = memblock_region_memory_end_pfn(r);
			return true;
		}
	}
	return false;
}

/*
 * Initially all pages are reserved - free ones are freed
 * up by memblock_free_all() once the early boot process is
 * done. Non-atomic initialization, single-pass.
 *
 * All aligned pageblocks are initialized to the specified migratetype
 * (usually MIGRATE_MOVABLE). Besides setting the migratetype, no related
 * zone stats (e.g., nr_isolate_pageblock) are touched.
 */
void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
		unsigned long start_pfn,
		enum meminit_context context,
		struct vmem_altmap *altmap, int migratetype)
{
	unsigned long pfn, end_pfn = start_pfn + size;
	struct page *page;

	if (highest_memmap_pfn < end_pfn - 1)
		highest_memmap_pfn = end_pfn - 1;

#ifdef CONFIG_ZONE_DEVICE
	/*
	 * Honor reservation requested by the driver for this ZONE_DEVICE
	 * memory. We limit the total number of pages to initialize to just
	 * those that might contain the memory mapping. We will defer the
	 * ZONE_DEVICE page initialization until after we have released
	 * the hotplug lock.
	 */
	if (zone == ZONE_DEVICE) {
		if (!altmap)
			return;

		if (start_pfn == altmap->base_pfn)
			start_pfn += altmap->reserve;
		end_pfn = altmap->base_pfn + vmem_altmap_offset(altmap);
	}
#endif

	for (pfn = start_pfn; pfn < end_pfn; ) {
		/*
		 * There can be holes in boot-time mem_map[]s handed to this
		 * function.  They do not exist on hotplugged memory.
		 */
		if (context == MEMINIT_EARLY) {
			if (overlap_memmap_init(zone, &pfn))
				continue;
			if (defer_init(nid, pfn, end_pfn))
				break;
		}

		page = pfn_to_page(pfn);
		__init_single_page(page, pfn, zone, nid);
		if (context == MEMINIT_HOTPLUG)
			__SetPageReserved(page);

		/*
		 * Usually, we want to mark the pageblock MIGRATE_MOVABLE,
		 * such that unmovable allocations won't be scattered all
		 * over the place during system boot.
		 */
		if (IS_ALIGNED(pfn, pageblock_nr_pages)) {
			set_pageblock_migratetype(page, migratetype);
			cond_resched();
		}
		pfn++;
	}
}

#ifdef CONFIG_ZONE_DEVICE
void __ref memmap_init_zone_device(struct zone *zone,
				   unsigned long start_pfn,
				   unsigned long nr_pages,
				   struct dev_pagemap *pgmap)
{
	unsigned long pfn, end_pfn = start_pfn + nr_pages;
	struct pglist_data *pgdat = zone->zone_pgdat;
	struct vmem_altmap *altmap = pgmap_altmap(pgmap);
	unsigned long zone_idx = zone_idx(zone);
	unsigned long start = jiffies;
	int nid = pgdat->node_id;

	if (WARN_ON_ONCE(!pgmap || zone_idx(zone) != ZONE_DEVICE))
		return;

	/*
	 * The call to memmap_init_zone should have already taken care
	 * of the pages reserved for the memmap, so we can just jump to
	 * the end of that region and start processing the device pages.
	 */
	if (altmap) {
		start_pfn = altmap->base_pfn + vmem_altmap_offset(altmap);
		nr_pages = end_pfn - start_pfn;
	}

	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
		struct page *page = pfn_to_page(pfn);

		__init_single_page(page, pfn, zone_idx, nid);

		/*
		 * Mark page reserved as it will need to wait for onlining
		 * phase for it to be fully associated with a zone.
		 *
		 * We can use the non-atomic __set_bit operation for setting
		 * the flag as we are still initializing the pages.
		 */
		__SetPageReserved(page);

		/*
		 * ZONE_DEVICE pages union ->lru with a ->pgmap back pointer
		 * and zone_device_data.  It is a bug if a ZONE_DEVICE page is
		 * ever freed or placed on a driver-private list.
		 */
		page->pgmap = pgmap;
		page->zone_device_data = NULL;

		/*
		 * Mark the block movable so that blocks are reserved for
		 * movable at startup. This will force kernel allocations
		 * to reserve their blocks rather than leaking throughout
		 * the address space during boot when many long-lived
		 * kernel allocations are made.
		 *
		 * Please note that MEMINIT_HOTPLUG path doesn't clear memmap
		 * because this is done early in section_activate()
		 */
		if (IS_ALIGNED(pfn, pageblock_nr_pages)) {
			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
			cond_resched();
		}
	}

	pr_info("%s initialised %lu pages in %ums\n", __func__,
		nr_pages, jiffies_to_msecs(jiffies - start));
}

#endif
static void __meminit zone_init_free_lists(struct zone *zone)
{
	unsigned int order, t;
	for_each_migratetype_order(order, t) {
		INIT_LIST_HEAD(&zone->free_area[order].free_list[t]);
		zone->free_area[order].nr_free = 0;
	}
}

void __meminit __weak memmap_init(unsigned long size, int nid,
				  unsigned long zone,
				  unsigned long range_start_pfn)
{
	unsigned long start_pfn, end_pfn;
	unsigned long range_end_pfn = range_start_pfn + size;
	int i;

	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
		start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
		end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);

		if (end_pfn > start_pfn) {
			size = end_pfn - start_pfn;
			memmap_init_zone(size, nid, zone, start_pfn,
					 MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
		}
	}
}

static int zone_batchsize(struct zone *zone)
{
#ifdef CONFIG_MMU
	int batch;

	/*
	 * The per-cpu-pages pools are set to around 1000th of the
	 * size of the zone.
	 */
	batch = zone_managed_pages(zone) / 1024;
	/* But no more than a meg. */
	if (batch * PAGE_SIZE > 1024 * 1024)
		batch = (1024 * 1024) / PAGE_SIZE;
	batch /= 4;		/* We effectively *= 4 below */
	if (batch < 1)
		batch = 1;

	/*
	 * Clamp the batch to a 2^n - 1 value. Having a power
	 * of 2 value was found to be more likely to have
	 * suboptimal cache aliasing properties in some cases.
	 *
	 * For example if 2 tasks are alternately allocating
	 * batches of pages, one task can end up with a lot
	 * of pages of one half of the possible page colors
	 * and the other with pages of the other colors.
	 */
	batch = rounddown_pow_of_two(batch + batch/2) - 1;

	return batch;

#else
	/* The deferral and batching of frees should be suppressed under NOMMU
	 * conditions.
	 *
	 * The problem is that NOMMU needs to be able to allocate large chunks
	 * of contiguous memory as there's no hardware page translation to
	 * assemble apparent contiguous memory from discontiguous pages.
	 *
	 * Queueing large contiguous runs of pages for batching, however,
	 * causes the pages to actually be freed in smaller chunks.  As there
	 * can be a significant delay between the individual batches being
	 * recycled, this leads to the once large chunks of space being
	 * fragmented and becoming unavailable for high-order allocations.
	 */
	return 0;
#endif
}
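
/*
 * Worked example (illustrative, assuming 4 KiB pages): for a zone with
 * 1 GiB managed (262144 pages), batch starts at 262144 / 1024 = 256;
 * 256 pages is exactly 1 MiB so the cap leaves it unchanged; dividing
 * by 4 gives 64; and rounddown_pow_of_two(64 + 32) - 1 yields a final
 * batch of 63.
 */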

/*
 * pcp->high and pcp->batch values are related and generally batch is lower
 * than high. They are also related to pcp->count such that count is lower
 * than high, and as soon as it reaches high, the pcplist is flushed.
 *
 * However, guaranteeing these relations at all times would require e.g. write
 * barriers here but also careful usage of read barriers at the read side, and
 * thus be prone to error and bad for performance. Thus the update only prevents
 * store tearing. Any new users of pcp->batch and pcp->high should ensure they
 * can cope with those fields changing asynchronously, and fully trust only the
 * pcp->count field on the local CPU with interrupts disabled.
 *
 * mutex_is_locked(&pcp_batch_high_lock) required when calling this function
 * outside of boot time (or some other assurance that no concurrent updaters
 * exist).
 */
static void pageset_update(struct per_cpu_pages *pcp, unsigned long high,
		unsigned long batch)
{
	WRITE_ONCE(pcp->batch, batch);
	WRITE_ONCE(pcp->high, high);
}

static void pageset_init(struct per_cpu_pageset *p)
{
	struct per_cpu_pages *pcp;
	int migratetype;

	memset(p, 0, sizeof(*p));

	pcp = &p->pcp;
	for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++)
		INIT_LIST_HEAD(&pcp->lists[migratetype]);

	/*
	 * Set batch and high values safe for a boot pageset. A true percpu
	 * pageset's initialization will update them subsequently. Here we don't
	 * need to be as careful as pageset_update() as nobody can access the
	 * pageset yet.
	 */
	pcp->high = BOOT_PAGESET_HIGH;
	pcp->batch = BOOT_PAGESET_BATCH;
}

static void __zone_set_pageset_high_and_batch(struct zone *zone, unsigned long high,
		unsigned long batch)
{
	struct per_cpu_pageset *p;
	int cpu;

	for_each_possible_cpu(cpu) {
		p = per_cpu_ptr(zone->pageset, cpu);
		pageset_update(&p->pcp, high, batch);
	}
}

/*
 * Calculate and set new high and batch values for all per-cpu pagesets of a
 * zone, based on the zone's size and the percpu_pagelist_fraction sysctl.
 */
static void zone_set_pageset_high_and_batch(struct zone *zone)
{
	unsigned long new_high, new_batch;

	if (percpu_pagelist_fraction) {
		new_high = zone_managed_pages(zone) / percpu_pagelist_fraction;
		new_batch = max(1UL, new_high / 4);
		if ((new_high / 4) > (PAGE_SHIFT * 8))
			new_batch = PAGE_SHIFT * 8;
	} else {
		new_batch = zone_batchsize(zone);
		new_high = 6 * new_batch;
		new_batch = max(1UL, 1 * new_batch);
	}

	if (zone->pageset_high == new_high &&
	    zone->pageset_batch == new_batch)
		return;

	zone->pageset_high = new_high;
	zone->pageset_batch = new_batch;

	__zone_set_pageset_high_and_batch(zone, new_high, new_batch);
}
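
/*
 * Worked example (illustrative, assuming 4 KiB pages): with
 * percpu_pagelist_fraction set to 8 on a zone managing 262144 pages,
 * new_high = 262144 / 8 = 32768; new_high / 4 = 8192 exceeds
 * PAGE_SHIFT * 8 = 96, so new_batch is clamped to 96. With the fraction
 * unset, batch comes from zone_batchsize() and high is six times that.
 */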

void __meminit setup_zone_pageset(struct zone *zone)
{
	struct per_cpu_pageset *p;
	int cpu;

	zone->pageset = alloc_percpu(struct per_cpu_pageset);
	for_each_possible_cpu(cpu) {
		p = per_cpu_ptr(zone->pageset, cpu);
		pageset_init(p);
	}

	zone_set_pageset_high_and_batch(zone);
}

/*
 * Allocate per cpu pagesets and initialize them.
 * Before this call only boot pagesets were available.
 */
void __init setup_per_cpu_pageset(void)
{
	struct pglist_data *pgdat;
	struct zone *zone;
	int __maybe_unused cpu;

	for_each_populated_zone(zone)
		setup_zone_pageset(zone);

#ifdef CONFIG_NUMA
	/*
	 * Unpopulated zones continue using the boot pagesets.
	 * The numa stats for these pagesets need to be reset.
	 * Otherwise, they will end up skewing the stats of
	 * the nodes these zones are associated with.
	 */
	for_each_possible_cpu(cpu) {
		struct per_cpu_pageset *pcp = &per_cpu(boot_pageset, cpu);
		memset(pcp->vm_numa_stat_diff, 0,
		       sizeof(pcp->vm_numa_stat_diff));
	}
#endif

	for_each_online_pgdat(pgdat)
		pgdat->per_cpu_nodestats =
			alloc_percpu(struct per_cpu_nodestat);
}

static __meminit void zone_pcp_init(struct zone *zone)
{
	/*
	 * per cpu subsystem is not up at this point. The following code
	 * relies on the ability of the linker to provide the
	 * offset of a (static) per cpu variable into the per cpu area.
	 */
	zone->pageset = &boot_pageset;
	zone->pageset_high = BOOT_PAGESET_HIGH;
	zone->pageset_batch = BOOT_PAGESET_BATCH;

	if (populated_zone(zone))
		printk(KERN_DEBUG "  %s zone: %lu pages, LIFO batch:%u\n",
			zone->name, zone->present_pages,
					 zone_batchsize(zone));
}

void __meminit init_currently_empty_zone(struct zone *zone,
					unsigned long zone_start_pfn,
					unsigned long size)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	int zone_idx = zone_idx(zone) + 1;

	if (zone_idx > pgdat->nr_zones)
		pgdat->nr_zones = zone_idx;

	zone->zone_start_pfn = zone_start_pfn;

	mminit_dprintk(MMINIT_TRACE, "memmap_init",
			"Initialising map node %d zone %lu pfns %lu -> %lu\n",
			pgdat->node_id,
			(unsigned long)zone_idx(zone),
			zone_start_pfn, (zone_start_pfn + size));

	zone_init_free_lists(zone);
	zone->initialized = 1;
}

/**
 * get_pfn_range_for_nid - Return the start and end page frames for a node
 * @nid: The nid to return the range for. If MAX_NUMNODES, the min and max PFN are returned.
 * @start_pfn: Passed by reference. On return, it will have the node start_pfn.
 * @end_pfn: Passed by reference. On return, it will have the node end_pfn.
 *
 * It returns the start and end page frame of a node based on information
 * provided by memblock_set_node(). If called for a node
 * with no available memory, a warning is printed and the start and end
 * PFNs will be 0.
 */
void __init get_pfn_range_for_nid(unsigned int nid,
			unsigned long *start_pfn, unsigned long *end_pfn)
{
	unsigned long this_start_pfn, this_end_pfn;
	int i;

	*start_pfn = -1UL;
	*end_pfn = 0;

	for_each_mem_pfn_range(i, nid, &this_start_pfn, &this_end_pfn, NULL) {
		*start_pfn = min(*start_pfn, this_start_pfn);
		*end_pfn = max(*end_pfn, this_end_pfn);
	}

	if (*start_pfn == -1UL)
		*start_pfn = 0;
}

/*
 * This finds a zone that can be used for ZONE_MOVABLE pages. The
 * assumption is made that zones within a node are ordered in monotonic
 * increasing memory addresses so that the "highest" populated zone is used
 */
static void __init find_usable_zone_for_movable(void)
{
	int zone_index;
	for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
		if (zone_index == ZONE_MOVABLE)
			continue;

		if (arch_zone_highest_possible_pfn[zone_index] >
				arch_zone_lowest_possible_pfn[zone_index])
			break;
	}

	VM_BUG_ON(zone_index == -1);
	movable_zone = zone_index;
}

/*
 * The zone ranges provided by the architecture do not include ZONE_MOVABLE
 * because it is sized independently of architecture. Unlike the other zones,
 * the starting point for ZONE_MOVABLE is not fixed. It may be different
 * in each node depending on the size of each node and how evenly kernelcore
 * is distributed. This helper function adjusts the zone ranges
 * provided by the architecture for a given node by using the end of the
 * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
 * zones within a node are in order of monotonically increasing memory addresses.
 */
static void __init adjust_zone_range_for_zone_movable(int nid,
					unsigned long zone_type,
					unsigned long node_start_pfn,
					unsigned long node_end_pfn,
					unsigned long *zone_start_pfn,
					unsigned long *zone_end_pfn)
{
	/* Only adjust if ZONE_MOVABLE is on this node */
	if (zone_movable_pfn[nid]) {
		/* Size ZONE_MOVABLE */
		if (zone_type == ZONE_MOVABLE) {
			*zone_start_pfn = zone_movable_pfn[nid];
			*zone_end_pfn = min(node_end_pfn,
				arch_zone_highest_possible_pfn[movable_zone]);

6569 6570 6571 6572 6573 6574
		/* Adjust for ZONE_MOVABLE starting within this range */
		} else if (!mirrored_kernelcore &&
			*zone_start_pfn < zone_movable_pfn[nid] &&
			*zone_end_pfn > zone_movable_pfn[nid]) {
			*zone_end_pfn = zone_movable_pfn[nid];

		/* Check if this whole range is within ZONE_MOVABLE */
		} else if (*zone_start_pfn >= zone_movable_pfn[nid])
			*zone_start_pfn = *zone_end_pfn;
	}
}

/*
 * Return the number of pages a zone spans in a node, including holes
 * present_pages = zone_spanned_pages_in_node() - zone_absent_pages_in_node()
 */
static unsigned long __init zone_spanned_pages_in_node(int nid,
					unsigned long zone_type,
					unsigned long node_start_pfn,
					unsigned long node_end_pfn,
					unsigned long *zone_start_pfn,
					unsigned long *zone_end_pfn)
{
	unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
	unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
	/* When hotadding a new node via cpu_up(), the node should be empty */
	if (!node_start_pfn && !node_end_pfn)
		return 0;

	/* Get the start and end of the zone */
	*zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
	*zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
	adjust_zone_range_for_zone_movable(nid, zone_type,
				node_start_pfn, node_end_pfn,
				zone_start_pfn, zone_end_pfn);

	/* Check that this node has pages within the zone's required range */
	if (*zone_end_pfn < node_start_pfn || *zone_start_pfn > node_end_pfn)
		return 0;

	/* Move the zone boundaries inside the node if necessary */
	*zone_end_pfn = min(*zone_end_pfn, node_end_pfn);
	*zone_start_pfn = max(*zone_start_pfn, node_start_pfn);

	/* Return the spanned pages */
	return *zone_end_pfn - *zone_start_pfn;
}

/*
 * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
 * then all holes in the requested range will be accounted for.
 */
unsigned long __init __absent_pages_in_range(int nid,
				unsigned long range_start_pfn,
				unsigned long range_end_pfn)
{
	unsigned long nr_absent = range_end_pfn - range_start_pfn;
	unsigned long start_pfn, end_pfn;
	int i;

	for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
		start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
		end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
		nr_absent -= end_pfn - start_pfn;
	}
	return nr_absent;
}

/**
 * absent_pages_in_range - Return number of page frames in holes within a range
 * @start_pfn: The start PFN to start searching for holes
 * @end_pfn: The end PFN to stop searching for holes
 *
 * Return: the number of page frames in memory holes within a range.
 */
unsigned long __init absent_pages_in_range(unsigned long start_pfn,
							unsigned long end_pfn)
{
	return __absent_pages_in_range(MAX_NUMNODES, start_pfn, end_pfn);
}

/* Return the number of page frames in holes in a zone on a node */
static unsigned long __init zone_absent_pages_in_node(int nid,
					unsigned long zone_type,
					unsigned long node_start_pfn,
					unsigned long node_end_pfn)
{
	unsigned long zone_low = arch_zone_lowest_possible_pfn[zone_type];
	unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
	unsigned long zone_start_pfn, zone_end_pfn;
	unsigned long nr_absent;

	/* When a node is hot-added from cpu_up(), it should start out empty */
	if (!node_start_pfn && !node_end_pfn)
		return 0;

	zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
	zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);

	adjust_zone_range_for_zone_movable(nid, zone_type,
			node_start_pfn, node_end_pfn,
			&zone_start_pfn, &zone_end_pfn);
	nr_absent = __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);

	/*
	 * ZONE_MOVABLE handling.
	 * Treat pages to be ZONE_MOVABLE in ZONE_NORMAL as absent pages
	 * and vice versa.
	 */
	if (mirrored_kernelcore && zone_movable_pfn[nid]) {
		unsigned long start_pfn, end_pfn;
		struct memblock_region *r;

		for_each_mem_region(r) {
			start_pfn = clamp(memblock_region_memory_base_pfn(r),
					  zone_start_pfn, zone_end_pfn);
			end_pfn = clamp(memblock_region_memory_end_pfn(r),
					zone_start_pfn, zone_end_pfn);

			if (zone_type == ZONE_MOVABLE &&
			    memblock_is_mirror(r))
				nr_absent += end_pfn - start_pfn;

			if (zone_type == ZONE_NORMAL &&
			    !memblock_is_mirror(r))
				nr_absent += end_pfn - start_pfn;
		}
	}

	return nr_absent;
}

static void __init calculate_node_totalpages(struct pglist_data *pgdat,
						unsigned long node_start_pfn,
						unsigned long node_end_pfn)
{
	unsigned long realtotalpages = 0, totalpages = 0;
	enum zone_type i;

	for (i = 0; i < MAX_NR_ZONES; i++) {
		struct zone *zone = pgdat->node_zones + i;
		unsigned long zone_start_pfn, zone_end_pfn;
		unsigned long spanned, absent;
		unsigned long size, real_size;

		spanned = zone_spanned_pages_in_node(pgdat->node_id, i,
						     node_start_pfn,
						     node_end_pfn,
						     &zone_start_pfn,
						     &zone_end_pfn);
		absent = zone_absent_pages_in_node(pgdat->node_id, i,
						   node_start_pfn,
						   node_end_pfn);

		size = spanned;
		real_size = size - absent;

		if (size)
			zone->zone_start_pfn = zone_start_pfn;
		else
			zone->zone_start_pfn = 0;
		zone->spanned_pages = size;
		zone->present_pages = real_size;

		totalpages += size;
		realtotalpages += real_size;
	}

	pgdat->node_spanned_pages = totalpages;
	pgdat->node_present_pages = realtotalpages;
	printk(KERN_DEBUG "On node %d totalpages: %lu\n", pgdat->node_id,
							realtotalpages);
}

#ifndef CONFIG_SPARSEMEM
/*
 * Calculate the size of the zone->blockflags rounded to an unsigned long.
 * Start by making sure zonesize is a multiple of pageblock_nr_pages by
 * rounding up. Then use 1 NR_PAGEBLOCK_BITS worth of bits per pageblock,
 * and finally round what is now in bits up to the nearest long in bits,
 * then return it in bytes.
 */
static unsigned long __init usemap_size(unsigned long zone_start_pfn, unsigned long zonesize)
{
	unsigned long usemapsize;

	zonesize += zone_start_pfn & (pageblock_nr_pages-1);
	usemapsize = roundup(zonesize, pageblock_nr_pages);
	usemapsize = usemapsize >> pageblock_order;
	usemapsize *= NR_PAGEBLOCK_BITS;
	usemapsize = roundup(usemapsize, 8 * sizeof(unsigned long));

	return usemapsize / 8;
}
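
/*
 * Worked example for the calculation above (illustrative values, not taken
 * from this file): with 4 KiB pages, pageblock_order = 9 (so
 * pageblock_nr_pages = 512) and NR_PAGEBLOCK_BITS = 4, an aligned 1 GiB
 * zone of 262144 pages gives roundup(262144, 512) >> 9 = 512 pageblocks,
 * 512 * 4 = 2048 bits, which round up to 32 unsigned longs, i.e. 256 bytes.
 */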

static void __ref setup_usemap(struct pglist_data *pgdat,
				struct zone *zone,
				unsigned long zone_start_pfn,
				unsigned long zonesize)
{
	unsigned long usemapsize = usemap_size(zone_start_pfn, zonesize);
	zone->pageblock_flags = NULL;
	if (usemapsize) {
		zone->pageblock_flags =
			memblock_alloc_node(usemapsize, SMP_CACHE_BYTES,
					    pgdat->node_id);
		if (!zone->pageblock_flags)
			panic("Failed to allocate %ld bytes for zone %s pageblock flags on node %d\n",
			      usemapsize, zone->name, pgdat->node_id);
	}
}
#else
static inline void setup_usemap(struct pglist_data *pgdat, struct zone *zone,
				unsigned long zone_start_pfn, unsigned long zonesize) {}
#endif /* CONFIG_SPARSEMEM */

#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE

/* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
void __init set_pageblock_order(void)
{
	unsigned int order;

	/* Check that pageblock_nr_pages has not already been setup */
	if (pageblock_order)
		return;

	if (HPAGE_SHIFT > PAGE_SHIFT)
		order = HUGETLB_PAGE_ORDER;
	else
		order = MAX_ORDER - 1;

	/*
	 * Assume the largest contiguous order of interest is a huge page.
	 * This value may be variable depending on boot parameters on IA64 and
	 * powerpc.
	 */
	pageblock_order = order;
}
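
/*
 * Illustrative example (the numbers are architecture-dependent
 * assumptions): with 4 KiB base pages and 2 MiB huge pages, HPAGE_SHIFT
 * (21) exceeds PAGE_SHIFT (12), so pageblock_order becomes
 * HUGETLB_PAGE_ORDER = 9 and each pageblock covers 512 base pages,
 * i.e. exactly one huge page.
 */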
#else /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */

/*
 * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
 * is unused as pageblock_order is set at compile-time. See
 * include/linux/pageblock-flags.h for the values of pageblock_order based on
 * the kernel config
 */
void __init set_pageblock_order(void)
{
}

#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */

static unsigned long __init calc_memmap_size(unsigned long spanned_pages,
						unsigned long present_pages)
{
	unsigned long pages = spanned_pages;

	/*
	 * Provide a more accurate estimation if there are holes within
	 * the zone and SPARSEMEM is in use. If there are holes within the
	 * zone, each populated memory region may cost us one or two extra
	 * memmap pages due to alignment because memmap pages for each
	 * populated regions may not be naturally aligned on page boundary.
	 * So the (present_pages >> 4) heuristic is a tradeoff for that.
	 */
	if (spanned_pages > present_pages + (present_pages >> 4) &&
	    IS_ENABLED(CONFIG_SPARSEMEM))
		pages = present_pages;

	return PAGE_ALIGN(pages * sizeof(struct page)) >> PAGE_SHIFT;
}
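
/*
 * Worked example (illustrative, assuming a 64-byte struct page and 4 KiB
 * pages): a zone spanning 1048576 pfns with only 524288 present satisfies
 * spanned > present + present/16, so with SPARSEMEM the estimate is based
 * on present_pages: PAGE_ALIGN(524288 * 64) >> PAGE_SHIFT = 8192 pages.
 */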

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void pgdat_init_split_queue(struct pglist_data *pgdat)
{
	struct deferred_split *ds_queue = &pgdat->deferred_split_queue;

	spin_lock_init(&ds_queue->split_queue_lock);
	INIT_LIST_HEAD(&ds_queue->split_queue);
	ds_queue->split_queue_len = 0;
}
#else
static void pgdat_init_split_queue(struct pglist_data *pgdat) {}
#endif

#ifdef CONFIG_COMPACTION
static void pgdat_init_kcompactd(struct pglist_data *pgdat)
{
	init_waitqueue_head(&pgdat->kcompactd_wait);
}
#else
static void pgdat_init_kcompactd(struct pglist_data *pgdat) {}
#endif

static void __meminit pgdat_init_internals(struct pglist_data *pgdat)
{
	pgdat_resize_init(pgdat);

	pgdat_init_split_queue(pgdat);
	pgdat_init_kcompactd(pgdat);

	init_waitqueue_head(&pgdat->kswapd_wait);
	init_waitqueue_head(&pgdat->pfmemalloc_wait);

	pgdat_page_ext_init(pgdat);
	lruvec_init(&pgdat->__lruvec);
}

static void __meminit zone_init_internals(struct zone *zone, enum zone_type idx, int nid,
							unsigned long remaining_pages)
{
	atomic_long_set(&zone->managed_pages, remaining_pages);
	zone_set_nid(zone, nid);
	zone->name = zone_names[idx];
	zone->zone_pgdat = NODE_DATA(nid);
	spin_lock_init(&zone->lock);
	zone_seqlock_init(zone);
	zone_pcp_init(zone);
}

/*
 * Set up the zone data structures
 * - init pgdat internals
 * - init all zones belonging to this node
 *
 * NOTE: this function is only called during memory hotplug
 */
#ifdef CONFIG_MEMORY_HOTPLUG
void __ref free_area_init_core_hotplug(int nid)
{
	enum zone_type z;
	pg_data_t *pgdat = NODE_DATA(nid);

	pgdat_init_internals(pgdat);
	for (z = 0; z < MAX_NR_ZONES; z++)
		zone_init_internals(&pgdat->node_zones[z], z, nid, 0);
}
#endif

/*
 * Set up the zone data structures:
 *   - mark all pages reserved
 *   - mark all memory queues empty
 *   - clear the memory bitmaps
 *
 * NOTE: pgdat should get zeroed by caller.
 * NOTE: this function is only called during early init.
 */
static void __init free_area_init_core(struct pglist_data *pgdat)
{
	enum zone_type j;
	int nid = pgdat->node_id;

	pgdat_init_internals(pgdat);
	pgdat->per_cpu_nodestats = &boot_nodestats;

	for (j = 0; j < MAX_NR_ZONES; j++) {
		struct zone *zone = pgdat->node_zones + j;
		unsigned long size, freesize, memmap_pages;
		unsigned long zone_start_pfn = zone->zone_start_pfn;

		size = zone->spanned_pages;
		freesize = zone->present_pages;

		/*
		 * Adjust freesize so that it accounts for how much memory
		 * is used by this zone for memmap. This affects the watermark
		 * and per-cpu initialisations
		 */
		memmap_pages = calc_memmap_size(size, freesize);
		if (!is_highmem_idx(j)) {
			if (freesize >= memmap_pages) {
				freesize -= memmap_pages;
				if (memmap_pages)
					printk(KERN_DEBUG
					       "  %s zone: %lu pages used for memmap\n",
					       zone_names[j], memmap_pages);
			} else
				pr_warn("  %s zone: %lu pages exceeds freesize %lu\n",
					zone_names[j], memmap_pages, freesize);
		}

		/* Account for reserved pages */
		if (j == 0 && freesize > dma_reserve) {
			freesize -= dma_reserve;
			printk(KERN_DEBUG "  %s zone: %lu pages reserved\n",
					zone_names[0], dma_reserve);
		}

		if (!is_highmem_idx(j))
			nr_kernel_pages += freesize;
		/* Charge for highmem memmap if there are enough kernel pages */
		else if (nr_kernel_pages > memmap_pages * 2)
			nr_kernel_pages -= memmap_pages;
		nr_all_pages += freesize;

		/*
		 * Set an approximate value for lowmem here, it will be adjusted
		 * when the bootmem allocator frees pages into the buddy system.
		 * And all highmem pages will be managed by the buddy system.
		 */
		zone_init_internals(zone, j, nid, freesize);

		if (!size)
			continue;

		set_pageblock_order();
		setup_usemap(pgdat, zone, zone_start_pfn, size);
		init_currently_empty_zone(zone, zone_start_pfn, size);
		memmap_init(size, nid, j, zone_start_pfn);
	}
}

#ifdef CONFIG_FLAT_NODE_MEM_MAP
static void __ref alloc_node_mem_map(struct pglist_data *pgdat)
{
	unsigned long __maybe_unused start = 0;
	unsigned long __maybe_unused offset = 0;

	/* Skip empty nodes */
	if (!pgdat->node_spanned_pages)
		return;

	start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
	offset = pgdat->node_start_pfn - start;
	/* ia64 gets its own node_mem_map, before this, without bootmem */
	if (!pgdat->node_mem_map) {
		unsigned long size, end;
		struct page *map;

		/*
		 * The zone's endpoints aren't required to be MAX_ORDER
		 * aligned but the node_mem_map endpoints must be in order
		 * for the buddy allocator to function correctly.
		 */
		end = pgdat_end_pfn(pgdat);
		end = ALIGN(end, MAX_ORDER_NR_PAGES);
		size =  (end - start) * sizeof(struct page);
		map = memblock_alloc_node(size, SMP_CACHE_BYTES,
					  pgdat->node_id);
		if (!map)
			panic("Failed to allocate %ld bytes for node %d memory map\n",
			      size, pgdat->node_id);
		pgdat->node_mem_map = map + offset;
	}
	pr_debug("%s: node %d, pgdat %08lx, node_mem_map %08lx\n",
				__func__, pgdat->node_id, (unsigned long)pgdat,
				(unsigned long)pgdat->node_mem_map);
#ifndef CONFIG_NEED_MULTIPLE_NODES
	/*
	 * With no DISCONTIG, the global mem_map is just set as node 0's
	 */
	if (pgdat == NODE_DATA(0)) {
		mem_map = NODE_DATA(0)->node_mem_map;
		if (page_to_pfn(mem_map) != pgdat->node_start_pfn)
			mem_map -= offset;
	}
#endif
}
#else
static void __ref alloc_node_mem_map(struct pglist_data *pgdat) { }
#endif /* CONFIG_FLAT_NODE_MEM_MAP */

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
static inline void pgdat_set_deferred_range(pg_data_t *pgdat)
{
	pgdat->first_deferred_pfn = ULONG_MAX;
}
#else
static inline void pgdat_set_deferred_range(pg_data_t *pgdat) {}
#endif

static void __init free_area_init_node(int nid)
{
	pg_data_t *pgdat = NODE_DATA(nid);
	unsigned long start_pfn = 0;
	unsigned long end_pfn = 0;

	/* pg_data_t should be reset to zero when it's allocated */
	WARN_ON(pgdat->nr_zones || pgdat->kswapd_highest_zoneidx);

	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);

	pgdat->node_id = nid;
	pgdat->node_start_pfn = start_pfn;
	pgdat->per_cpu_nodestats = NULL;

	pr_info("Initmem setup node %d [mem %#018Lx-%#018Lx]\n", nid,
		(u64)start_pfn << PAGE_SHIFT,
		end_pfn ? ((u64)end_pfn << PAGE_SHIFT) - 1 : 0);
	calculate_node_totalpages(pgdat, start_pfn, end_pfn);

	alloc_node_mem_map(pgdat);
	pgdat_set_deferred_range(pgdat);

	free_area_init_core(pgdat);
}

void __init free_area_init_memoryless_node(int nid)
{
	free_area_init_node(nid);
}

#if !defined(CONFIG_FLAT_NODE_MEM_MAP)
/*
 * Initialize all valid struct pages in the range [spfn, epfn) and mark them
 * PageReserved(). Return the number of struct pages that were initialized.
 */
static u64 __init init_unavailable_range(unsigned long spfn, unsigned long epfn)
{
	unsigned long pfn;
	u64 pgcnt = 0;

	for (pfn = spfn; pfn < epfn; pfn++) {
		if (!pfn_valid(ALIGN_DOWN(pfn, pageblock_nr_pages))) {
			pfn = ALIGN_DOWN(pfn, pageblock_nr_pages)
				+ pageblock_nr_pages - 1;
			continue;
		}
		/*
		 * Use a fake node/zone (0) for now. Some of these pages
		 * (in memblock.reserved but not in memblock.memory) will
		 * get re-initialized via reserve_bootmem_region() later.
		 */
		__init_single_page(pfn_to_page(pfn), pfn, 0, 0);
		__SetPageReserved(pfn_to_page(pfn));
		pgcnt++;
	}

	return pgcnt;
}

/*
 * Only struct pages that are backed by physical memory are zeroed and
 * initialized by going through __init_single_page(). But, there are some
 * struct pages which are reserved in memblock allocator and their fields
 * may be accessed (for example page_to_pfn() on some configuration accesses
 * flags). We must explicitly initialize those struct pages.
 *
 * This function also addresses a similar issue where struct pages are left
 * uninitialized because the physical address range is not covered by
 * memblock.memory or memblock.reserved. That could happen when memblock
 * layout is manually configured via memmap=, or when the highest physical
 * address (max_pfn) does not end on a section boundary.
 */
static void __init init_unavailable_mem(void)
{
	phys_addr_t start, end;
	u64 i, pgcnt;
	phys_addr_t next = 0;

	/*
	 * Loop through unavailable ranges not covered by memblock.memory.
	 */
	pgcnt = 0;
	for_each_mem_range(i, &start, &end) {
		if (next < start)
			pgcnt += init_unavailable_range(PFN_DOWN(next),
							PFN_UP(start));
		next = end;
	}

	/*
	 * Early sections always have a fully populated memmap for the whole
	 * section - see pfn_valid(). If the last section has holes at the
	 * end and that section is marked "online", the memmap will be
	 * considered initialized. Make sure that memmap has a well defined
	 * state.
	 */
	pgcnt += init_unavailable_range(PFN_DOWN(next),
					round_up(max_pfn, PAGES_PER_SECTION));

	/*
	 * Struct pages that do not have backing memory. This could be because
	 * firmware is using some of this memory, or for some other reasons.
	 */
	if (pgcnt)
		pr_info("Zeroed struct page in unavailable ranges: %lld pages", pgcnt);
}
#else
static inline void __init init_unavailable_mem(void)
{
}
#endif /* !CONFIG_FLAT_NODE_MEM_MAP */

#if MAX_NUMNODES > 1
/*
 * Figure out the number of possible node ids.
 */
void __init setup_nr_node_ids(void)
{
	unsigned int highest;

	highest = find_last_bit(node_possible_map.bits, MAX_NUMNODES);
	nr_node_ids = highest + 1;
}
#endif

/**
 * node_map_pfn_alignment - determine the maximum internode alignment
 *
 * This function should be called after node map is populated and sorted.
 * It calculates the maximum power of two alignment which can distinguish
 * all the nodes.
 *
 * For example, if all nodes are 1GiB and aligned to 1GiB, the return value
 * would indicate 1GiB alignment with (1 << (30 - PAGE_SHIFT)).  If the
 * nodes are shifted by 256MiB, 256MiB.  Note that if only the last node is
 * shifted, 1GiB is enough and this function will indicate so.
 *
 * This is used to test whether pfn -> nid mapping of the chosen memory
 * model has fine enough granularity to avoid incorrect mapping for the
 * populated node map.
 *
 * Return: the determined alignment in pfn's.  0 if there is no alignment
 * requirement (single node).
 */
unsigned long __init node_map_pfn_alignment(void)
{
	unsigned long accl_mask = 0, last_end = 0;
	unsigned long start, end, mask;
	int last_nid = NUMA_NO_NODE;
	int i, nid;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid) {
		if (!start || last_nid < 0 || last_nid == nid) {
			last_nid = nid;
			last_end = end;
			continue;
		}

		/*
		 * Start with a mask granular enough to pin-point to the
		 * start pfn and tick off bits one-by-one until it becomes
		 * too coarse to separate the current node from the last.
		 */
		mask = ~((1 << __ffs(start)) - 1);
		while (mask && last_end <= (start & (mask << 1)))
			mask <<= 1;

		/* accumulate all internode masks */
		accl_mask |= mask;
	}

	/* convert mask to number of pages */
	return ~accl_mask + 1;
}

/**
 * find_min_pfn_with_active_regions - Find the minimum PFN registered
 *
 * Return: the minimum PFN based on information provided via
 * memblock_set_node().
 */
unsigned long __init find_min_pfn_with_active_regions(void)
{
	return PHYS_PFN(memblock_start_of_DRAM());
}

/*
 * early_calculate_totalpages()
 * Sum pages in active regions for movable zone.
 * Populate N_MEMORY for calculating usable_nodes.
 */
static unsigned long __init early_calculate_totalpages(void)
{
	unsigned long totalpages = 0;
	unsigned long start_pfn, end_pfn;
	int i, nid;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
		unsigned long pages = end_pfn - start_pfn;

		totalpages += pages;
		if (pages)
			node_set_state(nid, N_MEMORY);
	}
	return totalpages;
}

/*
 * Find the PFN the Movable zone begins in each node. Kernel memory
 * is spread evenly between nodes as long as the nodes have enough
 * memory. When they don't, some nodes will have more kernelcore than
 * others
 */
static void __init find_zone_movable_pfns_for_nodes(void)
{
	int i, nid;
	unsigned long usable_startpfn;
	unsigned long kernelcore_node, kernelcore_remaining;
	/* save the state before borrow the nodemask */
	nodemask_t saved_node_state = node_states[N_MEMORY];
	unsigned long totalpages = early_calculate_totalpages();
	int usable_nodes = nodes_weight(node_states[N_MEMORY]);
	struct memblock_region *r;

	/* Need to find movable_zone earlier when movable_node is specified. */
	find_usable_zone_for_movable();

	/*
	 * If movable_node is specified, ignore kernelcore and movablecore
	 * options.
	 */
	if (movable_node_is_enabled()) {
		for_each_mem_region(r) {
			if (!memblock_is_hotpluggable(r))
				continue;

			nid = memblock_get_region_node(r);

			usable_startpfn = PFN_DOWN(r->base);
			zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
				min(usable_startpfn, zone_movable_pfn[nid]) :
				usable_startpfn;
		}

		goto out2;
	}

	/*
	 * If kernelcore=mirror is specified, ignore movablecore option
	 */
	if (mirrored_kernelcore) {
		bool mem_below_4gb_not_mirrored = false;

		for_each_mem_region(r) {
			if (memblock_is_mirror(r))
				continue;

			nid = memblock_get_region_node(r);

			usable_startpfn = memblock_region_memory_base_pfn(r);

			if (usable_startpfn < 0x100000) {
				mem_below_4gb_not_mirrored = true;
				continue;
			}

			zone_movable_pfn[nid] = zone_movable_pfn[nid] ?
				min(usable_startpfn, zone_movable_pfn[nid]) :
				usable_startpfn;
		}

		if (mem_below_4gb_not_mirrored)
			pr_warn("This configuration results in unmirrored kernel memory.\n");

		goto out2;
	}

	/*
	 * If kernelcore=nn% or movablecore=nn% was specified, calculate the
	 * amount of necessary memory.
	 */
	if (required_kernelcore_percent)
		required_kernelcore = (totalpages * 100 * required_kernelcore_percent) /
				       10000UL;
	if (required_movablecore_percent)
		required_movablecore = (totalpages * 100 * required_movablecore_percent) /
					10000UL;

	/*
	 * If movablecore= was specified, calculate what size of
	 * kernelcore that corresponds so that memory usable for
	 * any allocation type is evenly spread. If both kernelcore
	 * and movablecore are specified, then the value of kernelcore
	 * will be used for required_kernelcore if it's greater than
	 * what movablecore would have allowed.
	 */
	if (required_movablecore) {
		unsigned long corepages;

		/*
		 * Round-up so that ZONE_MOVABLE is at least as large as what
		 * was requested by the user
		 */
		required_movablecore =
			roundup(required_movablecore, MAX_ORDER_NR_PAGES);
		required_movablecore = min(totalpages, required_movablecore);
		corepages = totalpages - required_movablecore;

		required_kernelcore = max(required_kernelcore, corepages);
	}

	/*
	 * If kernelcore was not specified or kernelcore size is larger
	 * than totalpages, there is no ZONE_MOVABLE.
	 */
	if (!required_kernelcore || required_kernelcore >= totalpages)
		goto out;

	/* usable_startpfn is the lowest possible pfn ZONE_MOVABLE can be at */
	usable_startpfn = arch_zone_lowest_possible_pfn[movable_zone];

restart:
	/* Spread kernelcore memory as evenly as possible throughout nodes */
	kernelcore_node = required_kernelcore / usable_nodes;
	for_each_node_state(nid, N_MEMORY) {
		unsigned long start_pfn, end_pfn;

		/*
		 * Recalculate kernelcore_node if the division per node
		 * now exceeds what is necessary to satisfy the requested
		 * amount of memory for the kernel
		 */
		if (required_kernelcore < kernelcore_node)
			kernelcore_node = required_kernelcore / usable_nodes;

		/*
		 * As the map is walked, we track how much memory is usable
		 * by the kernel using kernelcore_remaining. When it is
		 * 0, the rest of the node is usable by ZONE_MOVABLE
		 */
		kernelcore_remaining = kernelcore_node;

		/* Go through each range of PFNs within this node */
		for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
			unsigned long size_pages;

			start_pfn = max(start_pfn, zone_movable_pfn[nid]);
			if (start_pfn >= end_pfn)
				continue;

			/* Account for what is only usable for kernelcore */
			if (start_pfn < usable_startpfn) {
				unsigned long kernel_pages;
				kernel_pages = min(end_pfn, usable_startpfn)
								- start_pfn;

				kernelcore_remaining -= min(kernel_pages,
							kernelcore_remaining);
				required_kernelcore -= min(kernel_pages,
							required_kernelcore);

				/* Continue if range is now fully accounted */
				if (end_pfn <= usable_startpfn) {

					/*
					 * Push zone_movable_pfn to the end so
					 * that if we have to rebalance
					 * kernelcore across nodes, we will
					 * not double account here
					 */
					zone_movable_pfn[nid] = end_pfn;
					continue;
				}
				start_pfn = usable_startpfn;
			}

			/*
			 * The usable PFN range for ZONE_MOVABLE is from
			 * start_pfn->end_pfn. Calculate size_pages as the
			 * number of pages used as kernelcore
			 */
			size_pages = end_pfn - start_pfn;
			if (size_pages > kernelcore_remaining)
				size_pages = kernelcore_remaining;
			zone_movable_pfn[nid] = start_pfn + size_pages;

			/*
			 * Some kernelcore has been met, update counts and
			 * break if the kernelcore for this node has been
			 * satisfied
			 */
			required_kernelcore -= min(required_kernelcore,
								size_pages);
			kernelcore_remaining -= size_pages;
			if (!kernelcore_remaining)
				break;
		}
	}

	/*
	 * If there is still required_kernelcore, we do another pass with one
	 * less node in the count. This will push zone_movable_pfn[nid] further
	 * along on the nodes that still have memory until kernelcore is
	 * satisfied
	 */
	usable_nodes--;
	if (usable_nodes && required_kernelcore > usable_nodes)
		goto restart;

out2:
	/* Align start of ZONE_MOVABLE on all nids to MAX_ORDER_NR_PAGES */
	for (nid = 0; nid < MAX_NUMNODES; nid++)
		zone_movable_pfn[nid] =
			roundup(zone_movable_pfn[nid], MAX_ORDER_NR_PAGES);

out:
	/* restore the node_state */
	node_states[N_MEMORY] = saved_node_state;
}
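
/*
 * Illustrative outcome of the pass above (assumed numbers): with
 * kernelcore=2G on a machine with two equally sized, sufficiently large
 * nodes, roughly 1G per node is kept for the kernel zones, and
 * zone_movable_pfn[nid] lands at the first pfn of each node's remainder,
 * which then becomes that node's ZONE_MOVABLE.
 */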

/* Any regular or high memory on that node ? */
static void check_for_memory(pg_data_t *pgdat, int nid)
{
	enum zone_type zone_type;

	for (zone_type = 0; zone_type <= ZONE_MOVABLE - 1; zone_type++) {
		struct zone *zone = &pgdat->node_zones[zone_type];
		if (populated_zone(zone)) {
			if (IS_ENABLED(CONFIG_HIGHMEM))
				node_set_state(nid, N_HIGH_MEMORY);
			if (zone_type <= ZONE_NORMAL)
				node_set_state(nid, N_NORMAL_MEMORY);
			break;
		}
	}
}

/*
 * Some architectures, e.g. ARC, may have ZONE_HIGHMEM below ZONE_NORMAL. For
 * such cases we allow max_zone_pfn to be sorted in descending order.
 */
bool __weak arch_has_descending_max_zone_pfns(void)
{
	return false;
}

/**
 * free_area_init - Initialise all pg_data_t and zone data
 * @max_zone_pfn: an array of max PFNs for each zone
 *
 * This will call free_area_init_node() for each active node in the system.
 * Using the page ranges provided by memblock_set_node(), the size of each
 * zone in each node and their holes is calculated. If the maximum PFN
 * between two adjacent zones match, it is assumed that the zone is empty.
 * For example, if arch_max_dma_pfn == arch_max_dma32_pfn, it is assumed
 * that arch_max_dma32_pfn has no pages. It is also assumed that a zone
 * starts where the previous one ended. For example, ZONE_DMA32 starts
 * at arch_max_dma_pfn.
 */
void __init free_area_init(unsigned long *max_zone_pfn)
{
	unsigned long start_pfn, end_pfn;
	int i, nid, zone;
	bool descending;

	/* Record where the zone boundaries are */
	memset(arch_zone_lowest_possible_pfn, 0,
				sizeof(arch_zone_lowest_possible_pfn));
	memset(arch_zone_highest_possible_pfn, 0,
				sizeof(arch_zone_highest_possible_pfn));

	start_pfn = find_min_pfn_with_active_regions();
	descending = arch_has_descending_max_zone_pfns();

	for (i = 0; i < MAX_NR_ZONES; i++) {
		if (descending)
			zone = MAX_NR_ZONES - i - 1;
		else
			zone = i;

		if (zone == ZONE_MOVABLE)
			continue;

		end_pfn = max(max_zone_pfn[zone], start_pfn);
		arch_zone_lowest_possible_pfn[zone] = start_pfn;
		arch_zone_highest_possible_pfn[zone] = end_pfn;

		start_pfn = end_pfn;
	}

	/* Find the PFNs that ZONE_MOVABLE begins at in each node */
	memset(zone_movable_pfn, 0, sizeof(zone_movable_pfn));
	find_zone_movable_pfns_for_nodes();

	/* Print out the zone ranges */
	pr_info("Zone ranges:\n");
	for (i = 0; i < MAX_NR_ZONES; i++) {
		if (i == ZONE_MOVABLE)
			continue;
		pr_info("  %-8s ", zone_names[i]);
		if (arch_zone_lowest_possible_pfn[i] ==
				arch_zone_highest_possible_pfn[i])
			pr_cont("empty\n");
		else
			pr_cont("[mem %#018Lx-%#018Lx]\n",
				(u64)arch_zone_lowest_possible_pfn[i]
					<< PAGE_SHIFT,
				((u64)arch_zone_highest_possible_pfn[i]
					<< PAGE_SHIFT) - 1);
	}

	/* Print out the PFNs ZONE_MOVABLE begins at in each node */
	pr_info("Movable zone start for each node\n");
	for (i = 0; i < MAX_NUMNODES; i++) {
		if (zone_movable_pfn[i])
			pr_info("  Node %d: %#018Lx\n", i,
			       (u64)zone_movable_pfn[i] << PAGE_SHIFT);
	}

	/*
	 * Print out the early node map, and initialize the
	 * subsection-map relative to active online memory ranges to
	 * enable future "sub-section" extensions of the memory map.
	 */
	pr_info("Early memory node ranges\n");
	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
		pr_info("  node %3d: [mem %#018Lx-%#018Lx]\n", nid,
			(u64)start_pfn << PAGE_SHIFT,
			((u64)end_pfn << PAGE_SHIFT) - 1);
		subsection_map_init(start_pfn, end_pfn - start_pfn);
	}

	/* Initialise every node */
	mminit_verify_pageflags_layout();
	setup_nr_node_ids();
	init_unavailable_mem();
	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		free_area_init_node(nid);

		/* Any memory on that node */
		if (pgdat->node_present_pages)
			node_set_state(nid, N_MEMORY);
		check_for_memory(pgdat, nid);
	}
}

static int __init cmdline_parse_core(char *p, unsigned long *core,
				     unsigned long *percent)
{
	unsigned long long coremem;
	char *endptr;

	if (!p)
		return -EINVAL;

	/* Value may be a percentage of total memory, otherwise bytes */
	coremem = simple_strtoull(p, &endptr, 0);
	if (*endptr == '%') {
		/* Paranoid check for percent values greater than 100 */
		WARN_ON(coremem > 100);

		*percent = coremem;
	} else {
		coremem = memparse(p, &p);
		/* Paranoid check that UL is enough for the coremem value */
		WARN_ON((coremem >> PAGE_SHIFT) > ULONG_MAX);

		*core = coremem >> PAGE_SHIFT;
		*percent = 0UL;
	}
	return 0;
}
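
/*
 * Usage sketch (example boot parameters, not from this file):
 *   kernelcore=512M  ->  *core = (512M in bytes) >> PAGE_SHIFT, *percent = 0
 *   kernelcore=30%   ->  *percent = 30, *core is left untouched
 * An absolute size and a percentage are distinguished purely by the
 * trailing '%' character.
 */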

/*
 * kernelcore=size sets the amount of memory set aside for allocations that
 * cannot be reclaimed or migrated.
 */
static int __init cmdline_parse_kernelcore(char *p)
{
	/* parse kernelcore=mirror */
	if (parse_option_str(p, "mirror")) {
		mirrored_kernelcore = true;
		return 0;
	}

	return cmdline_parse_core(p, &required_kernelcore,
				  &required_kernelcore_percent);
}

/*
 * movablecore=size sets the amount of memory set aside for allocations that
 * can be reclaimed or migrated.
 */
static int __init cmdline_parse_movablecore(char *p)
{
	return cmdline_parse_core(p, &required_movablecore,
				  &required_movablecore_percent);
}

early_param("kernelcore", cmdline_parse_kernelcore);
early_param("movablecore", cmdline_parse_movablecore);

void adjust_managed_page_count(struct page *page, long count)
{
	atomic_long_add(count, &page_zone(page)->managed_pages);
	totalram_pages_add(count);
#ifdef CONFIG_HIGHMEM
	if (PageHighMem(page))
		totalhigh_pages_add(count);
#endif
}
EXPORT_SYMBOL(adjust_managed_page_count);

unsigned long free_reserved_area(void *start, void *end, int poison, const char *s)
{
	void *pos;
	unsigned long pages = 0;

	start = (void *)PAGE_ALIGN((unsigned long)start);
	end = (void *)((unsigned long)end & PAGE_MASK);
	for (pos = start; pos < end; pos += PAGE_SIZE, pages++) {
		struct page *page = virt_to_page(pos);
		void *direct_map_addr;

		/*
		 * 'direct_map_addr' might be different from 'pos'
		 * because some architectures' virt_to_page()
		 * work with aliases.  Getting the direct map
		 * address ensures that we get a _writeable_
		 * alias for the memset().
		 */
		direct_map_addr = page_address(page);
		/*
		 * Perform a kasan-unchecked memset() since this memory
		 * has not been initialized.
		 */
		direct_map_addr = kasan_reset_tag(direct_map_addr);
		if ((unsigned int)poison <= 0xFF)
			memset(direct_map_addr, poison, PAGE_SIZE);

		free_reserved_page(page);
	}

	if (pages && s)
		pr_info("Freeing %s memory: %ldK\n",
			s, pages << (PAGE_SHIFT - 10));

	return pages;
}

#ifdef	CONFIG_HIGHMEM
void free_highmem_page(struct page *page)
{
	__free_reserved_page(page);
	totalram_pages_inc();
	atomic_long_inc(&page_zone(page)->managed_pages);
	totalhigh_pages_inc();
}
#endif


void __init mem_init_print_info(const char *str)
{
	unsigned long physpages, codesize, datasize, rosize, bss_size;
	unsigned long init_code_size, init_data_size;

	physpages = get_num_physpages();
	codesize = _etext - _stext;
	datasize = _edata - _sdata;
	rosize = __end_rodata - __start_rodata;
	bss_size = __bss_stop - __bss_start;
	init_data_size = __init_end - __init_begin;
	init_code_size = _einittext - _sinittext;

	/*
	 * Detect special cases and adjust section sizes accordingly:
	 * 1) .init.* may be embedded into .data sections
	 * 2) .init.text.* may be out of [__init_begin, __init_end],
	 *    please refer to arch/tile/kernel/vmlinux.lds.S.
	 * 3) .rodata.* may be embedded into .text or .data sections.
	 */
#define adj_init_size(start, end, size, pos, adj) \
	do { \
		if (start <= pos && pos < end && size > adj) \
			size -= adj; \
	} while (0)

	adj_init_size(__init_begin, __init_end, init_data_size,
		     _sinittext, init_code_size);
	adj_init_size(_stext, _etext, codesize, _sinittext, init_code_size);
	adj_init_size(_sdata, _edata, datasize, __init_begin, init_data_size);
	adj_init_size(_stext, _etext, codesize, __start_rodata, rosize);
	adj_init_size(_sdata, _edata, datasize, __start_rodata, rosize);

#undef	adj_init_size

	pr_info("Memory: %luK/%luK available (%luK kernel code, %luK rwdata, %luK rodata, %luK init, %luK bss, %luK reserved, %luK cma-reserved"
#ifdef	CONFIG_HIGHMEM
		", %luK highmem"
#endif
		"%s%s)\n",
		nr_free_pages() << (PAGE_SHIFT - 10),
		physpages << (PAGE_SHIFT - 10),
		codesize >> 10, datasize >> 10, rosize >> 10,
		(init_data_size + init_code_size) >> 10, bss_size >> 10,
		(physpages - totalram_pages() - totalcma_pages) << (PAGE_SHIFT - 10),
		totalcma_pages << (PAGE_SHIFT - 10),
#ifdef	CONFIG_HIGHMEM
		totalhigh_pages() << (PAGE_SHIFT - 10),
#endif
		str ? ", " : "", str ? str : "");
}

/**
 * set_dma_reserve - set the specified number of pages reserved in the first zone
 * @new_dma_reserve: The number of pages to mark reserved
 *
 * The per-cpu batchsize and zone watermarks are determined by managed_pages.
 * In the DMA zone, a significant percentage may be consumed by kernel image
 * and other unfreeable allocations which can skew the watermarks badly. This
 * function may optionally be used to account for unfreeable pages in the
 * first zone (e.g., ZONE_DMA). The effect will be lower watermarks and
 * smaller per-cpu batchsize.
 */
void __init set_dma_reserve(unsigned long new_dma_reserve)
{
	dma_reserve = new_dma_reserve;
}

static int page_alloc_cpu_dead(unsigned int cpu)
{

	lru_add_drain_cpu(cpu);
	drain_pages(cpu);

	/*
	 * Spill the event counters of the dead processor
	 * into the current processors event counters.
	 * This artificially elevates the count of the current
	 * processor.
	 */
	vm_events_fold_cpu(cpu);

	/*
	 * Zero the differential counters of the dead processor
	 * so that the vm statistics are consistent.
	 *
	 * This is only okay since the processor is dead and cannot
	 * race with what we are doing.
	 */
	cpu_vm_stats_fold(cpu);
	return 0;
}

#ifdef CONFIG_NUMA
int hashdist = HASHDIST_DEFAULT;

static int __init set_hashdist(char *str)
{
	if (!str)
		return 0;
	hashdist = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("hashdist=", set_hashdist);
#endif

void __init page_alloc_init(void)
{
	int ret;

#ifdef CONFIG_NUMA
	if (num_node_state(N_MEMORY) == 1)
		hashdist = 0;
#endif

	ret = cpuhp_setup_state_nocalls(CPUHP_PAGE_ALLOC_DEAD,
					"mm/page_alloc:dead", NULL,
					page_alloc_cpu_dead);
	WARN_ON(ret < 0);
}

/*
 * calculate_totalreserve_pages - called when sysctl_lowmem_reserve_ratio
 *	or min_free_kbytes changes.
 */
static void calculate_totalreserve_pages(void)
{
	struct pglist_data *pgdat;
	unsigned long reserve_pages = 0;
	enum zone_type i, j;

	for_each_online_pgdat(pgdat) {

		pgdat->totalreserve_pages = 0;

		for (i = 0; i < MAX_NR_ZONES; i++) {
			struct zone *zone = pgdat->node_zones + i;
			long max = 0;
			unsigned long managed_pages = zone_managed_pages(zone);

			/* Find valid and maximum lowmem_reserve in the zone */
			for (j = i; j < MAX_NR_ZONES; j++) {
				if (zone->lowmem_reserve[j] > max)
					max = zone->lowmem_reserve[j];
			}

			/* we treat the high watermark as reserved pages. */
			max += high_wmark_pages(zone);

			if (max > managed_pages)
				max = managed_pages;

			pgdat->totalreserve_pages += max;

			reserve_pages += max;
		}
	}
	totalreserve_pages = reserve_pages;
}
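
/*
 * Worked example (illustrative numbers): a zone whose largest
 * lowmem_reserve[] entry is 1024 pages and whose high watermark is 2048
 * pages contributes 1024 + 2048 = 3072 pages, capped at its managed
 * pages, to its node's totalreserve_pages.
 */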

/*
 * setup_per_zone_lowmem_reserve - called whenever
 *	sysctl_lowmem_reserve_ratio changes.  Ensures that each zone
 *	has a correct pages reserved value, so an adequate number of
 *	pages are left in the zone after a successful __alloc_pages().
 */
static void setup_per_zone_lowmem_reserve(void)
{
	struct pglist_data *pgdat;
	enum zone_type i, j;

	for_each_online_pgdat(pgdat) {
		for (i = 0; i < MAX_NR_ZONES - 1; i++) {
			struct zone *zone = &pgdat->node_zones[i];
			int ratio = sysctl_lowmem_reserve_ratio[i];
			bool clear = !ratio || !zone_managed_pages(zone);
			unsigned long managed_pages = 0;

			for (j = i + 1; j < MAX_NR_ZONES; j++) {
				if (clear) {
					zone->lowmem_reserve[j] = 0;
				} else {
					struct zone *upper_zone = &pgdat->node_zones[j];

					managed_pages += zone_managed_pages(upper_zone);
					zone->lowmem_reserve[j] = managed_pages / ratio;
				}
			}
		}
	}

	/* update totalreserve_pages */
	calculate_totalreserve_pages();
}
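
/*
 * Worked example (illustrative sizes): with a lowmem_reserve ratio of 256
 * for ZONE_DMA and a ZONE_NORMAL of 1048576 managed pages above it,
 * ZONE_DMA gets lowmem_reserve[ZONE_NORMAL] = 1048576 / 256 = 4096 pages
 * withheld from NORMAL allocations that fall back into DMA.
 */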

static void __setup_per_zone_wmarks(void)
{
	unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
	unsigned long lowmem_pages = 0;
	struct zone *zone;
	unsigned long flags;

	/* Calculate total number of !ZONE_HIGHMEM pages */
	for_each_zone(zone) {
		if (!is_highmem(zone))
			lowmem_pages += zone_managed_pages(zone);
	}

	for_each_zone(zone) {
		u64 tmp;

		spin_lock_irqsave(&zone->lock, flags);
		tmp = (u64)pages_min * zone_managed_pages(zone);
		do_div(tmp, lowmem_pages);
		if (is_highmem(zone)) {
			/*
			 * __GFP_HIGH and PF_MEMALLOC allocations usually don't
			 * need highmem pages, so cap pages_min to a small
			 * value here.
			 *
			 * The WMARK_HIGH-WMARK_LOW and (WMARK_LOW-WMARK_MIN)
			 * deltas control async page reclaim, and so should
			 * not be capped for highmem.
			 */
			unsigned long min_pages;

			min_pages = zone_managed_pages(zone) / 1024;
			min_pages = clamp(min_pages, SWAP_CLUSTER_MAX, 128UL);
			zone->_watermark[WMARK_MIN] = min_pages;
		} else {
			/*
			 * If it's a lowmem zone, reserve a number of pages
			 * proportionate to the zone's size.
			 */
			zone->_watermark[WMARK_MIN] = tmp;
		}

		/*
		 * Set the kswapd watermarks distance according to the
		 * scale factor in proportion to available memory, but
		 * ensure a minimum size on small systems.
		 */
		tmp = max_t(u64, tmp >> 2,
			    mult_frac(zone_managed_pages(zone),
				      watermark_scale_factor, 10000));

		zone->watermark_boost = 0;
		zone->_watermark[WMARK_LOW]  = min_wmark_pages(zone) + tmp;
		zone->_watermark[WMARK_HIGH] = min_wmark_pages(zone) + tmp * 2;

		spin_unlock_irqrestore(&zone->lock, flags);
	}

	/* update totalreserve_pages */
	calculate_totalreserve_pages();
}
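
/*
 * Worked example (illustrative values): with pages_min = 16384 spread over
 * 4194304 lowmem pages, a !highmem zone managing 1048576 pages gets
 * WMARK_MIN = 16384 * 1048576 / 4194304 = 4096. With
 * watermark_scale_factor = 10, tmp = max(4096 / 4, 1048576 * 10 / 10000)
 * = 1048, so WMARK_LOW = 5144 and WMARK_HIGH = 6192.
 */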

/**
 * setup_per_zone_wmarks - called when min_free_kbytes changes
 * or when memory is hot-{added|removed}
 *
 * Ensures that the watermark[min,low,high] values for each zone are set
 * correctly with respect to min_free_kbytes.
 */
void setup_per_zone_wmarks(void)
{
	static DEFINE_SPINLOCK(lock);

	spin_lock(&lock);
	__setup_per_zone_wmarks();
	spin_unlock(&lock);
}

/*
 * Initialise min_free_kbytes.
 *
 * For small machines we want it small (128k min).  For large machines
 * we want it large (256MB max).  But it is not linear, because network
 * bandwidth does not increase linearly with machine size.  We use
 *
 *	min_free_kbytes = 4 * sqrt(lowmem_kbytes), for better accuracy:
 *	min_free_kbytes = sqrt(lowmem_kbytes * 16)
 *
 * which yields
 *
 * 16MB:	512k
 * 32MB:	724k
 * 64MB:	1024k
 * 128MB:	1448k
 * 256MB:	2048k
 * 512MB:	2896k
 * 1024MB:	4096k
 * 2048MB:	5792k
 * 4096MB:	8192k
 * 8192MB:	11584k
 * 16384MB:	16384k
 */
int __meminit init_per_zone_wmark_min(void)
{
	unsigned long lowmem_kbytes;
	int new_min_free_kbytes;

	lowmem_kbytes = nr_free_buffer_pages() * (PAGE_SIZE >> 10);
	new_min_free_kbytes = int_sqrt(lowmem_kbytes * 16);

	if (new_min_free_kbytes > user_min_free_kbytes) {
		min_free_kbytes = new_min_free_kbytes;
		if (min_free_kbytes < 128)
			min_free_kbytes = 128;
		if (min_free_kbytes > 262144)
			min_free_kbytes = 262144;
	} else {
		pr_warn("min_free_kbytes is not updated to %d because user defined value %d is preferred\n",
				new_min_free_kbytes, user_min_free_kbytes);
	}
	setup_per_zone_wmarks();
	refresh_zone_stat_thresholds();
	setup_per_zone_lowmem_reserve();

#ifdef CONFIG_NUMA
	setup_min_unmapped_ratio();
	setup_min_slab_ratio();
#endif

	khugepaged_min_free_kbytes_update();

	return 0;
}
postcore_initcall(init_per_zone_wmark_min)

/*
 * min_free_kbytes_sysctl_handler - just a wrapper around proc_dointvec() so
 *	that we can call two helper functions whenever min_free_kbytes
 *	changes.
 */
int min_free_kbytes_sysctl_handler(struct ctl_table *table, int write,
		void *buffer, size_t *length, loff_t *ppos)
{
	int rc;

	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (rc)
		return rc;

	if (write) {
		user_min_free_kbytes = min_free_kbytes;
		setup_per_zone_wmarks();
	}
	return 0;
}

int watermark_scale_factor_sysctl_handler(struct ctl_table *table, int write,
		void *buffer, size_t *length, loff_t *ppos)
{
	int rc;

	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (rc)
		return rc;

	if (write)
		setup_per_zone_wmarks();

	return 0;
}

#ifdef CONFIG_NUMA
static void setup_min_unmapped_ratio(void)
{
	pg_data_t *pgdat;
	struct zone *zone;

	for_each_online_pgdat(pgdat)
		pgdat->min_unmapped_pages = 0;

	for_each_zone(zone)
		zone->zone_pgdat->min_unmapped_pages += (zone_managed_pages(zone) *
						         sysctl_min_unmapped_ratio) / 100;
}


int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *table, int write,
		void *buffer, size_t *length, loff_t *ppos)
{
	int rc;

	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (rc)
		return rc;

	setup_min_unmapped_ratio();

	return 0;
}

static void setup_min_slab_ratio(void)
{
	pg_data_t *pgdat;
	struct zone *zone;

	for_each_online_pgdat(pgdat)
		pgdat->min_slab_pages = 0;

	for_each_zone(zone)
		zone->zone_pgdat->min_slab_pages += (zone_managed_pages(zone) *
						     sysctl_min_slab_ratio) / 100;
}

int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *table, int write,
		void *buffer, size_t *length, loff_t *ppos)
{
	int rc;

	rc = proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (rc)
		return rc;

	setup_min_slab_ratio();

	return 0;
}
#endif

/*
 * lowmem_reserve_ratio_sysctl_handler - just a wrapper around
 *	proc_dointvec() so that we can call setup_per_zone_lowmem_reserve()
 *	whenever sysctl_lowmem_reserve_ratio changes.
 *
 * The reserve ratio obviously has absolutely no relation with the
 * minimum watermarks. The lowmem reserve ratio only makes sense as a
 * function of the boot-time zone sizes.
 */
int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *table, int write,
		void *buffer, size_t *length, loff_t *ppos)
{
	int i;

	proc_dointvec_minmax(table, write, buffer, length, ppos);

	for (i = 0; i < MAX_NR_ZONES; i++) {
		if (sysctl_lowmem_reserve_ratio[i] < 1)
			sysctl_lowmem_reserve_ratio[i] = 0;
	}

	setup_per_zone_lowmem_reserve();
	return 0;
}

/*
 * percpu_pagelist_fraction - changes the pcp->high for each zone on each
 * cpu.  It is the fraction of total pages in each zone that a hot per-cpu
 * pagelist can have before it gets flushed back to the buddy allocator.
 */
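
/*
 * Worked example (illustrative): with percpu_pagelist_fraction = 8, a zone
 * managing 1048576 pages lets each per-cpu pagelist grow to roughly
 * 1048576 / 8 = 131072 pages before pages spill back to the buddy lists.
 */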
int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *table, int write,
		void *buffer, size_t *length, loff_t *ppos)
{
	struct zone *zone;
	int old_percpu_pagelist_fraction;
	int ret;

	mutex_lock(&pcp_batch_high_lock);
	old_percpu_pagelist_fraction = percpu_pagelist_fraction;

	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (!write || ret < 0)
		goto out;

	/* Sanity checking to avoid pcp imbalance */
	if (percpu_pagelist_fraction &&
	    percpu_pagelist_fraction < MIN_PERCPU_PAGELIST_FRACTION) {
		percpu_pagelist_fraction = old_percpu_pagelist_fraction;
		ret = -EINVAL;
		goto out;
	}

	/* No change? */
	if (percpu_pagelist_fraction == old_percpu_pagelist_fraction)
		goto out;

	for_each_populated_zone(zone)
		zone_set_pageset_high_and_batch(zone);
out:
	mutex_unlock(&pcp_batch_high_lock);
	return ret;
}

#ifndef __HAVE_ARCH_RESERVED_KERNEL_PAGES
/*
 * Returns the number of pages that arch has reserved but
 * is not known to alloc_large_system_hash().
 */
static unsigned long __init arch_reserved_kernel_pages(void)
{
	return 0;
}
#endif

/*
 * Adaptive scale is meant to reduce sizes of hash tables on large memory
 * machines. As memory size is increased the scale is also increased but at
 * slower pace.  Starting from ADAPT_SCALE_BASE (64G), every time memory
 * quadruples the scale is increased by one, which means the size of hash table
 * only doubles, instead of quadrupling as well.
 * Because 32-bit systems cannot have large physical memory, where this scaling
 * makes sense, it is disabled on such platforms.
 */
#if __BITS_PER_LONG > 32
#define ADAPT_SCALE_BASE	(64ul << 30)
#define ADAPT_SCALE_SHIFT	2
#define ADAPT_SCALE_NPAGES	(ADAPT_SCALE_BASE >> PAGE_SHIFT)
#endif
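
/*
 * Illustrative effect of the adaptive scale (64-bit only, assumed memory
 * sizes): memory above 64 GiB bumps the scale once, above 256 GiB twice,
 * and so on; each quadrupling of memory then only doubles, rather than
 * quadruples, the size of the resulting hash table.
 */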

/*
 * allocate a large system hash table from bootmem
 * - it is assumed that the hash table must contain an exact power-of-2
 *   quantity of entries
 * - limit is the number of hash buckets, not the total allocation size
 */
void *__init alloc_large_system_hash(const char *tablename,
				     unsigned long bucketsize,
				     unsigned long numentries,
				     int scale,
				     int flags,
				     unsigned int *_hash_shift,
				     unsigned int *_hash_mask,
				     unsigned long low_limit,
				     unsigned long high_limit)
{
	unsigned long long max = high_limit;
	unsigned long log2qty, size;
	void *table = NULL;
	gfp_t gfp_flags;
	bool virt;

	/* allow the kernel cmdline to have a say */
	if (!numentries) {
		/* round applicable memory size up to nearest megabyte */
		numentries = nr_kernel_pages;
		numentries -= arch_reserved_kernel_pages();

		/* It isn't necessary when PAGE_SIZE >= 1MB */
		if (PAGE_SHIFT < 20)
			numentries = round_up(numentries, (1<<20)/PAGE_SIZE);

#if __BITS_PER_LONG > 32
		if (!high_limit) {
			unsigned long adapt;

			for (adapt = ADAPT_SCALE_NPAGES; adapt < numentries;
			     adapt <<= ADAPT_SCALE_SHIFT)
				scale++;
		}
#endif

		/* limit to 1 bucket per 2^scale bytes of low memory */
		if (scale > PAGE_SHIFT)
			numentries >>= (scale - PAGE_SHIFT);
		else
			numentries <<= (PAGE_SHIFT - scale);

		/* Make sure we've got at least a 0-order allocation.. */
		if (unlikely(flags & HASH_SMALL)) {
			/* Makes no sense without HASH_EARLY */
			WARN_ON(!(flags & HASH_EARLY));
			if (!(numentries >> *_hash_shift)) {
				numentries = 1UL << *_hash_shift;
				BUG_ON(!numentries);
			}
		} else if (unlikely((numentries * bucketsize) < PAGE_SIZE))
			numentries = PAGE_SIZE / bucketsize;
	}
	numentries = roundup_pow_of_two(numentries);

	/* limit allocation size to 1/16 total memory by default */
	if (max == 0) {
		max = ((unsigned long long)nr_all_pages << PAGE_SHIFT) >> 4;
		do_div(max, bucketsize);
	}
	max = min(max, 0x80000000ULL);

	if (numentries < low_limit)
		numentries = low_limit;
	if (numentries > max)
		numentries = max;

	log2qty = ilog2(numentries);

	gfp_flags = (flags & HASH_ZERO) ? GFP_ATOMIC | __GFP_ZERO : GFP_ATOMIC;
	do {
		virt = false;
		size = bucketsize << log2qty;
		if (flags & HASH_EARLY) {
			if (flags & HASH_ZERO)
				table = memblock_alloc(size, SMP_CACHE_BYTES);
			else
				table = memblock_alloc_raw(size,
							   SMP_CACHE_BYTES);
8302
		} else if (get_order(size) >= MAX_ORDER || hashdist) {
8303
			table = __vmalloc(size, gfp_flags);
8304
			virt = true;
8305
		} else {
8306 8307
			/*
			 * If bucketsize is not a power-of-two, we may free
8308 8309
			 * some pages at the end of hash table which
			 * alloc_pages_exact() automatically does
8310
			 */
8311 8312
			table = alloc_pages_exact(size, gfp_flags);
			kmemleak_alloc(table, size, 1, gfp_flags);
L
Linus Torvalds 已提交
8313 8314 8315 8316 8317 8318
		}
	} while (!table && size > PAGE_SIZE && --log2qty);

	if (!table)
		panic("Failed to allocate %s hash table\n", tablename);

8319 8320 8321
	pr_info("%s hash table entries: %ld (order: %d, %lu bytes, %s)\n",
		tablename, 1UL << log2qty, ilog2(size) - PAGE_SHIFT, size,
		virt ? "vmalloc" : "linear");
L
Linus Torvalds 已提交
8322 8323 8324 8325 8326 8327 8328 8329

	if (_hash_shift)
		*_hash_shift = log2qty;
	if (_hash_mask)
		*_hash_mask = (1 << log2qty) - 1;

	return table;
}
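
/*
 * Illustrative caller (hedged sketch, not part of this file): early hash
 * users size their tables from memory and the kernel cmdline, e.g. the
 * inode cache in fs/inode.c does roughly:
 *
 *	inode_hashtable = alloc_large_system_hash("Inode-cache",
 *					sizeof(struct hlist_head),
 *					ihash_entries, 14,
 *					HASH_EARLY | HASH_ZERO,
 *					&i_hash_shift, &i_hash_mask,
 *					0, 0);
 */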

/*
 * This function checks whether a pageblock includes unmovable pages or not.
 *
 * The PageLRU check without isolation or lru_lock could race, so a
 * MIGRATE_MOVABLE pageblock might include unmovable pages. Likewise, the
 * __PageMovable check without lock_page may miss some movable non-LRU
 * pages, so this function cannot be expected to be exact.
 *
 * Returns a page without holding a reference. If the caller wants to
 * dereference that page (e.g., dumping), it has to make sure that it
 * cannot get removed (e.g., via memory unplug) concurrently.
 */
struct page *has_unmovable_pages(struct zone *zone, struct page *page,
				 int migratetype, int flags)
{
	unsigned long iter = 0;
	unsigned long pfn = page_to_pfn(page);
	unsigned long offset = pfn % pageblock_nr_pages;

	if (is_migrate_cma_page(page)) {
		/*
		 * CMA allocations (alloc_contig_range) really need to mark
		 * isolated CMA pageblocks even when they are not movable in
		 * fact, so consider them movable here.
		 */
		if (is_migrate_cma(migratetype))
			return NULL;

		return page;
	}

	for (; iter < pageblock_nr_pages - offset; iter++) {
		if (!pfn_valid_within(pfn + iter))
			continue;

		page = pfn_to_page(pfn + iter);

		/*
		 * Both bootmem allocations and memory holes are marked
		 * PG_reserved and are unmovable. We can even have unmovable
		 * allocations inside ZONE_MOVABLE, for example when
		 * specifying "movablecore".
		 */
		if (PageReserved(page))
			return page;

		/*
		 * If the zone is movable and we have ruled out all reserved
		 * pages then it should be reasonably safe to assume the rest
		 * is movable.
		 */
		if (zone_idx(zone) == ZONE_MOVABLE)
			continue;

		/*
		 * Hugepages are not in LRU lists, but they're movable.
		 * THPs are on the LRU, but need to be counted as #small pages.
		 * We need not scan over tail pages because we don't
		 * handle each tail page individually in migration.
		 */
		if (PageHuge(page) || PageTransCompound(page)) {
			struct page *head = compound_head(page);
			unsigned int skip_pages;

			if (PageHuge(page)) {
				if (!hugepage_migration_supported(page_hstate(head)))
					return page;
			} else if (!PageLRU(head) && !__PageMovable(head)) {
				return page;
			}

			skip_pages = compound_nr(head) - (page - head);
			iter += skip_pages - 1;
			continue;
		}

		/*
		 * We can't use page_count without pinning the page, because
		 * another CPU can free the compound page. This check already
		 * skips compound tails of THP because their page->_refcount
		 * is zero at all times.
		 */
		if (!page_ref_count(page)) {
			if (PageBuddy(page))
				iter += (1 << buddy_order(page)) - 1;
			continue;
		}

		/*
		 * The HWPoisoned page may not be in the buddy system, and
		 * page_count() is not 0.
		 */
		if ((flags & MEMORY_OFFLINE) && PageHWPoison(page))
			continue;

		/*
		 * We treat all PageOffline() pages as movable when offlining
		 * to give drivers a chance to decrement their reference count
		 * in MEM_GOING_OFFLINE in order to indicate that these pages
		 * can be offlined as there are no direct references anymore.
		 * For actually unmovable PageOffline() where the driver does
		 * not support this, we will fail later when trying to actually
		 * move these pages that still have a reference count > 0.
		 * (false negatives in this function only)
		 */
		if ((flags & MEMORY_OFFLINE) && PageOffline(page))
			continue;

		if (__PageMovable(page) || PageLRU(page))
			continue;

		/*
		 * If there are RECLAIMABLE pages, we need to check them.
		 * But for now, memory offline itself doesn't call
		 * shrink_node_slabs(), and this still needs to be fixed.
		 */
		return page;
	}
	return NULL;
}
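
/*
 * Illustrative caller (hedged sketch): page isolation gives up when this
 * reports an unmovable page, e.g. set_migratetype_isolate() in
 * mm/page_isolation.c does roughly:
 *
 *	unmovable = has_unmovable_pages(zone, page, migratetype, isol_flags);
 *	if (!unmovable)
 *		set_pageblock_migratetype(page, MIGRATE_ISOLATE);
 *	else if (isol_flags & REPORT_FAILURE)
 *		dump_page(unmovable, "unmovable page");
 */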

#ifdef CONFIG_CONTIG_ALLOC
static unsigned long pfn_max_align_down(unsigned long pfn)
{
	return pfn & ~(max_t(unsigned long, MAX_ORDER_NR_PAGES,
			     pageblock_nr_pages) - 1);
}

static unsigned long pfn_max_align_up(unsigned long pfn)
{
	return ALIGN(pfn, max_t(unsigned long, MAX_ORDER_NR_PAGES,
				pageblock_nr_pages));
}

/* [start, end) must belong to a single zone. */
static int __alloc_contig_migrate_range(struct compact_control *cc,
					unsigned long start, unsigned long end)
{
	/* This function is based on compact_zone() from compaction.c. */
	unsigned int nr_reclaimed;
	unsigned long pfn = start;
	unsigned int tries = 0;
	int ret = 0;
	struct migration_target_control mtc = {
		.nid = zone_to_nid(cc->zone),
		.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
	};

	migrate_prep();

	while (pfn < end || !list_empty(&cc->migratepages)) {
		if (fatal_signal_pending(current)) {
			ret = -EINTR;
			break;
		}

		if (list_empty(&cc->migratepages)) {
			cc->nr_migratepages = 0;
			pfn = isolate_migratepages_range(cc, pfn, end);
			if (!pfn) {
				ret = -EINTR;
				break;
			}
			tries = 0;
		} else if (++tries == 5) {
			ret = ret < 0 ? ret : -EBUSY;
			break;
		}

		nr_reclaimed = reclaim_clean_pages_from_list(cc->zone,
							&cc->migratepages);
		cc->nr_migratepages -= nr_reclaimed;

		ret = migrate_pages(&cc->migratepages, alloc_migration_target,
				NULL, (unsigned long)&mtc, cc->mode, MR_CONTIG_RANGE);
	}
	if (ret < 0) {
		putback_movable_pages(&cc->migratepages);
		return ret;
	}
	return 0;
}

/**
 * alloc_contig_range() -- tries to allocate given range of pages
 * @start:	start PFN to allocate
 * @end:	one-past-the-last PFN to allocate
 * @migratetype:	migratetype of the underlying pageblocks (either
 *			#MIGRATE_MOVABLE or #MIGRATE_CMA).  All pageblocks
 *			in range must have the same migratetype and it must
 *			be either of the two.
 * @gfp_mask:	GFP mask to use during compaction
 *
 * The PFN range does not have to be pageblock or MAX_ORDER_NR_PAGES
 * aligned.  The PFN range must belong to a single zone.
 *
 * The first thing this routine does is attempt to MIGRATE_ISOLATE all
 * pageblocks in the range.  Once isolated, the pageblocks should not
 * be modified by others.
 *
 * Return: zero on success or negative error code.  On success all
 * pages whose PFN is in [start, end) are allocated for the caller and
 * need to be freed with free_contig_range().
 */
int alloc_contig_range(unsigned long start, unsigned long end,
		       unsigned migratetype, gfp_t gfp_mask)
{
	unsigned long outer_start, outer_end;
	unsigned int order;
	int ret = 0;

	struct compact_control cc = {
		.nr_migratepages = 0,
		.order = -1,
		.zone = page_zone(pfn_to_page(start)),
		.mode = MIGRATE_SYNC,
		.ignore_skip_hint = true,
		.no_set_skip_hint = true,
		.gfp_mask = current_gfp_context(gfp_mask),
		.alloc_contig = true,
	};
	INIT_LIST_HEAD(&cc.migratepages);

	/*
	 * What we do here is mark all pageblocks in range as
	 * MIGRATE_ISOLATE.  Because pageblock and max order pages may
	 * have different sizes, and due to the way the page allocator
	 * works, we align the range to the biggest of the two sizes so
	 * that the page allocator won't try to merge buddies from
	 * different pageblocks and change MIGRATE_ISOLATE to some
	 * other migration type.
	 *
	 * Once the pageblocks are marked as MIGRATE_ISOLATE, we
	 * migrate the pages from an unaligned range (i.e. the pages
	 * that we are interested in).  This will put all the pages in
	 * the range back to the page allocator as MIGRATE_ISOLATE.
	 *
	 * When this is done, we take the pages in range from the page
	 * allocator, removing them from the buddy system.  This way the
	 * page allocator will never consider using them.
	 *
	 * This lets us mark the pageblocks back as
	 * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the
	 * aligned range but not in the unaligned, original range are
	 * put back to the page allocator so that the buddy can use them.
	 */

	ret = start_isolate_page_range(pfn_max_align_down(start),
				       pfn_max_align_up(end), migratetype, 0);
	if (ret)
		return ret;

	drain_all_pages(cc.zone);

	/*
	 * In case of -EBUSY, we'd like to know which page causes the
	 * problem.  So, just fall through. test_pages_isolated() has a
	 * tracepoint which will report the busy page.
	 *
	 * It is possible that busy pages could become available before
	 * the call to test_pages_isolated, and the range will actually be
	 * allocated.  So, if we fall through be sure to clear ret so that
	 * -EBUSY is not accidentally used or returned to the caller.
	 */
	ret = __alloc_contig_migrate_range(&cc, start, end);
	if (ret && ret != -EBUSY)
		goto done;
	ret = 0;

	/*
	 * Pages from [start, end) are within MAX_ORDER_NR_PAGES
	 * aligned blocks that are marked as MIGRATE_ISOLATE.  What's
	 * more, all pages in [start, end) are free in the page allocator.
	 * What we are going to do is allocate all pages from
	 * [start, end) (that is, remove them from the page allocator).
	 *
	 * The only problem is that pages at the beginning and at the
	 * end of the interesting range may not be aligned with pages
	 * that the page allocator holds, i.e. they can be part of higher
	 * order pages.  Because of this, we reserve the bigger range and
	 * once this is done free the pages we are not interested in.
	 *
	 * We don't have to hold zone->lock here because the pages are
	 * isolated thus they won't get removed from buddy.
	 */

	lru_add_drain_all();

	order = 0;
	outer_start = start;
	while (!PageBuddy(pfn_to_page(outer_start))) {
		if (++order >= MAX_ORDER) {
			outer_start = start;
			break;
		}
		outer_start &= ~0UL << order;
	}

	if (outer_start != start) {
		order = buddy_order(pfn_to_page(outer_start));

		/*
		 * outer_start page could be a small order buddy page that
		 * doesn't include the start page. Adjust outer_start in
		 * this case to report the failed page properly on the
		 * tracepoint in test_pages_isolated().
		 */
		if (outer_start + (1UL << order) <= start)
			outer_start = start;
	}

	/* Make sure the range is really isolated. */
	if (test_pages_isolated(outer_start, end, 0)) {
		pr_info_ratelimited("%s: [%lx, %lx) PFNs busy\n",
			__func__, outer_start, end);
		ret = -EBUSY;
		goto done;
	}

	/* Grab isolated pages from freelists. */
	outer_end = isolate_freepages_range(&cc, outer_start, end);
	if (!outer_end) {
		ret = -EBUSY;
		goto done;
	}

	/* Free head and tail (if any) */
	if (start != outer_start)
		free_contig_range(outer_start, start - outer_start);
	if (end != outer_end)
		free_contig_range(end, outer_end - end);

done:
	undo_isolate_page_range(pfn_max_align_down(start),
				pfn_max_align_up(end), migratetype);
	return ret;
}
EXPORT_SYMBOL(alloc_contig_range);
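
/*
 * Illustrative caller (hedged sketch): the CMA allocator carves a
 * physically contiguous block out of a reserved MIGRATE_CMA region,
 * e.g. cma_alloc() in mm/cma.c does roughly:
 *
 *	ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA, gfp_mask);
 *	if (!ret)
 *		page = pfn_to_page(pfn);
 */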

static int __alloc_contig_pages(unsigned long start_pfn,
				unsigned long nr_pages, gfp_t gfp_mask)
{
	unsigned long end_pfn = start_pfn + nr_pages;

	return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE,
				  gfp_mask);
}

static bool pfn_range_valid_contig(struct zone *z, unsigned long start_pfn,
				   unsigned long nr_pages)
{
	unsigned long i, end_pfn = start_pfn + nr_pages;
	struct page *page;

	for (i = start_pfn; i < end_pfn; i++) {
		page = pfn_to_online_page(i);
		if (!page)
			return false;

		if (page_zone(page) != z)
			return false;

		if (PageReserved(page))
			return false;

		if (page_count(page) > 0)
			return false;

		if (PageHuge(page))
			return false;
	}
	return true;
}

static bool zone_spans_last_pfn(const struct zone *zone,
				unsigned long start_pfn, unsigned long nr_pages)
{
	unsigned long last_pfn = start_pfn + nr_pages - 1;

	return zone_spans_pfn(zone, last_pfn);
}

/**
 * alloc_contig_pages() -- tries to find and allocate contiguous range of pages
 * @nr_pages:	Number of contiguous pages to allocate
 * @gfp_mask:	GFP mask to limit search and used during compaction
 * @nid:	Target node
 * @nodemask:	Mask for other possible nodes
 *
 * This routine is a wrapper around alloc_contig_range(). It scans over zones
 * on an applicable zonelist to find a contiguous pfn range which can then be
 * tried for allocation with alloc_contig_range(). This routine is intended
 * for allocation requests which cannot be fulfilled by the buddy allocator.
 *
 * The allocated memory is always aligned to a page boundary. If nr_pages is a
 * power of two then the alignment is guaranteed to be to the given nr_pages
 * (e.g. 1GB request would be aligned to 1GB).
 *
 * Allocated pages can be freed with free_contig_range() or by manually calling
 * __free_page() on each allocated page.
 *
 * Return: pointer to contiguous pages on success, or NULL if not successful.
 */
struct page *alloc_contig_pages(unsigned long nr_pages, gfp_t gfp_mask,
				int nid, nodemask_t *nodemask)
{
	unsigned long ret, pfn, flags;
	struct zonelist *zonelist;
	struct zone *zone;
	struct zoneref *z;

	zonelist = node_zonelist(nid, gfp_mask);
	for_each_zone_zonelist_nodemask(zone, z, zonelist,
					gfp_zone(gfp_mask), nodemask) {
		spin_lock_irqsave(&zone->lock, flags);

		pfn = ALIGN(zone->zone_start_pfn, nr_pages);
		while (zone_spans_last_pfn(zone, pfn, nr_pages)) {
			if (pfn_range_valid_contig(zone, pfn, nr_pages)) {
				/*
				 * We release the zone lock here because
				 * alloc_contig_range() will also lock the zone
				 * at some point. If there's an allocation
				 * spinning on this lock, it may win the race
				 * and cause alloc_contig_range() to fail...
				 */
				spin_unlock_irqrestore(&zone->lock, flags);
				ret = __alloc_contig_pages(pfn, nr_pages,
							gfp_mask);
				if (!ret)
					return pfn_to_page(pfn);
				spin_lock_irqsave(&zone->lock, flags);
			}
			pfn += nr_pages;
		}
		spin_unlock_irqrestore(&zone->lock, flags);
	}
	return NULL;
}
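
/*
 * Illustrative caller (hedged sketch): hugetlb allocates gigantic pages
 * this way when the buddy allocator cannot serve the order, roughly
 * (nr_pages would be 1UL << (30 - PAGE_SHIFT) for a 1 GiB page):
 *
 *	page = alloc_contig_pages(nr_pages, GFP_KERNEL | __GFP_THISNODE,
 *				  nid, nodemask);
 */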
#endif /* CONFIG_CONTIG_ALLOC */

void free_contig_range(unsigned long pfn, unsigned int nr_pages)
{
	unsigned int count = 0;

	for (; nr_pages--; pfn++) {
		struct page *page = pfn_to_page(pfn);

		count += page_count(page) != 1;
		__free_page(page);
	}
	WARN(count != 0, "%d pages are still in use!\n", count);
}
EXPORT_SYMBOL(free_contig_range);

/*
 * The zone indicated has a new number of managed_pages; batch sizes and percpu
 * page high values need to be recalculated.
 */
void __meminit zone_pcp_update(struct zone *zone)
{
	mutex_lock(&pcp_batch_high_lock);
	zone_set_pageset_high_and_batch(zone);
	mutex_unlock(&pcp_batch_high_lock);
}

/*
 * Effectively disable pcplists for the zone by setting the high limit to 0
 * and draining all cpus. A concurrent page freeing on another CPU that's about
 * to put the page on pcplist will either finish before the drain and the page
 * will be drained, or observe the new high limit and skip the pcplist.
 *
 * Must be paired with a call to zone_pcp_enable().
 */
void zone_pcp_disable(struct zone *zone)
{
	mutex_lock(&pcp_batch_high_lock);
	__zone_set_pageset_high_and_batch(zone, 0, 1);
	__drain_all_pages(zone, true);
}

void zone_pcp_enable(struct zone *zone)
{
	__zone_set_pageset_high_and_batch(zone, zone->pageset_high, zone->pageset_batch);
	mutex_unlock(&pcp_batch_high_lock);
}
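
/*
 * Illustrative pairing (hedged sketch): memory offlining brackets its
 * isolation work with these helpers, roughly:
 *
 *	zone_pcp_disable(zone);
 *	ret = start_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE,
 *				       MEMORY_OFFLINE | REPORT_FAILURE);
 *	...
 *	zone_pcp_enable(zone);
 */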

void zone_pcp_reset(struct zone *zone)
{
	unsigned long flags;
	int cpu;
	struct per_cpu_pageset *pset;

	/* avoid races with drain_pages() */
	local_irq_save(flags);
	if (zone->pageset != &boot_pageset) {
		for_each_online_cpu(cpu) {
			pset = per_cpu_ptr(zone->pageset, cpu);
			drain_zonestat(zone, pset);
		}
		free_percpu(zone->pageset);
		zone->pageset = &boot_pageset;
	}
	local_irq_restore(flags);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
/*
 * All pages in the range must be in a single zone, must not contain holes,
 * must span full sections, and must be isolated before calling this function.
 */
void __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn = start_pfn;
	struct page *page;
	struct zone *zone;
	unsigned int order;
	unsigned long flags;

	offline_mem_sections(pfn, end_pfn);
	zone = page_zone(pfn_to_page(pfn));
	spin_lock_irqsave(&zone->lock, flags);
	while (pfn < end_pfn) {
		page = pfn_to_page(pfn);
		/*
		 * The HWPoisoned page may not be in the buddy system, and
		 * page_count() is not 0.
		 */
		if (unlikely(!PageBuddy(page) && PageHWPoison(page))) {
			pfn++;
			continue;
		}
		/*
		 * At this point all remaining PageOffline() pages have a
		 * reference count of 0 and can simply be skipped.
		 */
		if (PageOffline(page)) {
			BUG_ON(page_count(page));
			BUG_ON(PageBuddy(page));
			pfn++;
			continue;
		}

		BUG_ON(page_count(page));
		BUG_ON(!PageBuddy(page));
		order = buddy_order(page);
		del_page_from_free_list(page, zone, order);
		pfn += (1 << order);
	}
	spin_unlock_irqrestore(&zone->lock, flags);
}
#endif

bool is_free_buddy_page(struct page *page)
{
	struct zone *zone = page_zone(page);
	unsigned long pfn = page_to_pfn(page);
	unsigned long flags;
	unsigned int order;

	spin_lock_irqsave(&zone->lock, flags);
	for (order = 0; order < MAX_ORDER; order++) {
		struct page *page_head = page - (pfn & ((1 << order) - 1));

		if (PageBuddy(page_head) && buddy_order(page_head) >= order)
			break;
	}
	spin_unlock_irqrestore(&zone->lock, flags);

	return order < MAX_ORDER;
}

#ifdef CONFIG_MEMORY_FAILURE
/*
 * Break down a higher-order page into sub-pages, and keep our target out of
 * the buddy allocator.
 */
static void break_down_buddy_pages(struct zone *zone, struct page *page,
				   struct page *target, int low, int high,
				   int migratetype)
{
	unsigned long size = 1 << high;
	struct page *current_buddy, *next_page;

	while (high > low) {
		high--;
		size >>= 1;

		if (target >= &page[size]) {
			next_page = page + size;
			current_buddy = page;
		} else {
			next_page = page;
			current_buddy = page + size;
		}

		if (set_page_guard(zone, current_buddy, high, migratetype))
			continue;

		if (current_buddy != target) {
			add_to_free_list(current_buddy, zone, high, migratetype);
			set_buddy_order(current_buddy, high);
			page = next_page;
		}
	}
}

/*
 * Take a page that will be marked as poisoned off the buddy allocator.
 */
bool take_page_off_buddy(struct page *page)
{
	struct zone *zone = page_zone(page);
	unsigned long pfn = page_to_pfn(page);
	unsigned long flags;
	unsigned int order;
	bool ret = false;

	spin_lock_irqsave(&zone->lock, flags);
	for (order = 0; order < MAX_ORDER; order++) {
		struct page *page_head = page - (pfn & ((1 << order) - 1));
		int page_order = buddy_order(page_head);

		if (PageBuddy(page_head) && page_order >= order) {
			unsigned long pfn_head = page_to_pfn(page_head);
			int migratetype = get_pfnblock_migratetype(page_head,
								   pfn_head);

			del_page_from_free_list(page_head, zone, page_order);
			break_down_buddy_pages(zone, page_head, page, 0,
						page_order, migratetype);
			ret = true;
			break;
		}
		if (page_count(page_head) > 0)
			break;
	}
	spin_unlock_irqrestore(&zone->lock, flags);
	return ret;
}
#endif
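
/*
 * Illustrative caller (hedged sketch): the hwpoison code pulls a free page
 * off the buddy lists before marking it, roughly:
 *
 *	if (take_page_off_buddy(page))
 *		SetPageHWPoison(page);
 */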