/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MMZONE_H
#define _LINUX_MMZONE_H

#ifndef __ASSEMBLY__
#ifndef __GENERATING_BOUNDS_H

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/numa.h>
#include <linux/init.h>
#include <linux/seqlock.h>
#include <linux/nodemask.h>
#include <linux/pageblock-flags.h>
#include <linux/page-flags-layout.h>
#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <asm/page.h>

/* Free memory management - zoned buddy allocator.  */
#ifndef CONFIG_FORCE_MAX_ZONEORDER
#define MAX_ORDER 11
#else
#define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER
#endif
#define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1))

/*
 * PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed
 * costly to service.  That is between allocation orders which should
 * coalesce naturally under reasonable reclaim pressure and those which
 * will not.
 */
#define PAGE_ALLOC_COSTLY_ORDER 3

enum migratetype {
	MIGRATE_UNMOVABLE,
	MIGRATE_MOVABLE,
	MIGRATE_RECLAIMABLE,
	MIGRATE_PCPTYPES,	/* the number of types on the pcp lists */
	MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES,
#ifdef CONFIG_CMA
	/*
	 * MIGRATE_CMA migration type is designed to mimic the way
	 * ZONE_MOVABLE works.  Only movable pages can be allocated
	 * from MIGRATE_CMA pageblocks, and the page allocator never
	 * implicitly changes the migration type of a MIGRATE_CMA
	 * pageblock.
	 *
	 * The way to use it is to change the migratetype of a range of
	 * pageblocks to MIGRATE_CMA, which can be done by the
	 * __free_pageblock_cma() function.  What is important though
	 * is that a range of pageblocks must be aligned to
	 * MAX_ORDER_NR_PAGES should the biggest page be bigger than
	 * a single pageblock.
	 */
	MIGRATE_CMA,
#endif
#ifdef CONFIG_MEMORY_ISOLATION
	MIGRATE_ISOLATE,	/* can't allocate from here */
#endif
	MIGRATE_TYPES
};

/* In mm/page_alloc.c; keep in sync also with show_migration_types() there */
extern const char * const migratetype_names[MIGRATE_TYPES];

#ifdef CONFIG_CMA
#  define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
#  define is_migrate_cma_page(_page) (get_pageblock_migratetype(_page) == MIGRATE_CMA)
#else
#  define is_migrate_cma(migratetype) false
#  define is_migrate_cma_page(_page) false
#endif

static inline bool is_migrate_movable(int mt)
{
	return is_migrate_cma(mt) || mt == MIGRATE_MOVABLE;
}

#define for_each_migratetype_order(order, type) \
	for (order = 0; order < MAX_ORDER; order++) \
		for (type = 0; type < MIGRATE_TYPES; type++)

extern int page_group_by_mobility_disabled;

#define NR_MIGRATETYPE_BITS (PB_migrate_end - PB_migrate + 1)
#define MIGRATETYPE_MASK ((1UL << NR_MIGRATETYPE_BITS) - 1)

#define get_pageblock_migratetype(page)					\
	get_pfnblock_flags_mask(page, page_to_pfn(page),		\
			PB_migrate_end, MIGRATETYPE_MASK)

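/*
 * Illustrative sketch, not part of this header: how the helpers above
 * combine.  get_pageblock_migratetype() and is_migrate_movable() are the
 * real macros/functions; the wrapper below is hypothetical.
 *
 *	static bool pageblock_is_movable(struct page *page)
 *	{
 *		int mt = get_pageblock_migratetype(page);
 *
 *		return is_migrate_movable(mt);
 *	}
 */
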
struct free_area {
	struct list_head	free_list[MIGRATE_TYPES];
	unsigned long		nr_free;
};

/* Used for pages not on another list */
static inline void add_to_free_area(struct page *page, struct free_area *area,
			     int migratetype)
{
	list_add(&page->lru, &area->free_list[migratetype]);
	area->nr_free++;
}

/* Used for pages not on another list */
static inline void add_to_free_area_tail(struct page *page, struct free_area *area,
				  int migratetype)
{
	list_add_tail(&page->lru, &area->free_list[migratetype]);
	area->nr_free++;
}

/* Used for pages which are on another list */
static inline void move_to_free_area(struct page *page, struct free_area *area,
			     int migratetype)
{
	list_move(&page->lru, &area->free_list[migratetype]);
}

static inline struct page *get_page_from_free_area(struct free_area *area,
					    int migratetype)
{
	return list_first_entry_or_null(&area->free_list[migratetype],
					struct page, lru);
}

static inline void del_page_from_free_area(struct page *page,
		struct free_area *area)
{
	list_del(&page->lru);
	__ClearPageBuddy(page);
	set_page_private(page, 0);
	area->nr_free--;
}

static inline bool free_area_empty(struct free_area *area, int migratetype)
{
	return list_empty(&area->free_list[migratetype]);
}
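
/*
 * Illustrative sketch, not part of this header: a minimal buddy-style round
 * trip through the helpers above.  It assumes the caller holds zone->lock;
 * the function name and variables are hypothetical.
 *
 *	static struct page *take_first_free(struct zone *zone,
 *					    unsigned int order, int migratetype)
 *	{
 *		struct free_area *area = &zone->free_area[order];
 *		struct page *page = get_page_from_free_area(area, migratetype);
 *
 *		if (page)
 *			del_page_from_free_area(page, area);
 *		return page;
 *	}
 */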

struct pglist_data;

/*
 * zone->lock and the zone lru_lock are two of the hottest locks in the kernel.
 * So add a wild amount of padding here to ensure that they fall into separate
 * cachelines.  There are very few zone structures in the machine, so space
 * consumption is not a concern here.
 */
#if defined(CONFIG_SMP)
struct zone_padding {
	char x[0];
} ____cacheline_internodealigned_in_smp;
#define ZONE_PADDING(name)	struct zone_padding name;
#else
#define ZONE_PADDING(name)
#endif

#ifdef CONFIG_NUMA
enum numa_stat_item {
	NUMA_HIT,		/* allocated in intended node */
	NUMA_MISS,		/* allocated in non intended node */
	NUMA_FOREIGN,		/* was intended here, hit elsewhere */
	NUMA_INTERLEAVE_HIT,	/* interleaver preferred this zone */
	NUMA_LOCAL,		/* allocation from local node */
	NUMA_OTHER,		/* allocation from other node */
	NR_VM_NUMA_STAT_ITEMS
};
#else
#define NR_VM_NUMA_STAT_ITEMS 0
#endif

enum zone_stat_item {
	/* First 128 byte cacheline (assuming 64 bit words) */
	NR_FREE_PAGES,
	NR_ZONE_LRU_BASE, /* Used only for compaction and reclaim retry */
	NR_ZONE_INACTIVE_ANON = NR_ZONE_LRU_BASE,
	NR_ZONE_ACTIVE_ANON,
	NR_ZONE_INACTIVE_FILE,
	NR_ZONE_ACTIVE_FILE,
	NR_ZONE_UNEVICTABLE,
	NR_ZONE_WRITE_PENDING,	/* Count of dirty, writeback and unstable pages */
	NR_MLOCK,		/* mlock()ed pages found and moved off LRU */
	NR_PAGETABLE,		/* used for pagetables */
	NR_KERNEL_STACK_KB,	/* measured in KiB */
	/* Second 128 byte cacheline */
	NR_BOUNCE,
#if IS_ENABLED(CONFIG_ZSMALLOC)
	NR_ZSPAGES,		/* allocated in zsmalloc */
#endif
	NR_FREE_CMA_PAGES,
	NR_VM_ZONE_STAT_ITEMS };

enum node_stat_item {
	NR_LRU_BASE,
	NR_INACTIVE_ANON = NR_LRU_BASE, /* must match order of LRU_[IN]ACTIVE */
	NR_ACTIVE_ANON,		/*  "     "     "   "       "         */
	NR_INACTIVE_FILE,	/*  "     "     "   "       "         */
	NR_ACTIVE_FILE,		/*  "     "     "   "       "         */
	NR_UNEVICTABLE,		/*  "     "     "   "       "         */
	NR_SLAB_RECLAIMABLE,
	NR_SLAB_UNRECLAIMABLE,
	NR_ISOLATED_ANON,	/* Temporary isolated pages from anon lru */
	NR_ISOLATED_FILE,	/* Temporary isolated pages from file lru */
	WORKINGSET_NODES,
	WORKINGSET_REFAULT,
	WORKINGSET_ACTIVATE,
	WORKINGSET_RESTORE,
	WORKINGSET_NODERECLAIM,
	NR_ANON_MAPPED,	/* Mapped anonymous pages */
	NR_FILE_MAPPED,	/* pagecache pages mapped into pagetables.
			   only modified from process context */
	NR_FILE_PAGES,
	NR_FILE_DIRTY,
	NR_WRITEBACK,
	NR_WRITEBACK_TEMP,	/* Writeback using temporary buffers */
	NR_SHMEM,		/* shmem pages (includes tmpfs/GEM pages) */
	NR_SHMEM_THPS,
	NR_SHMEM_PMDMAPPED,
	NR_ANON_THPS,
	NR_UNSTABLE_NFS,	/* NFS unstable pages */
	NR_VMSCAN_WRITE,
	NR_VMSCAN_IMMEDIATE,	/* Prioritise for reclaim when writeback ends */
	NR_DIRTIED,		/* page dirtyings since bootup */
	NR_WRITTEN,		/* page writings since bootup */
	NR_KERNEL_MISC_RECLAIMABLE,	/* reclaimable non-slab kernel pages */
	NR_VM_NODE_STAT_ITEMS
};

/*
 * We do arithmetic on the LRU lists in various places in the code,
 * so it is important to keep the active lists LRU_ACTIVE higher in
 * the array than the corresponding inactive lists, and to keep
 * the *_FILE lists LRU_FILE higher than the corresponding _ANON lists.
 *
 * This has to be kept in sync with the statistics in zone_stat_item and
 * node_stat_item above, and with the descriptions in vmstat_text in
 * mm/vmstat.c.
 */
#define LRU_BASE 0
#define LRU_ACTIVE 1
#define LRU_FILE 2

enum lru_list {
	LRU_INACTIVE_ANON = LRU_BASE,
	LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE,
	LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE,
	LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE,
	LRU_UNEVICTABLE,
	NR_LRU_LISTS
};

#define for_each_lru(lru) for (lru = 0; lru < NR_LRU_LISTS; lru++)

#define for_each_evictable_lru(lru) for (lru = 0; lru <= LRU_ACTIVE_FILE; lru++)

static inline int is_file_lru(enum lru_list lru)
{
	return (lru == LRU_INACTIVE_FILE || lru == LRU_ACTIVE_FILE);
}

static inline int is_active_lru(enum lru_list lru)
{
	return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE);
}
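
/*
 * Worked example of the LRU index arithmetic above, e.g.
 *
 *	LRU_ACTIVE_FILE == LRU_BASE + LRU_FILE + LRU_ACTIVE == 3
 *
 * so for a page with file = 1 and active = 1 an index can be rebuilt as
 *
 *	enum lru_list lru = LRU_BASE + file * LRU_FILE + active * LRU_ACTIVE;
 *
 * which is the layout that helpers such as page_lru() in
 * include/linux/mm_inline.h depend on.
 */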

struct zone_reclaim_stat {
	/*
	 * The pageout code in vmscan.c keeps track of how many of the
	 * mem/swap backed and file backed pages are referenced.
	 * The higher the rotated/scanned ratio, the more valuable
	 * that cache is.
	 *
	 * The anon LRU stats live in [0], file LRU stats in [1]
	 */
	unsigned long		recent_rotated[2];
	unsigned long		recent_scanned[2];
};

struct lruvec {
	struct list_head		lists[NR_LRU_LISTS];
	struct zone_reclaim_stat	reclaim_stat;
	/* Evictions & activations on the inactive file list */
	atomic_long_t			inactive_age;
	/* Refaults at the time of last reclaim cycle */
	unsigned long			refaults;
#ifdef CONFIG_MEMCG
	struct pglist_data *pgdat;
#endif
};

/* Isolate unmapped file */
#define ISOLATE_UNMAPPED	((__force isolate_mode_t)0x2)
/* Isolate for asynchronous migration */
#define ISOLATE_ASYNC_MIGRATE	((__force isolate_mode_t)0x4)
/* Isolate unevictable pages */
#define ISOLATE_UNEVICTABLE	((__force isolate_mode_t)0x8)

/* LRU Isolation modes. */
typedef unsigned __bitwise isolate_mode_t;

enum zone_watermarks {
	WMARK_MIN,
	WMARK_LOW,
	WMARK_HIGH,
	NR_WMARK
};

#define min_wmark_pages(z) (z->_watermark[WMARK_MIN] + z->watermark_boost)
#define low_wmark_pages(z) (z->_watermark[WMARK_LOW] + z->watermark_boost)
#define high_wmark_pages(z) (z->_watermark[WMARK_HIGH] + z->watermark_boost)
#define wmark_pages(z, i) (z->_watermark[i] + z->watermark_boost)
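
/*
 * Illustrative sketch, not part of this header: a watermark check in the
 * style of the page allocator, ignoring lowmem_reserve and alloc-flags
 * adjustments.  The function name is hypothetical; the real logic lives in
 * __zone_watermark_ok() in mm/page_alloc.c.
 *
 *	static bool zone_above_min_wmark(struct zone *zone)
 *	{
 *		unsigned long free = zone_page_state(zone, NR_FREE_PAGES);
 *
 *		return free > min_wmark_pages(zone);
 *	}
 */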

struct per_cpu_pages {
	int count;		/* number of pages in the list */
	int high;		/* high watermark, emptying needed */
	int batch;		/* chunk size for buddy add/remove */

	/* Lists of pages, one per migrate type stored on the pcp-lists */
	struct list_head lists[MIGRATE_PCPTYPES];
};

struct per_cpu_pageset {
	struct per_cpu_pages pcp;
#ifdef CONFIG_NUMA
	s8 expire;
	u16 vm_numa_stat_diff[NR_VM_NUMA_STAT_ITEMS];
#endif
#ifdef CONFIG_SMP
	s8 stat_threshold;
	s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
#endif
};

struct per_cpu_nodestat {
	s8 stat_threshold;
	s8 vm_node_stat_diff[NR_VM_NODE_STAT_ITEMS];
};

#endif /* !__GENERATING_BOUNDS_H */

enum zone_type {
#ifdef CONFIG_ZONE_DMA
	/*
	 * ZONE_DMA is used when there are devices that are not able
	 * to do DMA to all of addressable memory (ZONE_NORMAL). Then we
	 * carve out the portion of memory that is needed for these devices.
	 * The range is arch specific.
	 *
	 * Some examples
	 *
	 * Architecture		Limit
	 * ---------------------------
	 * parisc, ia64, sparc	<4G
	 * s390, powerpc	<2G
	 * arm			Various
	 * alpha		Unlimited or 0-16MB.
	 *
	 * i386, x86_64 and multiple other arches
	 * 			<16M.
	 */
	ZONE_DMA,
#endif
#ifdef CONFIG_ZONE_DMA32
	/*
	 * x86_64 needs two ZONE_DMAs because it supports devices that are
	 * only able to do DMA to the lower 16M but also 32 bit devices that
	 * can only do DMA areas below 4G.
	 */
	ZONE_DMA32,
#endif
	/*
	 * Normal addressable memory is in ZONE_NORMAL. DMA operations can be
	 * performed on pages in ZONE_NORMAL if the DMA devices support
	 * transfers to all addressable memory.
	 */
	ZONE_NORMAL,
#ifdef CONFIG_HIGHMEM
	/*
	 * A memory area that is only addressable by the kernel through
	 * mapping portions into its own address space. This is for example
	 * used by i386 to allow the kernel to address the memory beyond
	 * 900MB. The kernel will set up special mappings (page
	 * table entries on i386) for each page that the kernel needs to
	 * access.
	 */
	ZONE_HIGHMEM,
#endif
	ZONE_MOVABLE,
#ifdef CONFIG_ZONE_DEVICE
	ZONE_DEVICE,
#endif
	__MAX_NR_ZONES

};

#ifndef __GENERATING_BOUNDS_H

struct zone {
	/* Read-mostly fields */

	/* zone watermarks, access with *_wmark_pages(zone) macros */
	unsigned long _watermark[NR_WMARK];
	unsigned long watermark_boost;

	unsigned long nr_reserved_highatomic;

	/*
	 * We don't know if the memory that we're going to allocate will be
	 * freeable and/or released eventually, so to avoid totally wasting
	 * several GB of ram we must reserve some of the lower zone memory
	 * (otherwise we risk running OOM on the lower zones despite there
	 * being tons of freeable ram on the higher zones).  This array is
	 * recalculated at runtime if the sysctl_lowmem_reserve_ratio sysctl
	 * changes.
	 */
	long lowmem_reserve[MAX_NR_ZONES];

#ifdef CONFIG_NUMA
	int node;
#endif
	struct pglist_data	*zone_pgdat;
	struct per_cpu_pageset __percpu *pageset;

429 430
#ifndef CONFIG_SPARSEMEM
	/*
	 * Flags for a pageblock_nr_pages block. See pageblock-flags.h.
	 * In SPARSEMEM, this map is stored in struct mem_section
	 */
	unsigned long		*pageblock_flags;
#endif /* CONFIG_SPARSEMEM */

	/* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
	unsigned long		zone_start_pfn;

	/*
	 * spanned_pages is the total pages spanned by the zone, including
	 * holes, which is calculated as:
	 * 	spanned_pages = zone_end_pfn - zone_start_pfn;
	 *
	 * present_pages is physical pages existing within the zone, which
	 * is calculated as:
	 *	present_pages = spanned_pages - absent_pages(pages in holes);
	 *
	 * managed_pages is present pages managed by the buddy system, which
	 * is calculated as (reserved_pages includes pages allocated by the
	 * bootmem allocator):
	 *	managed_pages = present_pages - reserved_pages;
	 *
	 * So present_pages may be used by memory hotplug or memory power
	 * management logic to figure out unmanaged pages by checking
	 * (present_pages - managed_pages). And managed_pages should be used
	 * by page allocator and vm scanner to calculate all kinds of watermarks
	 * and thresholds.
	 *
	 * Locking rules:
	 *
	 * zone_start_pfn and spanned_pages are protected by span_seqlock.
	 * It is a seqlock because it has to be read outside of zone->lock,
	 * and it is done in the main allocator path.  But, it is written
	 * quite infrequently.
	 *
	 * The span_seq lock is declared along with zone->lock because it is
	 * frequently read in proximity to zone->lock.  It's good to
	 * give them a chance of being in the same cacheline.
	 *
	 * Write access to present_pages at runtime should be protected by
	 * mem_hotplug_begin/end(). Any reader who can't tolerate drift of
	 * present_pages should use get_online_mems() to get a stable value.
	 */
	atomic_long_t		managed_pages;
	unsigned long		spanned_pages;
	unsigned long		present_pages;

	const char		*name;

#ifdef CONFIG_MEMORY_ISOLATION
	/*
	 * Number of isolated pageblocks. It is used to solve an incorrect
	 * freepage counting problem due to racy retrieval of the migratetype
	 * of a pageblock. Protected by zone->lock.
	 */
	unsigned long		nr_isolate_pageblock;
#endif

#ifdef CONFIG_MEMORY_HOTPLUG
	/* see spanned/present_pages for more description */
	seqlock_t		span_seqlock;
#endif

	int initialized;

	/* Write-intensive fields used from the page allocator */
	ZONE_PADDING(_pad1_)

	/* free areas of different sizes */
	struct free_area	free_area[MAX_ORDER];

	/* zone flags, see below */
	unsigned long		flags;

	/* Primarily protects free_area */
	spinlock_t		lock;

	/* Write-intensive fields used by compaction and vmstats. */
	ZONE_PADDING(_pad2_)

	/*
	 * When free pages are below this point, additional steps are taken
	 * when reading the number of free pages to avoid per-cpu counter
	 * drift allowing watermarks to be breached
	 */
	unsigned long percpu_drift_mark;

#if defined CONFIG_COMPACTION || defined CONFIG_CMA
	/* pfn where compaction free scanner should start */
	unsigned long		compact_cached_free_pfn;
	/* pfn where async and sync compaction migration scanner should start */
	unsigned long		compact_cached_migrate_pfn[2];
	unsigned long		compact_init_migrate_pfn;
	unsigned long		compact_init_free_pfn;
#endif

#ifdef CONFIG_COMPACTION
	/*
	 * On compaction failure, 1<<compact_defer_shift compactions
	 * are skipped before trying again. The number attempted since
	 * last failure is tracked with compact_considered.
	 */
	unsigned int		compact_considered;
	unsigned int		compact_defer_shift;
	int			compact_order_failed;
#endif

#if defined CONFIG_COMPACTION || defined CONFIG_CMA
	/* Set to true when the PG_migrate_skip bits should be cleared */
	bool			compact_blockskip_flush;
#endif

	bool			contiguous;

	ZONE_PADDING(_pad3_)
	/* Zone statistics */
	atomic_long_t		vm_stat[NR_VM_ZONE_STAT_ITEMS];
	atomic_long_t		vm_numa_stat[NR_VM_NUMA_STAT_ITEMS];
} ____cacheline_internodealigned_in_smp;

enum pgdat_flags {
	PGDAT_CONGESTED,		/* pgdat has many dirty pages backed by
					 * a congested BDI
					 */
	PGDAT_DIRTY,			/* reclaim scanning has recently found
					 * many dirty file pages at the tail
					 * of the LRU.
					 */
	PGDAT_WRITEBACK,		/* reclaim scanning has recently found
					 * many pages under writeback
					 */
	PGDAT_RECLAIM_LOCKED,		/* prevents concurrent reclaim */
};

enum zone_flags {
	ZONE_BOOSTED_WATERMARK,		/* zone recently boosted watermarks.
					 * Cleared when kswapd is woken.
					 */
};

static inline unsigned long zone_managed_pages(struct zone *zone)
{
	return (unsigned long)atomic_long_read(&zone->managed_pages);
}

static inline unsigned long zone_end_pfn(const struct zone *zone)
{
	return zone->zone_start_pfn + zone->spanned_pages;
}

static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn)
{
	return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone);
}

static inline bool zone_is_initialized(struct zone *zone)
{
	return zone->initialized;
}

static inline bool zone_is_empty(struct zone *zone)
{
	return zone->spanned_pages == 0;
}

/*
 * Return true if [start_pfn, start_pfn + nr_pages) range has a non-empty
 * intersection with the given zone
 */
static inline bool zone_intersects(struct zone *zone,
		unsigned long start_pfn, unsigned long nr_pages)
{
	if (zone_is_empty(zone))
		return false;
	if (start_pfn >= zone_end_pfn(zone) ||
	    start_pfn + nr_pages <= zone->zone_start_pfn)
		return false;

	return true;
}
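
/*
 * Worked example: a zone spanning PFNs [1024, 2048) intersects the range
 * start_pfn = 2000, nr_pages = 100 (the PFNs [2000, 2048) overlap), but not
 * a range starting at PFN 2048, which begins exactly at zone_end_pfn().
 */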

/*
 * The "priority" of VM scanning is how much of the queues we will scan in one
 * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the
 * queues ("queue_length >> 12") during an aging round.
 */
#define DEF_PRIORITY 12

/* Maximum number of zones on a zonelist */
#define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES)

enum {
	ZONELIST_FALLBACK,	/* zonelist with fallback */
#ifdef CONFIG_NUMA
	/*
	 * The NUMA zonelists are doubled because we need zonelists that
	 * restrict the allocations to a single node for __GFP_THISNODE.
	 */
	ZONELIST_NOFALLBACK,	/* zonelist without fallback (__GFP_THISNODE) */
#endif
	MAX_ZONELISTS
};

/*
 * This struct contains information about a zone in a zonelist. It is stored
 * here to avoid dereferences into large structures and lookups of tables
 */
struct zoneref {
	struct zone *zone;	/* Pointer to actual zone */
	int zone_idx;		/* zone_idx(zoneref->zone) */
};

/*
 * One allocation request operates on a zonelist. A zonelist
 * is a list of zones, the first one is the 'goal' of the
 * allocation, the other zones are fallback zones, in decreasing
 * priority.
 *
 * To speed the reading of the zonelist, the zonerefs contain the zone index
 * of the entry being read. Helper functions to access information given
 * a struct zoneref are
 *
 * zonelist_zone()	- Return the struct zone * for an entry in _zonerefs
 * zonelist_zone_idx()	- Return the index of the zone for an entry
 * zonelist_node_idx()	- Return the index of the node for an entry
 */
struct zonelist {
	struct zoneref _zonerefs[MAX_ZONES_PER_ZONELIST + 1];
};

#ifndef CONFIG_DISCONTIGMEM
/* The array of struct pages - for discontigmem use pgdat->lmem_map */
extern struct page *mem_map;
#endif

/*
 * On NUMA machines, each NUMA node would have a pg_data_t to describe
 * its memory layout. On UMA machines there is a single pglist_data which
 * describes the whole memory.
 *
 * Memory statistics and page replacement data structures are maintained on a
 * per-zone basis.
 */
struct bootmem_data;
typedef struct pglist_data {
	struct zone node_zones[MAX_NR_ZONES];
	struct zonelist node_zonelists[MAX_ZONELISTS];
	int nr_zones;
#ifdef CONFIG_FLAT_NODE_MEM_MAP	/* means !SPARSEMEM */
	struct page *node_mem_map;
#ifdef CONFIG_PAGE_EXTENSION
	struct page_ext *node_page_ext;
#endif
#endif
#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT)
	/*
	 * Must be held any time you expect node_start_pfn,
	 * node_present_pages, node_spanned_pages or nr_zones to stay constant.
	 *
	 * pgdat_resize_lock() and pgdat_resize_unlock() are provided to
	 * manipulate node_size_lock without checking for CONFIG_MEMORY_HOTPLUG
	 * or CONFIG_DEFERRED_STRUCT_PAGE_INIT.
	 *
	 * Nests above zone->lock and zone->span_seqlock
	 */
	spinlock_t node_size_lock;
#endif
	unsigned long node_start_pfn;
	unsigned long node_present_pages; /* total number of physical pages */
	unsigned long node_spanned_pages; /* total size of physical page
					     range, including holes */
	int node_id;
	wait_queue_head_t kswapd_wait;
	wait_queue_head_t pfmemalloc_wait;
	struct task_struct *kswapd;	/* Protected by
					   mem_hotplug_begin/end() */
	int kswapd_order;
	enum zone_type kswapd_classzone_idx;

	int kswapd_failures;		/* Number of 'reclaimed == 0' runs */

#ifdef CONFIG_COMPACTION
	int kcompactd_max_order;
	enum zone_type kcompactd_classzone_idx;
	wait_queue_head_t kcompactd_wait;
	struct task_struct *kcompactd;
#endif
	/*
	 * This is a per-node reserve of pages that are not available
	 * to userspace allocations.
	 */
	unsigned long		totalreserve_pages;

#ifdef CONFIG_NUMA
	/*
	 * zone reclaim becomes active if more unmapped pages exist.
	 */
	unsigned long		min_unmapped_pages;
	unsigned long		min_slab_pages;
#endif /* CONFIG_NUMA */

	/* Write-intensive fields used by page reclaim */
	ZONE_PADDING(_pad1_)
	spinlock_t		lru_lock;

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
	/*
	 * If memory initialisation on large machines is deferred then this
	 * is the first PFN that needs to be initialised.
	 */
	unsigned long first_deferred_pfn;
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	spinlock_t split_queue_lock;
	struct list_head split_queue;
	unsigned long split_queue_len;
#endif

	/* Fields commonly accessed by the page reclaim scanner */
	struct lruvec		lruvec;

	unsigned long		flags;

	ZONE_PADDING(_pad2_)

	/* Per-node vmstats */
	struct per_cpu_nodestat __percpu *per_cpu_nodestats;
	atomic_long_t		vm_stat[NR_VM_NODE_STAT_ITEMS];
} pg_data_t;

#define node_present_pages(nid)	(NODE_DATA(nid)->node_present_pages)
#define node_spanned_pages(nid)	(NODE_DATA(nid)->node_spanned_pages)
#ifdef CONFIG_FLAT_NODE_MEM_MAP
#define pgdat_page_nr(pgdat, pagenr)	((pgdat)->node_mem_map + (pagenr))
#else
#define pgdat_page_nr(pgdat, pagenr)	pfn_to_page((pgdat)->node_start_pfn + (pagenr))
#endif
#define nid_page_nr(nid, pagenr) 	pgdat_page_nr(NODE_DATA(nid),(pagenr))

#define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)
#define node_end_pfn(nid) pgdat_end_pfn(NODE_DATA(nid))

static inline struct lruvec *node_lruvec(struct pglist_data *pgdat)
{
	return &pgdat->lruvec;
}

static inline unsigned long pgdat_end_pfn(pg_data_t *pgdat)
{
	return pgdat->node_start_pfn + pgdat->node_spanned_pages;
}

static inline bool pgdat_is_empty(pg_data_t *pgdat)
{
	return !pgdat->node_start_pfn && !pgdat->node_spanned_pages;
}

#include <linux/memory_hotplug.h>

void build_all_zonelists(pg_data_t *pgdat);
void wakeup_kswapd(struct zone *zone, gfp_t gfp_mask, int order,
		   enum zone_type classzone_idx);
bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
			 int classzone_idx, unsigned int alloc_flags,
			 long free_pages);
bool zone_watermark_ok(struct zone *z, unsigned int order,
		unsigned long mark, int classzone_idx,
		unsigned int alloc_flags);
bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
		unsigned long mark, int classzone_idx);
enum memmap_context {
	MEMMAP_EARLY,
	MEMMAP_HOTPLUG,
};
extern void init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
				     unsigned long size);

extern void lruvec_init(struct lruvec *lruvec);

static inline struct pglist_data *lruvec_pgdat(struct lruvec *lruvec)
{
#ifdef CONFIG_MEMCG
	return lruvec->pgdat;
#else
	return container_of(lruvec, struct pglist_data, lruvec);
#endif
}

extern unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx);

#ifdef CONFIG_HAVE_MEMORY_PRESENT
void memory_present(int nid, unsigned long start, unsigned long end);
#else
static inline void memory_present(int nid, unsigned long start, unsigned long end) {}
#endif

#if defined(CONFIG_SPARSEMEM)
void memblocks_present(void);
#else
static inline void memblocks_present(void) {}
#endif

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
int local_memory_node(int node_id);
#else
static inline int local_memory_node(int node_id) { return node_id; };
#endif

/*
 * zone_idx() returns 0 for the ZONE_DMA zone, 1 for the ZONE_NORMAL zone, etc.
 */
#define zone_idx(zone)		((zone) - (zone)->zone_pgdat->node_zones)

#ifdef CONFIG_ZONE_DEVICE
static inline bool is_dev_zone(const struct zone *zone)
{
	return zone_idx(zone) == ZONE_DEVICE;
}
#else
static inline bool is_dev_zone(const struct zone *zone)
{
	return false;
}
#endif

/*
 * Returns true if a zone has pages managed by the buddy allocator.
 * All the reclaim decisions have to use this function rather than
 * populated_zone(). If the whole zone is reserved then we can easily
 * end up with populated_zone() && !managed_zone().
 */
static inline bool managed_zone(struct zone *zone)
{
	return zone_managed_pages(zone);
}

/* Returns true if a zone has memory */
static inline bool populated_zone(struct zone *zone)
{
	return zone->present_pages;
}
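
/*
 * Worked example of the distinction above: a zone whose pages were all
 * reserved at boot has present_pages > 0 but zone_managed_pages() == 0, so
 * populated_zone() is true while managed_zone() is false; reclaim and
 * compaction decisions must therefore use managed_zone().
 */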

#ifdef CONFIG_NUMA
static inline int zone_to_nid(struct zone *zone)
{
	return zone->node;
}

static inline void zone_set_nid(struct zone *zone, int nid)
{
	zone->node = nid;
}
#else
static inline int zone_to_nid(struct zone *zone)
{
	return 0;
}

static inline void zone_set_nid(struct zone *zone, int nid) {}
#endif

extern int movable_zone;

#ifdef CONFIG_HIGHMEM
static inline int zone_movable_is_highmem(void)
{
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
	return movable_zone == ZONE_HIGHMEM;
#else
	return (ZONE_MOVABLE - 1) == ZONE_HIGHMEM;
#endif
}
#endif

static inline int is_highmem_idx(enum zone_type idx)
{
#ifdef CONFIG_HIGHMEM
	return (idx == ZONE_HIGHMEM ||
		(idx == ZONE_MOVABLE && zone_movable_is_highmem()));
#else
	return 0;
#endif
}

/**
 * is_highmem - helper function to quickly check if a struct zone is a
 *              highmem zone or not.  This is an attempt to keep references
 *              to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum.
 * @zone - pointer to struct zone variable
 */
static inline int is_highmem(struct zone *zone)
{
#ifdef CONFIG_HIGHMEM
	return is_highmem_idx(zone_idx(zone));
#else
	return 0;
#endif
}

/* These two functions are used to setup the per zone pages min values */
struct ctl_table;
int min_free_kbytes_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int watermark_boost_factor_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int watermark_scale_factor_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES];
int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
			void __user *, size_t *, loff_t *);
int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int,
			void __user *, size_t *, loff_t *);

extern int numa_zonelist_order_handler(struct ctl_table *, int,
			void __user *, size_t *, loff_t *);
extern char numa_zonelist_order[];
#define NUMA_ZONELIST_ORDER_LEN	16

#ifndef CONFIG_NEED_MULTIPLE_NODES

extern struct pglist_data contig_page_data;
#define NODE_DATA(nid)		(&contig_page_data)
#define NODE_MEM_MAP(nid)	mem_map

#else /* CONFIG_NEED_MULTIPLE_NODES */

#include <asm/mmzone.h>

#endif /* !CONFIG_NEED_MULTIPLE_NODES */

extern struct pglist_data *first_online_pgdat(void);
extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
extern struct zone *next_zone(struct zone *zone);

/**
 * for_each_online_pgdat - helper macro to iterate over all online nodes
 * @pgdat - pointer to a pg_data_t variable
 */
#define for_each_online_pgdat(pgdat)			\
	for (pgdat = first_online_pgdat();		\
	     pgdat;					\
	     pgdat = next_online_pgdat(pgdat))
/**
 * for_each_zone - helper macro to iterate over all memory zones
 * @zone - pointer to struct zone variable
 *
 * The user only needs to declare the zone variable, for_each_zone
 * fills it in.
 */
#define for_each_zone(zone)			        \
	for (zone = (first_online_pgdat())->node_zones; \
	     zone;					\
	     zone = next_zone(zone))
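
/*
 * Illustrative sketch, not part of this header: typical use of the iterator
 * above.  The local variables are hypothetical.
 *
 *	struct zone *zone;
 *	unsigned long total_spanned = 0;
 *
 *	for_each_zone(zone)
 *		total_spanned += zone->spanned_pages;
 */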

#define for_each_populated_zone(zone)		        \
	for (zone = (first_online_pgdat())->node_zones; \
	     zone;					\
	     zone = next_zone(zone))			\
		if (!populated_zone(zone))		\
			; /* do nothing */		\
		else

static inline struct zone *zonelist_zone(struct zoneref *zoneref)
{
	return zoneref->zone;
}

static inline int zonelist_zone_idx(struct zoneref *zoneref)
{
	return zoneref->zone_idx;
}

static inline int zonelist_node_idx(struct zoneref *zoneref)
{
	return zone_to_nid(zoneref->zone);
}

struct zoneref *__next_zones_zonelist(struct zoneref *z,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes);

/**
 * next_zones_zonelist - Returns the next zone at or below highest_zoneidx within the allowed nodemask using a cursor within a zonelist as a starting point
 * @z - The cursor used as a starting point for the search
 * @highest_zoneidx - The zone index of the highest zone to return
 * @nodes - An optional nodemask to filter the zonelist with
 *
 * This function returns the next zone at or below a given zone index that is
 * within the allowed nodemask using a cursor as the starting point for the
 * search. The zoneref returned is a cursor that represents the current zone
 * being examined. It should be advanced by one before calling
 * next_zones_zonelist again.
 */
static __always_inline struct zoneref *next_zones_zonelist(struct zoneref *z,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes)
{
	if (likely(!nodes && zonelist_zone_idx(z) <= highest_zoneidx))
		return z;
	return __next_zones_zonelist(z, highest_zoneidx, nodes);
}

/**
 * first_zones_zonelist - Returns the first zone at or below highest_zoneidx within the allowed nodemask in a zonelist
 * @zonelist - The zonelist to search for a suitable zone
 * @highest_zoneidx - The zone index of the highest zone to return
 * @nodes - An optional nodemask to filter the zonelist with
 * @return - Zoneref pointer for the first suitable zone found (see below)
 *
 * This function returns the first zone at or below a given zone index that is
 * within the allowed nodemask. The zoneref returned is a cursor that can be
 * used to iterate the zonelist with next_zones_zonelist by advancing it by
 * one before calling.
 *
 * When no eligible zone is found, zoneref->zone is NULL (zoneref itself is
 * never NULL). This may happen either genuinely, or due to concurrent nodemask
 * update due to cpuset modification.
 */
static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes)
{
	return next_zones_zonelist(zonelist->_zonerefs,
							highest_zoneidx, nodes);
}

/**
 * for_each_zone_zonelist_nodemask - helper macro to iterate over valid zones in a zonelist at or below a given zone index and within a nodemask
 * @zone - The current zone in the iterator
 * @z - The current pointer within zonelist->zones being iterated
 * @zlist - The zonelist being iterated
 * @highidx - The zone index of the highest zone to return
 * @nodemask - Nodemask allowed by the allocator
 *
 * This iterator iterates through all zones at or below a given zone index and
 * within a given nodemask
 */
#define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
	for (z = first_zones_zonelist(zlist, highidx, nodemask), zone = zonelist_zone(z);	\
		zone;							\
		z = next_zones_zonelist(++z, highidx, nodemask),	\
			zone = zonelist_zone(z))

#define for_next_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
	for (zone = z->zone;	\
		zone;							\
		z = next_zones_zonelist(++z, highidx, nodemask),	\
			zone = zonelist_zone(z))


/**
 * for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a given zone index
 * @zone - The current zone in the iterator
 * @z - The current pointer within zonelist->zones being iterated
 * @zlist - The zonelist being iterated
 * @highidx - The zone index of the highest zone to return
 *
 * This iterator iterates through all zones at or below a given zone index.
 */
#define for_each_zone_zonelist(zone, z, zlist, highidx) \
	for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, NULL)
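
/*
 * Illustrative sketch, not part of this header: an allocator-style walk over
 * a zonelist in the spirit of get_page_from_freelist() in mm/page_alloc.c,
 * with all of the real watermark/reclaim logic elided.  The local variables
 * are hypothetical.
 *
 *	struct zoneref *z;
 *	struct zone *zone;
 *
 *	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) {
 *		if (zone_watermark_ok(zone, order, min_wmark_pages(zone),
 *				      zonelist_zone_idx(z), 0))
 *			break;
 *	}
 */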

#ifdef CONFIG_SPARSEMEM
#include <asm/sparsemem.h>
#endif

#if !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) && \
	!defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
static inline unsigned long early_pfn_to_nid(unsigned long pfn)
{
	BUILD_BUG_ON(IS_ENABLED(CONFIG_NUMA));
	return 0;
}
#endif

#ifdef CONFIG_FLATMEM
#define pfn_to_nid(pfn)		(0)
#endif

#ifdef CONFIG_SPARSEMEM

/*
 * SECTION_SHIFT    		#bits space required to store a section #
 *
 * PA_SECTION_SHIFT		physical address to/from section number
 * PFN_SECTION_SHIFT		pfn to/from section number
 */
#define PA_SECTION_SHIFT	(SECTION_SIZE_BITS)
#define PFN_SECTION_SHIFT	(SECTION_SIZE_BITS - PAGE_SHIFT)

#define NR_MEM_SECTIONS		(1UL << SECTIONS_SHIFT)

#define PAGES_PER_SECTION       (1UL << PFN_SECTION_SHIFT)
#define PAGE_SECTION_MASK	(~(PAGES_PER_SECTION-1))

#define SECTION_BLOCKFLAGS_BITS \
	((1UL << (PFN_SECTION_SHIFT - pageblock_order)) * NR_PAGEBLOCK_BITS)

#if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS
#error Allocator MAX_ORDER exceeds SECTION_SIZE
#endif

static inline unsigned long pfn_to_section_nr(unsigned long pfn)
{
	return pfn >> PFN_SECTION_SHIFT;
}
static inline unsigned long section_nr_to_pfn(unsigned long sec)
{
	return sec << PFN_SECTION_SHIFT;
}
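
/*
 * Worked example, assuming the common x86_64 values SECTION_SIZE_BITS = 27
 * and PAGE_SHIFT = 12, so PFN_SECTION_SHIFT = 15 (32768 pages per section):
 *
 *	pfn_to_section_nr(0x12345) == 0x12345 >> 15 == 2
 *	section_nr_to_pfn(2)       == 2 << 15       == 0x10000
 */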

#define SECTION_ALIGN_UP(pfn)	(((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK)
#define SECTION_ALIGN_DOWN(pfn)	((pfn) & PAGE_SECTION_MASK)

struct page;
struct page_ext;
struct mem_section {
	/*
	 * This is, logically, a pointer to an array of struct
	 * pages.  However, it is stored with some other magic.
	 * (see sparse.c::sparse_init_one_section())
	 *
	 * Additionally during early boot we encode node id of
	 * the location of the section here to guide allocation.
	 * (see sparse.c::memory_present())
	 *
	 * Making it a UL at least makes someone do a cast
	 * before using it wrong.
	 */
	unsigned long section_mem_map;

	/* See declaration of similar field in struct zone */
	unsigned long *pageblock_flags;
#ifdef CONFIG_PAGE_EXTENSION
	/*
	 * If SPARSEMEM, pgdat doesn't have page_ext pointer. We use
	 * section. (see page_ext.h about this.)
	 */
	struct page_ext *page_ext;
	unsigned long pad;
#endif
	/*
	 * WARNING: mem_section must be a power-of-2 in size for the
	 * calculation and use of SECTION_ROOT_MASK to make sense.
	 */
};

#ifdef CONFIG_SPARSEMEM_EXTREME
#define SECTIONS_PER_ROOT       (PAGE_SIZE / sizeof (struct mem_section))
#else
#define SECTIONS_PER_ROOT	1
#endif

#define SECTION_NR_TO_ROOT(sec)	((sec) / SECTIONS_PER_ROOT)
#define NR_SECTION_ROOTS	DIV_ROUND_UP(NR_MEM_SECTIONS, SECTIONS_PER_ROOT)
#define SECTION_ROOT_MASK	(SECTIONS_PER_ROOT - 1)

#ifdef CONFIG_SPARSEMEM_EXTREME
extern struct mem_section **mem_section;
#else
extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
#endif

static inline struct mem_section *__nr_to_section(unsigned long nr)
{
#ifdef CONFIG_SPARSEMEM_EXTREME
	if (!mem_section)
		return NULL;
#endif
	if (!mem_section[SECTION_NR_TO_ROOT(nr)])
		return NULL;
	return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
}
extern int __section_nr(struct mem_section* ms);
extern unsigned long usemap_size(void);

/*
 * We use the lower bits of the mem_map pointer to store
 * a little bit of information.  The pointer is calculated
 * as mem_map - section_nr_to_pfn(pnum).  The result is
 * aligned to the minimum alignment of the two values:
 *   1. All mem_map arrays are page-aligned.
 *   2. section_nr_to_pfn() always clears PFN_SECTION_SHIFT
 *      lowest bits.  PFN_SECTION_SHIFT is arch-specific
 *      (equal SECTION_SIZE_BITS - PAGE_SHIFT), and the
 *      worst combination is powerpc with 256k pages,
 *      which results in PFN_SECTION_SHIFT equal 6.
 * To sum it up, at least 6 bits are available.
 */
#define	SECTION_MARKED_PRESENT	(1UL<<0)
#define SECTION_HAS_MEM_MAP	(1UL<<1)
#define SECTION_IS_ONLINE	(1UL<<2)
#define SECTION_MAP_LAST_BIT	(1UL<<3)
#define SECTION_MAP_MASK	(~(SECTION_MAP_LAST_BIT-1))
#define SECTION_NID_SHIFT	3

static inline struct page *__section_mem_map_addr(struct mem_section *section)
{
	unsigned long map = section->section_mem_map;
	map &= SECTION_MAP_MASK;
	return (struct page *)map;
}
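
/*
 * Worked example of the encoding above: sparse_init_one_section() stores,
 * roughly,
 *
 *	section_mem_map = (unsigned long)(mem_map - section_nr_to_pfn(pnum))
 *			  | SECTION_MARKED_PRESENT | SECTION_HAS_MEM_MAP;
 *
 * so that __section_mem_map_addr(section) + pfn yields the struct page for
 * pfn with no subtraction at lookup time; the flag bits occupy low bits that
 * the alignment rules described below guarantee are zero.
 */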

static inline int present_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_MARKED_PRESENT));
}

static inline int present_section_nr(unsigned long nr)
{
	return present_section(__nr_to_section(nr));
}

static inline int valid_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP));
}

static inline int valid_section_nr(unsigned long nr)
{
	return valid_section(__nr_to_section(nr));
}

static inline int online_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_IS_ONLINE));
}

static inline int online_section_nr(unsigned long nr)
{
	return online_section(__nr_to_section(nr));
}

#ifdef CONFIG_MEMORY_HOTPLUG
void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn);
#ifdef CONFIG_MEMORY_HOTREMOVE
void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn);
#endif
#endif

static inline struct mem_section *__pfn_to_section(unsigned long pfn)
{
	return __nr_to_section(pfn_to_section_nr(pfn));
}

extern int __highest_present_section_nr;

#ifndef CONFIG_HAVE_ARCH_PFN_VALID
static inline int pfn_valid(unsigned long pfn)
{
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	return valid_section(__nr_to_section(pfn_to_section_nr(pfn)));
}
#endif

static inline int pfn_present(unsigned long pfn)
{
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	return present_section(__nr_to_section(pfn_to_section_nr(pfn)));
}

/*
 * These are _only_ used during initialisation, therefore they
 * can use __initdata ...  They could have names to indicate
 * this restriction.
 */
#ifdef CONFIG_NUMA
#define pfn_to_nid(pfn)							\
({									\
	unsigned long __pfn_to_nid_pfn = (pfn);				\
	page_to_nid(pfn_to_page(__pfn_to_nid_pfn));			\
})
#else
#define pfn_to_nid(pfn)		(0)
#endif

#define early_pfn_valid(pfn)	pfn_valid(pfn)
void sparse_init(void);
#else
#define sparse_init()	do {} while (0)
#define sparse_index_init(_sec, _nid)  do {} while (0)
#define pfn_present pfn_valid
#endif /* CONFIG_SPARSEMEM */

/*
 * During memory init memblocks map pfns to nids. The search is expensive and
 * this caches recent lookups. The implementation of __early_pfn_to_nid
 * may treat start/end as pfns or sections.
 */
struct mminit_pfnnid_cache {
	unsigned long last_start;
	unsigned long last_end;
	int last_nid;
};

A
#define early_pfn_valid(pfn)	(1)
#endif

void memory_present(int nid, unsigned long start, unsigned long end);

1340 1341
/*
 * If it is possible to have holes within a MAX_ORDER_NR_PAGES, then we
W
 * need to check pfn validity within that MAX_ORDER_NR_PAGES block.
 * when we have no holes within a MAX_ORDER_NR_PAGES block.
 */
#ifdef CONFIG_HOLES_IN_ZONE
#define pfn_valid_within(pfn) pfn_valid(pfn)
#else
#define pfn_valid_within(pfn) (1)
#endif

#ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL
/*
 * pfn_valid() is meant to be able to tell if a given PFN has valid memmap
 * associated with it or not. This means that a struct page exists for this
 * pfn. The caller cannot assume the page is fully initialized in general.
 * Hotplugable pages might not have been onlined yet. pfn_to_online_page()
 * will ensure the struct page is fully online and initialized. Special pages
 * (e.g. ZONE_DEVICE) are never onlined and should be treated accordingly.
 *
 * In FLATMEM, it is expected that holes always have valid memmap as long as
 * there is valid PFNs either side of the hole. In SPARSEMEM, it is assumed
 * that a valid section has a memmap for the entire section.
1364 1365 1366 1367 1368 1369 1370 1371 1372
 *
 * However, an ARM, and maybe other embedded architectures in the future
 * free memmap backing holes to save memory on the assumption the memmap is
 * never used. The page_zone linkages are then broken even though pfn_valid()
 * returns true. A walker of the full memmap must then do this additional
 * check to ensure the memmap they are looking at is sane by making sure
 * the zone and PFN linkages are still valid. This is expensive, but walkers
 * of the full memmap are extremely rare.
 */
1373
bool memmap_valid_within(unsigned long pfn,
1374 1375
					struct page *page, struct zone *zone);
#else
1376
static inline bool memmap_valid_within(unsigned long pfn,
1377 1378
					struct page *page, struct zone *zone)
{
1379
	return true;
1380 1381 1382
}
#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */

C
#endif /* !__GENERATING_BOUNDS_H */
#endif /* _LINUX_MMZONE_H */