/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MMZONE_H
#define _LINUX_MMZONE_H

#ifndef __ASSEMBLY__
#ifndef __GENERATING_BOUNDS_H

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/numa.h>
#include <linux/init.h>
#include <linux/seqlock.h>
#include <linux/nodemask.h>
#include <linux/pageblock-flags.h>
#include <linux/page-flags-layout.h>
#include <linux/atomic.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <asm/page.h>

#include <linux/ali_hotfix.h>

/* Free memory management - zoned buddy allocator.  */
#ifndef CONFIG_FORCE_MAX_ZONEORDER
#define MAX_ORDER 11
#else
#define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER
#endif
#define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1))

/*
 * PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed
 * costly to service.  That is between allocation orders which should
 * coalesce naturally under reasonable reclaim pressure and those which
 * will not.
 */
#define PAGE_ALLOC_COSTLY_ORDER 3

enum migratetype {
	MIGRATE_UNMOVABLE,
	MIGRATE_MOVABLE,
	MIGRATE_RECLAIMABLE,
	MIGRATE_PCPTYPES,	/* the number of types on the pcp lists */
	MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES,
#ifdef CONFIG_CMA
	/*
	 * MIGRATE_CMA migration type is designed to mimic the way
	 * ZONE_MOVABLE works.  Only movable pages can be allocated
	 * from MIGRATE_CMA pageblocks and the page allocator never
	 * implicitly changes the migration type of a MIGRATE_CMA pageblock.
	 *
	 * The way to use it is to change the migratetype of a range of
	 * pageblocks to MIGRATE_CMA, which can be done with the
	 * __free_pageblock_cma() function.  What is important though
	 * is that the range of pageblocks must be aligned to
	 * MAX_ORDER_NR_PAGES should the biggest page be bigger than
	 * a single pageblock.
	 */
	MIGRATE_CMA,
#endif
#ifdef CONFIG_MEMORY_ISOLATION
	MIGRATE_ISOLATE,	/* can't allocate from here */
#endif
	MIGRATE_TYPES
};

/* In mm/page_alloc.c; keep in sync also with show_migration_types() there */
extern char * const migratetype_names[MIGRATE_TYPES];

#ifdef CONFIG_CMA
#  define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
#  define is_migrate_cma_page(_page) (get_pageblock_migratetype(_page) == MIGRATE_CMA)
#else
#  define is_migrate_cma(migratetype) false
#  define is_migrate_cma_page(_page) false
#endif

static inline bool is_migrate_movable(int mt)
{
	return is_migrate_cma(mt) || mt == MIGRATE_MOVABLE;
}

#define for_each_migratetype_order(order, type) \
	for (order = 0; order < MAX_ORDER; order++) \
		for (type = 0; type < MIGRATE_TYPES; type++)

extern int page_group_by_mobility_disabled;

#define NR_MIGRATETYPE_BITS (PB_migrate_end - PB_migrate + 1)
#define MIGRATETYPE_MASK ((1UL << NR_MIGRATETYPE_BITS) - 1)

#define get_pageblock_migratetype(page)					\
	get_pfnblock_flags_mask(page, page_to_pfn(page),		\
			PB_migrate_end, MIGRATETYPE_MASK)
struct free_area {
	struct list_head	free_list[MIGRATE_TYPES];
	unsigned long		nr_free;
};

static inline struct page *get_page_from_free_area(struct free_area *area,
					    int migratetype)
{
	return list_first_entry_or_null(&area->free_list[migratetype],
					struct page, lru);
}

static inline bool free_area_empty(struct free_area *area, int migratetype)
{
	return list_empty(&area->free_list[migratetype]);
}
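
/*
 * Illustrative sketch (hypothetical helper, not part of this header's
 * API): the buddy allocator's fast paths in mm/page_alloc.c consume the
 * free lists in roughly this shape.  Real callers hold zone->lock and
 * unlink the page from the list before using it.
 */
static inline struct page *example_peek_free_page(struct free_area *area,
						  int migratetype)
{
	if (free_area_empty(area, migratetype))
		return NULL;
	return get_page_from_free_area(area, migratetype);
}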

struct pglist_data;

/*
 * zone->lock and the zone lru_lock are two of the hottest locks in the kernel.
 * So add a wild amount of padding here to ensure that they fall into separate
 * cachelines.  There are very few zone structures in the machine, so space
 * consumption is not a concern here.
 */
#if defined(CONFIG_SMP)
struct zone_padding {
	char x[0];
} ____cacheline_internodealigned_in_smp;
#define ZONE_PADDING(name)	struct zone_padding name;
#else
#define ZONE_PADDING(name)
#endif

#ifdef CONFIG_NUMA
enum numa_stat_item {
	NUMA_HIT,		/* allocated in intended node */
	NUMA_MISS,		/* allocated in non intended node */
	NUMA_FOREIGN,		/* was intended here, hit elsewhere */
	NUMA_INTERLEAVE_HIT,	/* interleaver preferred this zone */
	NUMA_LOCAL,		/* allocation from local node */
	NUMA_OTHER,		/* allocation from other node */
	NR_VM_NUMA_STAT_ITEMS
};
#else
#define NR_VM_NUMA_STAT_ITEMS 0
#endif

enum zone_stat_item {
	/* First 128 byte cacheline (assuming 64 bit words) */
	NR_FREE_PAGES,
	NR_ZONE_LRU_BASE, /* Used only for compaction and reclaim retry */
	NR_ZONE_INACTIVE_ANON = NR_ZONE_LRU_BASE,
	NR_ZONE_ACTIVE_ANON,
	NR_ZONE_INACTIVE_FILE,
	NR_ZONE_ACTIVE_FILE,
	NR_ZONE_UNEVICTABLE,
	NR_ZONE_WRITE_PENDING,	/* Count of dirty, writeback and unstable pages */
	NR_MLOCK,		/* mlock()ed pages found and moved off LRU */
	NR_PAGETABLE,		/* used for pagetables */
	NR_KERNEL_STACK_KB,	/* measured in KiB */
	/* Second 128 byte cacheline */
	NR_BOUNCE,
#if IS_ENABLED(CONFIG_ZSMALLOC)
	NR_ZSPAGES,		/* allocated in zsmalloc */
#endif
	NR_FREE_CMA_PAGES,
	NR_VM_ZONE_STAT_ITEMS };

enum node_stat_item {
	NR_LRU_BASE,
	NR_INACTIVE_ANON = NR_LRU_BASE, /* must match order of LRU_[IN]ACTIVE */
	NR_ACTIVE_ANON,		/*  "     "     "   "       "         */
	NR_INACTIVE_FILE,	/*  "     "     "   "       "         */
	NR_ACTIVE_FILE,		/*  "     "     "   "       "         */
	NR_UNEVICTABLE,		/*  "     "     "   "       "         */
	NR_SLAB_RECLAIMABLE,
	NR_SLAB_UNRECLAIMABLE,
	NR_ISOLATED_ANON,	/* Temporary isolated pages from anon lru */
	NR_ISOLATED_FILE,	/* Temporary isolated pages from file lru */
	WORKINGSET_REFAULT,
	WORKINGSET_ACTIVATE,
	WORKINGSET_RESTORE,
	WORKINGSET_NODERECLAIM,
	NR_ANON_MAPPED,	/* Mapped anonymous pages */
	NR_FILE_MAPPED,	/* pagecache pages mapped into pagetables.
			   only modified from process context */
	NR_FILE_PAGES,
	NR_FILE_DIRTY,
	NR_WRITEBACK,
	NR_WRITEBACK_TEMP,	/* Writeback using temporary buffers */
	NR_SHMEM,		/* shmem pages (includes tmpfs/GEM pages) */
	NR_SHMEM_THPS,
	NR_SHMEM_PMDMAPPED,
	NR_ANON_THPS,
	NR_UNSTABLE_NFS,	/* NFS unstable pages */
	NR_VMSCAN_WRITE,
	NR_VMSCAN_IMMEDIATE,	/* Prioritise for reclaim when writeback ends */
	NR_DIRTIED,		/* page dirtyings since bootup */
	NR_WRITTEN,		/* page writings since bootup */
	NR_INDIRECTLY_RECLAIMABLE_BYTES, /* measured in bytes */
	NR_VM_NODE_STAT_ITEMS
};

/*
 * We do arithmetic on the LRU lists in various places in the code,
 * so it is important to keep the active lists LRU_ACTIVE higher in
 * the array than the corresponding inactive lists, and to keep
 * the *_FILE lists LRU_FILE higher than the corresponding _ANON lists.
 *
 * This has to be kept in sync with the statistics in zone_stat_item
 * above and the descriptions in vmstat_text in mm/vmstat.c
 */
#define LRU_BASE 0
#define LRU_ACTIVE 1
#define LRU_FILE 2

enum lru_list {
	LRU_INACTIVE_ANON = LRU_BASE,
	LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE,
	LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE,
	LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE,
	LRU_UNEVICTABLE,
	NR_LRU_LISTS
};

#define for_each_lru(lru) for (lru = 0; lru < NR_LRU_LISTS; lru++)

#define for_each_evictable_lru(lru) for (lru = 0; lru <= LRU_ACTIVE_FILE; lru++)

static inline int is_file_lru(enum lru_list lru)
{
	return (lru == LRU_INACTIVE_FILE || lru == LRU_ACTIVE_FILE);
}

static inline int is_active_lru(enum lru_list lru)
{
	return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE);
}
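
/*
 * Illustrative sketch (hypothetical helper): thanks to the fixed
 * LRU_ACTIVE/LRU_FILE offsets above, an evictable LRU index can be
 * composed from its two properties; mm code does this arithmetic in
 * several places.
 */
static inline enum lru_list example_lru_index(int file, int active)
{
	return LRU_BASE + LRU_FILE * file + LRU_ACTIVE * active;
}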

struct zone_reclaim_stat {
	/*
	 * The pageout code in vmscan.c keeps track of how many of the
	 * mem/swap backed and file backed pages are referenced.
	 * The higher the rotated/scanned ratio, the more valuable
	 * that cache is.
	 *
	 * The anon LRU stats live in [0], file LRU stats in [1]
	 */
	unsigned long		recent_rotated[2];
	unsigned long		recent_scanned[2];
};

struct lruvec {
	struct list_head		lists[NR_LRU_LISTS];
	struct zone_reclaim_stat	reclaim_stat;
	/* Evictions & activations on the inactive file list */
	atomic_long_t			inactive_age;
	/* Refaults at the time of last reclaim cycle */
	unsigned long			refaults;
#ifdef CONFIG_MEMCG
	struct pglist_data *pgdat;
#endif
};

/* Mask used at gathering information at once (see memcontrol.c) */
#define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))
#define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
#define LRU_ALL	     ((1 << NR_LRU_LISTS) - 1)

/* Isolate unmapped file */
#define ISOLATE_UNMAPPED	((__force isolate_mode_t)0x2)
/* Isolate for asynchronous migration */
#define ISOLATE_ASYNC_MIGRATE	((__force isolate_mode_t)0x4)
/* Isolate unevictable pages */
#define ISOLATE_UNEVICTABLE	((__force isolate_mode_t)0x8)

/* LRU Isolation modes. */
typedef unsigned __bitwise isolate_mode_t;

enum zone_watermarks {
	WMARK_MIN,
	WMARK_LOW,
	WMARK_HIGH,
	NR_WMARK
};

#define min_wmark_pages(z) (z->_watermark[WMARK_MIN])
#define low_wmark_pages(z) (z->_watermark[WMARK_LOW])
#define high_wmark_pages(z) (z->_watermark[WMARK_HIGH])
#define wmark_pages(z, i) (z->_watermark[i])

struct per_cpu_pages {
	int count;		/* number of pages in the list */
	int high;		/* high watermark, emptying needed */
	int batch;		/* chunk size for buddy add/remove */

	/* Lists of pages, one per migrate type stored on the pcp-lists */
	struct list_head lists[MIGRATE_PCPTYPES];
};

struct per_cpu_pageset {
	struct per_cpu_pages pcp;
#ifdef CONFIG_NUMA
	s8 expire;
	u16 vm_numa_stat_diff[NR_VM_NUMA_STAT_ITEMS];
#endif
#ifdef CONFIG_SMP
	s8 stat_threshold;
	s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
#endif
};

struct per_cpu_nodestat {
	s8 stat_threshold;
	s8 vm_node_stat_diff[NR_VM_NODE_STAT_ITEMS];
};

#endif /* !__GENERATING_BOUNDS.H */

enum zone_type {
#ifdef CONFIG_ZONE_DMA
	/*
	 * ZONE_DMA is used when there are devices that are not able
	 * to do DMA to all of addressable memory (ZONE_NORMAL). Then we
	 * carve out the portion of memory that is needed for these devices.
	 * The range is arch specific.
	 *
	 * Some examples
	 *
	 * Architecture		Limit
	 * ---------------------------
	 * parisc, ia64, sparc	<4G
	 * s390			<2G
	 * arm			Various
	 * alpha		Unlimited or 0-16MB.
	 *
	 * i386, x86_64 and multiple other arches
	 * 			<16M.
	 */
	ZONE_DMA,
#endif
#ifdef CONFIG_ZONE_DMA32
	/*
	 * x86_64 needs two ZONE_DMAs because it supports devices that are
	 * only able to do DMA to the lower 16M but also 32 bit devices that
	 * can only do DMA areas below 4G.
	 */
	ZONE_DMA32,
#endif
	/*
	 * Normal addressable memory is in ZONE_NORMAL. DMA operations can be
	 * performed on pages in ZONE_NORMAL if the DMA devices support
	 * transfers to all addressable memory.
	 */
	ZONE_NORMAL,
#ifdef CONFIG_HIGHMEM
	/*
	 * A memory area that is only addressable by the kernel through
	 * mapping portions into its own address space. This is for example
	 * used by i386 to allow the kernel to address the memory beyond
	 * 900MB. The kernel will set up special mappings (page
	 * table entries on i386) for each page that the kernel needs to
	 * access.
	 */
	ZONE_HIGHMEM,
#endif
	ZONE_MOVABLE,
#ifdef CONFIG_ZONE_DEVICE
	ZONE_DEVICE,
#endif
	__MAX_NR_ZONES

};

#ifndef __GENERATING_BOUNDS_H

struct zone {
	/* Read-mostly fields */

	/* zone watermarks, access with *_wmark_pages(zone) macros */
	unsigned long _watermark[NR_WMARK];

	unsigned long nr_reserved_highatomic;

	/*
	 * We don't know if the memory that we're going to allocate will be
	 * freeable and/or released eventually, so to avoid totally
	 * wasting several GB of ram we must reserve some of the lower zone
	 * memory (otherwise we risk running OOM on the lower zones despite
	 * there being tons of freeable ram on the higher zones).  This array is
	 * recalculated at runtime if the sysctl_lowmem_reserve_ratio sysctl
	 * changes.
	 */
	long lowmem_reserve[MAX_NR_ZONES];

#ifdef CONFIG_NUMA
	int node;
#endif
	struct pglist_data	*zone_pgdat;
	struct per_cpu_pageset __percpu *pageset;

#ifndef CONFIG_SPARSEMEM
	/*
	 * Flags for a pageblock_nr_pages block. See pageblock-flags.h.
	 * In SPARSEMEM, this map is stored in struct mem_section
	 */
	unsigned long		*pageblock_flags;
#endif /* CONFIG_SPARSEMEM */

	/* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
	unsigned long		zone_start_pfn;

	/*
	 * spanned_pages is the total pages spanned by the zone, including
	 * holes, which is calculated as:
	 * 	spanned_pages = zone_end_pfn - zone_start_pfn;
	 *
	 * present_pages is physical pages existing within the zone, which
	 * is calculated as:
	 *	present_pages = spanned_pages - absent_pages(pages in holes);
	 *
	 * managed_pages is present pages managed by the buddy system, which
	 * is calculated as (reserved_pages includes pages allocated by the
	 * bootmem allocator):
	 *	managed_pages = present_pages - reserved_pages;
	 *
	 * So present_pages may be used by memory hotplug or memory power
	 * management logic to figure out unmanaged pages by checking
	 * (present_pages - managed_pages). And managed_pages should be used
	 * by page allocator and vm scanner to calculate all kinds of watermarks
	 * and thresholds.
	 *
	 * Locking rules:
	 *
	 * zone_start_pfn and spanned_pages are protected by span_seqlock.
	 * It is a seqlock because it has to be read outside of zone->lock,
	 * and it is done in the main allocator path.  But, it is written
	 * quite infrequently.
	 *
	 * The span_seq lock is declared along with zone->lock because it is
	 * frequently read in proximity to zone->lock.  It's good to
	 * give them a chance of being in the same cacheline.
	 *
	 * Write access to present_pages at runtime should be protected by
	 * mem_hotplug_begin/end(). Any reader who can't tolerate drift of
	 * present_pages should use get_online_mems() to get a stable value.
	 *
	 * Read access to managed_pages should be safe because it's unsigned
	 * long. Write access to zone->managed_pages and totalram_pages is
	 * protected by managed_page_count_lock at runtime. Ideally only
	 * adjust_managed_page_count() should be used instead of directly
	 * touching zone->managed_pages and totalram_pages.
	 */
	unsigned long		managed_pages;
	unsigned long		spanned_pages;
	unsigned long		present_pages;

	const char		*name;

#ifdef CONFIG_MEMORY_ISOLATION
	/*
	 * Number of isolated pageblocks. It is used to solve an incorrect
	 * freepage counting problem due to racy retrieving of the
	 * migratetype of a pageblock. Protected by zone->lock.
	 */
	unsigned long		nr_isolate_pageblock;
#endif

#ifdef CONFIG_MEMORY_HOTPLUG
	/* see spanned/present_pages for more description */
	seqlock_t		span_seqlock;
#endif

	int initialized;

	/* Write-intensive fields used from the page allocator */
	ZONE_PADDING(_pad1_)

	/* free areas of different sizes */
	struct free_area	free_area[MAX_ORDER];

	/* zone flags, see below */
	unsigned long		flags;

	/* Primarily protects free_area */
	spinlock_t		lock;

	/* Write-intensive fields used by compaction and vmstats. */
	ZONE_PADDING(_pad2_)

	/*
	 * When free pages are below this point, additional steps are taken
	 * when reading the number of free pages to avoid per-cpu counter
	 * drift allowing watermarks to be breached
	 */
	unsigned long percpu_drift_mark;

#if defined CONFIG_COMPACTION || defined CONFIG_CMA
	/* pfn where compaction free scanner should start */
	unsigned long		compact_cached_free_pfn;
	/* pfn where async and sync compaction migration scanner should start */
	unsigned long		compact_cached_migrate_pfn[2];
#endif

#ifdef CONFIG_COMPACTION
	/*
	 * On compaction failure, 1<<compact_defer_shift compactions
	 * are skipped before trying again. The number attempted since
	 * last failure is tracked with compact_considered.
	 */
	unsigned int		compact_considered;
	unsigned int		compact_defer_shift;
	int			compact_order_failed;
#endif

#if defined CONFIG_COMPACTION || defined CONFIG_CMA
	/* Set to true when the PG_migrate_skip bits should be cleared */
	bool			compact_blockskip_flush;
#endif

	bool			contiguous;

	ZONE_PADDING(_pad3_)
	/* Zone statistics */
	atomic_long_t		vm_stat[NR_VM_ZONE_STAT_ITEMS];
	atomic_long_t		vm_numa_stat[NR_VM_NUMA_STAT_ITEMS];

	ALI_HOTFIX_RESERVE(1)
	ALI_HOTFIX_RESERVE(2)
} ____cacheline_internodealigned_in_smp;

enum pgdat_flags {
	PGDAT_CONGESTED,		/* pgdat has many dirty pages backed by
					 * a congested BDI
					 */
	PGDAT_DIRTY,			/* reclaim scanning has recently found
					 * many dirty file pages at the tail
					 * of the LRU.
					 */
	PGDAT_WRITEBACK,		/* reclaim scanning has recently found
					 * many pages under writeback
					 */
	PGDAT_RECLAIM_LOCKED,		/* prevents concurrent reclaim */
};

static inline unsigned long zone_end_pfn(const struct zone *zone)
{
	return zone->zone_start_pfn + zone->spanned_pages;
}

static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn)
{
	return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone);
}

static inline bool zone_is_initialized(struct zone *zone)
{
	return zone->initialized;
}

static inline bool zone_is_empty(struct zone *zone)
{
	return zone->spanned_pages == 0;
}

/*
 * Return true if [start_pfn, start_pfn + nr_pages) range has a non-empty
 * intersection with the given zone
 */
static inline bool zone_intersects(struct zone *zone,
		unsigned long start_pfn, unsigned long nr_pages)
{
	if (zone_is_empty(zone))
		return false;
	if (start_pfn >= zone_end_pfn(zone) ||
	    start_pfn + nr_pages <= zone->zone_start_pfn)
		return false;

	return true;
}
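
/*
 * Illustrative sketch (hypothetical helper, not part of this header):
 * watermark checks read NR_FREE_PAGES from zone->vm_stat and compare it
 * against the *_wmark_pages() macros defined above.  The real check,
 * __zone_watermark_ok(), additionally applies lowmem_reserve[] and
 * per-order corrections.
 */
static inline bool example_zone_above_high_wmark(struct zone *zone)
{
	unsigned long free = atomic_long_read(&zone->vm_stat[NR_FREE_PAGES]);

	return free > high_wmark_pages(zone);
}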

/*
 * The "priority" of VM scanning is how much of the queues we will scan in one
 * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the
 * queues ("queue_length >> 12") during an aging round.
 */
#define DEF_PRIORITY 12

/* Maximum number of zones on a zonelist */
#define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES)

enum {
	ZONELIST_FALLBACK,	/* zonelist with fallback */
#ifdef CONFIG_NUMA
	/*
	 * The NUMA zonelists are doubled because we need zonelists that
	 * restrict the allocations to a single node for __GFP_THISNODE.
	 */
	ZONELIST_NOFALLBACK,	/* zonelist without fallback (__GFP_THISNODE) */
#endif
	MAX_ZONELISTS
};

/*
 * This struct contains information about a zone in a zonelist. It is stored
 * here to avoid dereferences into large structures and lookups of tables
 */
struct zoneref {
	struct zone *zone;	/* Pointer to actual zone */
	int zone_idx;		/* zone_idx(zoneref->zone) */
};

/*
 * One allocation request operates on a zonelist. A zonelist
 * is a list of zones, the first one is the 'goal' of the
 * allocation, the other zones are fallback zones, in decreasing
 * priority.
 *
 * To speed the reading of the zonelist, the zonerefs contain the zone index
 * of the entry being read. Helper functions to access information given
 * a struct zoneref are
 *
 * zonelist_zone()	- Return the struct zone * for an entry in _zonerefs
 * zonelist_zone_idx()	- Return the index of the zone for an entry
 * zonelist_node_idx()	- Return the index of the node for an entry
 */
struct zonelist {
	struct zoneref _zonerefs[MAX_ZONES_PER_ZONELIST + 1];
};

#ifndef CONFIG_DISCONTIGMEM
/* The array of struct pages - for discontigmem use pgdat->lmem_map */
extern struct page *mem_map;
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
struct deferred_split {
	spinlock_t split_queue_lock;
	struct list_head split_queue;
	unsigned long split_queue_len;
};
#endif

/*
 * On NUMA machines, each NUMA node would have a pg_data_t to describe
 * its memory layout. On UMA machines there is a single pglist_data which
 * describes the whole memory.
 *
 * Memory statistics and page replacement data structures are maintained on a
 * per-zone basis.
 */
struct bootmem_data;
typedef struct pglist_data {
	struct zone node_zones[MAX_NR_ZONES];
	struct zonelist node_zonelists[MAX_ZONELISTS];

	int nr_zones;
#ifdef CONFIG_FLAT_NODE_MEM_MAP	/* means !SPARSEMEM */
	struct page *node_mem_map;
#ifdef CONFIG_PAGE_EXTENSION
	struct page_ext *node_page_ext;
#endif
#endif
#ifndef CONFIG_NO_BOOTMEM
	struct bootmem_data *bdata;
#endif
#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT)
	/*
	 * Must be held any time you expect node_start_pfn, node_present_pages
	 * or node_spanned_pages stay constant.  Holding this will also
	 * guarantee that any pfn_valid() stays that way.
	 *
	 * pgdat_resize_lock() and pgdat_resize_unlock() are provided to
	 * manipulate node_size_lock without checking for CONFIG_MEMORY_HOTPLUG
	 * or CONFIG_DEFERRED_STRUCT_PAGE_INIT.
	 *
	 * Nests above zone->lock and zone->span_seqlock
	 */
	spinlock_t node_size_lock;
#endif
	unsigned long node_start_pfn;
	unsigned long node_present_pages; /* total number of physical pages */
	unsigned long node_spanned_pages; /* total size of physical page
					     range, including holes */
#ifdef CONFIG_KIDLED
	unsigned long node_idle_scan_pfn;
	u8 *node_page_age;
#endif

	int node_id;
	wait_queue_head_t kswapd_wait;
	wait_queue_head_t pfmemalloc_wait;
	struct task_struct *kswapd;	/* Protected by
					   mem_hotplug_begin/end() */
	int kswapd_order;
	enum zone_type kswapd_classzone_idx;

	int kswapd_failures;		/* Number of 'reclaimed == 0' runs */

#ifdef CONFIG_COMPACTION
	int kcompactd_max_order;
	enum zone_type kcompactd_classzone_idx;
	wait_queue_head_t kcompactd_wait;
	struct task_struct *kcompactd;
#endif
	/*
	 * This is a per-node reserve of pages that are not available
	 * to userspace allocations.
	 */
	unsigned long		totalreserve_pages;

#ifdef CONFIG_NUMA
	/*
	 * zone reclaim becomes active if more unmapped pages exist.
	 */
	unsigned long		min_unmapped_pages;
	unsigned long		min_slab_pages;
#endif /* CONFIG_NUMA */

	/* Write-intensive fields used by page reclaim */
	ZONE_PADDING(_pad1_)
	spinlock_t		lru_lock;

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
	/*
	 * If memory initialisation on large machines is deferred then this
	 * is the first PFN that needs to be initialised.
	 */
	unsigned long first_deferred_pfn;
	/* Number of non-deferred pages */
	unsigned long static_init_pgcnt;
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	struct deferred_split deferred_split_queue;
#endif

	/* Fields commonly accessed by the page reclaim scanner */
	struct lruvec		lruvec;

	unsigned long		flags;

	ZONE_PADDING(_pad2_)

	/* Per-node vmstats */
	struct per_cpu_nodestat __percpu *per_cpu_nodestats;
	atomic_long_t		vm_stat[NR_VM_NODE_STAT_ITEMS];

	ALI_HOTFIX_RESERVE(1)
	ALI_HOTFIX_RESERVE(2)
} pg_data_t;

#define node_present_pages(nid)	(NODE_DATA(nid)->node_present_pages)
#define node_spanned_pages(nid)	(NODE_DATA(nid)->node_spanned_pages)
#ifdef CONFIG_FLAT_NODE_MEM_MAP
#define pgdat_page_nr(pgdat, pagenr)	((pgdat)->node_mem_map + (pagenr))
#else
#define pgdat_page_nr(pgdat, pagenr)	pfn_to_page((pgdat)->node_start_pfn + (pagenr))
#endif
#define nid_page_nr(nid, pagenr) 	pgdat_page_nr(NODE_DATA(nid),(pagenr))

#define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)
#define node_end_pfn(nid) pgdat_end_pfn(NODE_DATA(nid))

static inline spinlock_t *zone_lru_lock(struct zone *zone)
{
	return &zone->zone_pgdat->lru_lock;
}

static inline struct lruvec *node_lruvec(struct pglist_data *pgdat)
{
	return &pgdat->lruvec;
}

static inline unsigned long pgdat_end_pfn(pg_data_t *pgdat)
{
	return pgdat->node_start_pfn + pgdat->node_spanned_pages;
}

static inline bool pgdat_is_empty(pg_data_t *pgdat)
{
	return !pgdat->node_start_pfn && !pgdat->node_spanned_pages;
}
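
/*
 * Illustrative sketch (hypothetical helper): node_spanned_pages includes
 * holes while node_present_pages does not, so their difference is the
 * number of pfns falling in holes inside the node's span.
 */
static inline unsigned long example_node_hole_pages(pg_data_t *pgdat)
{
	return pgdat->node_spanned_pages - pgdat->node_present_pages;
}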

#include <linux/memory_hotplug.h>

void build_all_zonelists(pg_data_t *pgdat);
void wakeup_kswapd(struct zone *zone, gfp_t gfp_mask, int order,
		   enum zone_type classzone_idx);
bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
			 int classzone_idx, unsigned int alloc_flags,
			 long free_pages);
bool zone_watermark_ok(struct zone *z, unsigned int order,
		unsigned long mark, int classzone_idx,
		unsigned int alloc_flags);
bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
		unsigned long mark, int classzone_idx);
enum memmap_context {
	MEMMAP_EARLY,
	MEMMAP_HOTPLUG,
};
extern void init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
				     unsigned long size);

extern void lruvec_init(struct lruvec *lruvec);

static inline struct pglist_data *lruvec_pgdat(struct lruvec *lruvec)
{
#ifdef CONFIG_MEMCG
	return lruvec->pgdat;
#else
	return container_of(lruvec, struct pglist_data, lruvec);
#endif
}

extern unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx);

#ifdef CONFIG_HAVE_MEMORY_PRESENT
void memory_present(int nid, unsigned long start, unsigned long end);
#else
static inline void memory_present(int nid, unsigned long start, unsigned long end) {}
#endif

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
int local_memory_node(int node_id);
#else
static inline int local_memory_node(int node_id) { return node_id; };
#endif

/*
 * zone_idx() returns 0 for the ZONE_DMA zone, 1 for the ZONE_NORMAL zone, etc.
 */
#define zone_idx(zone)		((zone) - (zone)->zone_pgdat->node_zones)

#ifdef CONFIG_ZONE_DEVICE
static inline bool is_dev_zone(const struct zone *zone)
{
	return zone_idx(zone) == ZONE_DEVICE;
}
#else
static inline bool is_dev_zone(const struct zone *zone)
{
	return false;
}
#endif

/*
 * Returns true if a zone has pages managed by the buddy allocator.
 * All the reclaim decisions have to use this function rather than
 * populated_zone(). If the whole zone is reserved then we can easily
 * end up with populated_zone() && !managed_zone().
 */
static inline bool managed_zone(struct zone *zone)
{
	return zone->managed_pages;
}

/* Returns true if a zone has memory */
static inline bool populated_zone(struct zone *zone)
{
	return zone->present_pages;
}
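
/*
 * Illustrative sketch (hypothetical helper): reclaim-style walkers test
 * managed_zone() rather than populated_zone(), e.g. when picking the
 * highest zone on a node that actually has buddy-managed pages, similar
 * in spirit to what kswapd does.
 */
static inline struct zone *example_highest_managed_zone(pg_data_t *pgdat)
{
	int i;

	for (i = MAX_NR_ZONES - 1; i >= 0; i--) {
		struct zone *zone = &pgdat->node_zones[i];

		if (managed_zone(zone))
			return zone;
	}
	return NULL;
}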

#ifdef CONFIG_NUMA
static inline int zone_to_nid(struct zone *zone)
{
	return zone->node;
}

static inline void zone_set_nid(struct zone *zone, int nid)
{
	zone->node = nid;
}
#else
static inline int zone_to_nid(struct zone *zone)
{
	return 0;
}

static inline void zone_set_nid(struct zone *zone, int nid) {}
#endif

extern int movable_zone;

#ifdef CONFIG_HIGHMEM
static inline int zone_movable_is_highmem(void)
{
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
	return movable_zone == ZONE_HIGHMEM;
#else
	return (ZONE_MOVABLE - 1) == ZONE_HIGHMEM;
#endif
}
#endif

static inline int is_highmem_idx(enum zone_type idx)
{
#ifdef CONFIG_HIGHMEM
	return (idx == ZONE_HIGHMEM ||
		(idx == ZONE_MOVABLE && zone_movable_is_highmem()));
#else
	return 0;
#endif
}

/**
 * is_highmem - helper function to quickly check if a struct zone is a 
 *              highmem zone or not.  This is an attempt to keep references
 *              to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum.
 * @zone - pointer to struct zone variable
 */
static inline int is_highmem(struct zone *zone)
{
#ifdef CONFIG_HIGHMEM
	return is_highmem_idx(zone_idx(zone));
#else
	return 0;
#endif
}

/* These two functions are used to setup the per zone pages min values */
struct ctl_table;
int min_free_kbytes_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int watermark_scale_factor_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES];
int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
			void __user *, size_t *, loff_t *);
int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int,
			void __user *, size_t *, loff_t *);

extern int numa_zonelist_order_handler(struct ctl_table *, int,
			void __user *, size_t *, loff_t *);
extern char numa_zonelist_order[];
#define NUMA_ZONELIST_ORDER_LEN	16

#ifndef CONFIG_NEED_MULTIPLE_NODES

extern struct pglist_data contig_page_data;
#define NODE_DATA(nid)		(&contig_page_data)
#define NODE_MEM_MAP(nid)	mem_map

#else /* CONFIG_NEED_MULTIPLE_NODES */

#include <asm/mmzone.h>

#endif /* !CONFIG_NEED_MULTIPLE_NODES */

extern struct pglist_data *first_online_pgdat(void);
extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
extern struct zone *next_zone(struct zone *zone);

/**
 * for_each_online_pgdat - helper macro to iterate over all online nodes
 * @pgdat - pointer to a pg_data_t variable
 */
#define for_each_online_pgdat(pgdat)			\
	for (pgdat = first_online_pgdat();		\
	     pgdat;					\
	     pgdat = next_online_pgdat(pgdat))
/**
 * for_each_zone - helper macro to iterate over all memory zones
 * @zone - pointer to struct zone variable
 *
 * The user only needs to declare the zone variable, for_each_zone
 * fills it in.
 */
#define for_each_zone(zone)			        \
	for (zone = (first_online_pgdat())->node_zones; \
	     zone;					\
	     zone = next_zone(zone))

#define for_each_populated_zone(zone)		        \
	for (zone = (first_online_pgdat())->node_zones; \
	     zone;					\
	     zone = next_zone(zone))			\
		if (!populated_zone(zone))		\
			; /* do nothing */		\
		else

static inline struct zone *zonelist_zone(struct zoneref *zoneref)
{
	return zoneref->zone;
}

static inline int zonelist_zone_idx(struct zoneref *zoneref)
{
	return zoneref->zone_idx;
}

static inline int zonelist_node_idx(struct zoneref *zoneref)
{
	return zone_to_nid(zoneref->zone);
}

struct zoneref *__next_zones_zonelist(struct zoneref *z,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes);

/**
 * next_zones_zonelist - Returns the next zone at or below highest_zoneidx within the allowed nodemask using a cursor within a zonelist as a starting point
 * @z - The cursor used as a starting point for the search
 * @highest_zoneidx - The zone index of the highest zone to return
 * @nodes - An optional nodemask to filter the zonelist with
 *
 * This function returns the next zone at or below a given zone index that is
 * within the allowed nodemask using a cursor as the starting point for the
 * search. The zoneref returned is a cursor that represents the current zone
 * being examined. It should be advanced by one before calling
 * next_zones_zonelist again.
 */
static __always_inline struct zoneref *next_zones_zonelist(struct zoneref *z,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes)
{
	if (likely(!nodes && zonelist_zone_idx(z) <= highest_zoneidx))
		return z;
	return __next_zones_zonelist(z, highest_zoneidx, nodes);
}

/**
 * first_zones_zonelist - Returns the first zone at or below highest_zoneidx within the allowed nodemask in a zonelist
 * @zonelist - The zonelist to search for a suitable zone
 * @highest_zoneidx - The zone index of the highest zone to return
 * @nodes - An optional nodemask to filter the zonelist with
 * @return - Zoneref pointer for the first suitable zone found (see below)
 *
 * This function returns the first zone at or below a given zone index that is
 * within the allowed nodemask. The zoneref returned is a cursor that can be
 * used to iterate the zonelist with next_zones_zonelist by advancing it by
 * one before calling.
 *
 * When no eligible zone is found, zoneref->zone is NULL (zoneref itself is
 * never NULL). This may happen either genuinely, or due to concurrent nodemask
 * update due to cpuset modification.
 */
static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes)
{
	return next_zones_zonelist(zonelist->_zonerefs,
							highest_zoneidx, nodes);
}

/**
 * for_each_zone_zonelist_nodemask - helper macro to iterate over valid zones in a zonelist at or below a given zone index and within a nodemask
 * @zone - The current zone in the iterator
 * @z - The current pointer within zonelist->zones being iterated
 * @zlist - The zonelist being iterated
 * @highidx - The zone index of the highest zone to return
 * @nodemask - Nodemask allowed by the allocator
 *
 * This iterator iterates through all zones at or below a given zone index and
 * within a given nodemask
 */
#define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
	for (z = first_zones_zonelist(zlist, highidx, nodemask), zone = zonelist_zone(z);	\
		zone;							\
		z = next_zones_zonelist(++z, highidx, nodemask),	\
			zone = zonelist_zone(z))

#define for_next_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
	for (zone = z->zone;	\
		zone;							\
		z = next_zones_zonelist(++z, highidx, nodemask),	\
			zone = zonelist_zone(z))

/**
 * for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a given zone index
 * @zone - The current zone in the iterator
 * @z - The current pointer within zonelist->zones being iterated
 * @zlist - The zonelist being iterated
 * @highidx - The zone index of the highest zone to return
 *
 * This iterator iterates through all zones at or below a given zone index.
 */
#define for_each_zone_zonelist(zone, z, zlist, highidx) \
	for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, NULL)
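
/*
 * Illustrative sketch (hypothetical helper): this is the shape of the
 * allocator's zonelist walk - visit each zone no higher than the
 * requested index, stopping at the first one that satisfies the caller's
 * test.  The real fast path in mm/page_alloc.c layers watermark and
 * cpuset checks on top of the same iteration.
 */
static inline struct zone *example_first_populated_zone(struct zonelist *zlist,
					enum zone_type highidx)
{
	struct zoneref *z;
	struct zone *zone;

	for_each_zone_zonelist(zone, z, zlist, highidx)
		if (populated_zone(zone))
			return zone;
	return NULL;
}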

#ifdef CONFIG_SPARSEMEM
#include <asm/sparsemem.h>
#endif

#if !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) && \
	!defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
static inline unsigned long early_pfn_to_nid(unsigned long pfn)
{
	BUILD_BUG_ON(IS_ENABLED(CONFIG_NUMA));
	return 0;
}
#endif

#ifdef CONFIG_FLATMEM
#define pfn_to_nid(pfn)		(0)
#endif

#ifdef CONFIG_SPARSEMEM

/*
 * SECTION_SHIFT    		#bits space required to store a section #
 *
 * PA_SECTION_SHIFT		physical address to/from section number
 * PFN_SECTION_SHIFT		pfn to/from section number
 */
#define PA_SECTION_SHIFT	(SECTION_SIZE_BITS)
#define PFN_SECTION_SHIFT	(SECTION_SIZE_BITS - PAGE_SHIFT)

#define NR_MEM_SECTIONS		(1UL << SECTIONS_SHIFT)

#define PAGES_PER_SECTION       (1UL << PFN_SECTION_SHIFT)
#define PAGE_SECTION_MASK	(~(PAGES_PER_SECTION-1))

#define SECTION_BLOCKFLAGS_BITS \
	((1UL << (PFN_SECTION_SHIFT - pageblock_order)) * NR_PAGEBLOCK_BITS)

#if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS
#error Allocator MAX_ORDER exceeds SECTION_SIZE
#endif

static inline unsigned long pfn_to_section_nr(unsigned long pfn)
{
	return pfn >> PFN_SECTION_SHIFT;
}
static inline unsigned long section_nr_to_pfn(unsigned long sec)
{
	return sec << PFN_SECTION_SHIFT;
}
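
/*
 * Illustrative sketch (hypothetical helper): section numbers are just the
 * high bits of the pfn, so two pfns share a mem_section exactly when
 * those bits match.
 */
static inline bool example_same_section(unsigned long pfn1, unsigned long pfn2)
{
	return pfn_to_section_nr(pfn1) == pfn_to_section_nr(pfn2);
}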
#define SECTION_ALIGN_UP(pfn)	(((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK)
#define SECTION_ALIGN_DOWN(pfn)	((pfn) & PAGE_SECTION_MASK)

struct page;
struct page_ext;
struct mem_section {
	/*
	 * This is, logically, a pointer to an array of struct
	 * pages.  However, it is stored with some other magic.
	 * (see sparse.c::sparse_init_one_section())
	 *
	 * Additionally during early boot we encode node id of
	 * the location of the section here to guide allocation.
	 * (see sparse.c::memory_present())
	 *
	 * Making it a UL at least makes someone do a cast
	 * before using it wrong.
	 */
	unsigned long section_mem_map;

	/* See declaration of similar field in struct zone */
	unsigned long *pageblock_flags;
#ifdef CONFIG_PAGE_EXTENSION
	/*
	 * If SPARSEMEM, pgdat doesn't have page_ext pointer. We use
	 * section. (see page_ext.h about this.)
	 */
	struct page_ext *page_ext;
	unsigned long pad;
#endif
	/*
	 * WARNING: mem_section must be a power-of-2 in size for the
	 * calculation and use of SECTION_ROOT_MASK to make sense.
	 */
};

#ifdef CONFIG_SPARSEMEM_EXTREME
#define SECTIONS_PER_ROOT       (PAGE_SIZE / sizeof (struct mem_section))
#else
#define SECTIONS_PER_ROOT	1
#endif

#define SECTION_NR_TO_ROOT(sec)	((sec) / SECTIONS_PER_ROOT)
#define NR_SECTION_ROOTS	DIV_ROUND_UP(NR_MEM_SECTIONS, SECTIONS_PER_ROOT)
#define SECTION_ROOT_MASK	(SECTIONS_PER_ROOT - 1)

#ifdef CONFIG_SPARSEMEM_EXTREME
extern struct mem_section **mem_section;
#else
extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
#endif

static inline struct mem_section *__nr_to_section(unsigned long nr)
{
#ifdef CONFIG_SPARSEMEM_EXTREME
	if (!mem_section)
		return NULL;
#endif
	if (!mem_section[SECTION_NR_TO_ROOT(nr)])
		return NULL;
	return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
}
extern int __section_nr(struct mem_section* ms);
extern unsigned long usemap_size(void);

/*
 * We use the lower bits of the mem_map pointer to store
 * a little bit of information.  The pointer is calculated
 * as mem_map - section_nr_to_pfn(pnum).  The result is
 * aligned to the minimum alignment of the two values:
 *   1. All mem_map arrays are page-aligned.
 *   2. section_nr_to_pfn() always clears PFN_SECTION_SHIFT
 *      lowest bits.  PFN_SECTION_SHIFT is arch-specific
 *      (equal to SECTION_SIZE_BITS - PAGE_SHIFT), and the
 *      worst combination is powerpc with 256k pages,
 *      which results in PFN_SECTION_SHIFT equal to 6.
 * To sum it up, at least 6 bits are available.
 */
#define	SECTION_MARKED_PRESENT	(1UL<<0)
#define SECTION_HAS_MEM_MAP	(1UL<<1)
#define SECTION_IS_ONLINE	(1UL<<2)
#define SECTION_MAP_LAST_BIT	(1UL<<3)
#define SECTION_MAP_MASK	(~(SECTION_MAP_LAST_BIT-1))
#define SECTION_NID_SHIFT	3

static inline struct page *__section_mem_map_addr(struct mem_section *section)
{
	unsigned long map = section->section_mem_map;
	map &= SECTION_MAP_MASK;
	return (struct page *)map;
}
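
/*
 * Illustrative sketch (hypothetical; the real encode side lives in
 * mm/sparse.c, see sparse_init_one_section()): the mem_map pointer is
 * stored offset by the section's first pfn, with flag bits or'ed into
 * the low bits that the alignment rules above leave free.
 */
static inline unsigned long example_encode_mem_map(struct page *mem_map,
						   unsigned long pnum)
{
	return (unsigned long)(mem_map - section_nr_to_pfn(pnum)) |
		SECTION_MARKED_PRESENT | SECTION_HAS_MEM_MAP;
}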

static inline int present_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_MARKED_PRESENT));
}

static inline int present_section_nr(unsigned long nr)
{
	return present_section(__nr_to_section(nr));
}

static inline int valid_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP));
}

static inline int valid_section_nr(unsigned long nr)
{
	return valid_section(__nr_to_section(nr));
}

static inline int online_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_IS_ONLINE));
}

static inline int online_section_nr(unsigned long nr)
{
	return online_section(__nr_to_section(nr));
}

#ifdef CONFIG_MEMORY_HOTPLUG
void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn);
#ifdef CONFIG_MEMORY_HOTREMOVE
void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn);
#endif
#endif

static inline struct mem_section *__pfn_to_section(unsigned long pfn)
{
	return __nr_to_section(pfn_to_section_nr(pfn));
}
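
/*
 * Illustrative sketch (hypothetical helper): this is essentially what
 * __pfn_to_page() does under SPARSEMEM - look up the pfn's section and
 * index into its encoded mem_map, which is stored offset so that adding
 * the pfn directly yields the struct page.
 */
static inline struct page *example_pfn_to_page(unsigned long pfn)
{
	struct mem_section *sec = __pfn_to_section(pfn);

	return __section_mem_map_addr(sec) + pfn;
}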

extern int __highest_present_section_nr;

#ifndef CONFIG_HAVE_ARCH_PFN_VALID
static inline int pfn_valid(unsigned long pfn)
{
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	return valid_section(__nr_to_section(pfn_to_section_nr(pfn)));
}
#endif

static inline int pfn_present(unsigned long pfn)
{
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	return present_section(__nr_to_section(pfn_to_section_nr(pfn)));
}

/*
 * These are _only_ used during initialisation, therefore they
 * can use __initdata ...  They could have names to indicate
 * this restriction.
 */
#ifdef CONFIG_NUMA
#define pfn_to_nid(pfn)							\
({									\
	unsigned long __pfn_to_nid_pfn = (pfn);				\
	page_to_nid(pfn_to_page(__pfn_to_nid_pfn));			\
})
#else
#define pfn_to_nid(pfn)		(0)
#endif

#define early_pfn_valid(pfn)	pfn_valid(pfn)
void sparse_init(void);
#else
#define sparse_init()	do {} while (0)
#define sparse_index_init(_sec, _nid)  do {} while (0)
#endif /* CONFIG_SPARSEMEM */

/*
 * During memory init memblocks map pfns to nids. The search is expensive and
 * this caches recent lookups. The implementation of __early_pfn_to_nid
 * may treat start/end as pfns or sections.
 */
struct mminit_pfnnid_cache {
	unsigned long last_start;
	unsigned long last_end;
	int last_nid;
};

#ifndef early_pfn_valid
#define early_pfn_valid(pfn)	(1)
#endif

void memory_present(int nid, unsigned long start, unsigned long end);

/*
 * If it is possible to have holes within a MAX_ORDER_NR_PAGES, then we
 * need to check pfn validity within that MAX_ORDER_NR_PAGES block.
 * pfn_valid_within() should be used in this case; we optimise this away
 * when we have no holes within a MAX_ORDER_NR_PAGES block.
 */
#ifdef CONFIG_HOLES_IN_ZONE
#define pfn_valid_within(pfn) pfn_valid(pfn)
#else
#define pfn_valid_within(pfn) (1)
#endif

#ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL
/*
 * pfn_valid() is meant to be able to tell if a given PFN has valid memmap
 * associated with it or not. This means that a struct page exists for this
 * pfn. The caller cannot assume the page is fully initialized in general.
 * Hotplugable pages might not have been onlined yet. pfn_to_online_page()
 * will ensure the struct page is fully online and initialized. Special pages
 * (e.g. ZONE_DEVICE) are never onlined and should be treated accordingly.
 *
 * In FLATMEM, it is expected that holes always have valid memmap as long as
 * there is valid PFNs either side of the hole. In SPARSEMEM, it is assumed
 * that a valid section has a memmap for the entire section.
 *
 * However, ARM, and maybe other embedded architectures in the future,
 * free the memmap backing holes to save memory on the assumption the memmap
 * is never used. The page_zone linkages are then broken even though
 * pfn_valid() returns true. A walker of the full memmap must then do this
 * additional check to ensure the memmap it is looking at is sane by making
 * sure the zone and PFN linkages are still valid. This is expensive, but
 * walkers of the full memmap are extremely rare.
 */
bool memmap_valid_within(unsigned long pfn,
					struct page *page, struct zone *zone);
#else
static inline bool memmap_valid_within(unsigned long pfn,
					struct page *page, struct zone *zone)
{
	return true;
}
#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */

#endif /* !__GENERATING_BOUNDS.H */
#endif /* !__ASSEMBLY__ */
#endif /* _LINUX_MMZONE_H */