/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MMZONE_H
#define _LINUX_MMZONE_H

#ifndef __ASSEMBLY__
#ifndef __GENERATING_BOUNDS_H

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/numa.h>
#include <linux/init.h>
#include <linux/seqlock.h>
#include <linux/nodemask.h>
#include <linux/pageblock-flags.h>
#include <linux/page-flags-layout.h>
#include <linux/atomic.h>
#include <asm/page.h>

/* Free memory management - zoned buddy allocator.  */
#ifndef CONFIG_FORCE_MAX_ZONEORDER
#define MAX_ORDER 11
#else
#define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER
#endif
#define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1))

/*
 * PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed
 * costly to service.  That is between allocation orders which should
 * coalesce naturally under reasonable reclaim pressure and those which
 * will not.
 */
#define PAGE_ALLOC_COSTLY_ORDER 3

enum migratetype {
	MIGRATE_UNMOVABLE,
	MIGRATE_MOVABLE,
	MIGRATE_RECLAIMABLE,
	MIGRATE_PCPTYPES,	/* the number of types on the pcp lists */
	MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES,
#ifdef CONFIG_CMA
	/*
	 * MIGRATE_CMA migration type is designed to mimic the way
	 * ZONE_MOVABLE works.  Only movable pages can be allocated
	 * from MIGRATE_CMA pageblocks and the page allocator never
	 * implicitly changes the migration type of a MIGRATE_CMA pageblock.
	 *
	 * The way to use it is to change migratetype of a range of
	 * pageblocks to MIGRATE_CMA which can be done by
	 * __free_pageblock_cma() function.  What is important though
	 * is that a range of pageblocks must be aligned to
	 * MAX_ORDER_NR_PAGES should the biggest page be bigger than
	 * a single pageblock.
	 */
	MIGRATE_CMA,
#endif
#ifdef CONFIG_MEMORY_ISOLATION
	MIGRATE_ISOLATE,	/* can't allocate from here */
#endif
	MIGRATE_TYPES
};

/* In mm/page_alloc.c; keep in sync also with show_migration_types() there */
extern char * const migratetype_names[MIGRATE_TYPES];

#ifdef CONFIG_CMA
#  define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
#  define is_migrate_cma_page(_page) (get_pageblock_migratetype(_page) == MIGRATE_CMA)
#else
#  define is_migrate_cma(migratetype) false
#  define is_migrate_cma_page(_page) false
#endif

static inline bool is_migrate_movable(int mt)
{
	return is_migrate_cma(mt) || mt == MIGRATE_MOVABLE;
}

#define for_each_migratetype_order(order, type) \
	for (order = 0; order < MAX_ORDER; order++) \
		for (type = 0; type < MIGRATE_TYPES; type++)

extern int page_group_by_mobility_disabled;

#define NR_MIGRATETYPE_BITS (PB_migrate_end - PB_migrate + 1)
#define MIGRATETYPE_MASK ((1UL << NR_MIGRATETYPE_BITS) - 1)

#define get_pageblock_migratetype(page)					\
	get_pfnblock_flags_mask(page, page_to_pfn(page),		\
			PB_migrate_end, MIGRATETYPE_MASK)
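
/*
 * Illustrative sketch (not part of the original header): querying the
 * migratetype of the pageblock backing a page, e.g. to decide whether
 * the block may contain non-movable allocations:
 *
 *	int mt = get_pageblock_migratetype(page);
 *
 *	if (!is_migrate_movable(mt))
 *		...	// block may hold unmovable/reclaimable pages
 */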

struct free_area {
	struct list_head	free_list[MIGRATE_TYPES];
	unsigned long		nr_free;
};
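
/*
 * Illustrative sketch (not part of the original header): free_area[order]
 * holds the buddy free lists for one order, and nr_free counts free blocks
 * of that order across all migratetypes, so a zone's free pages can be
 * summed as:
 *
 *	unsigned int order;
 *	unsigned long nr_pages = 0;
 *
 *	for (order = 0; order < MAX_ORDER; order++)
 *		nr_pages += zone->free_area[order].nr_free << order;
 *
 * Per-migratetype detail requires walking free_list[] itself, e.g. with
 * for_each_migratetype_order() above, under zone->lock.
 */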

struct pglist_data;

/*
 * zone->lock and the zone lru_lock are two of the hottest locks in the kernel.
 * So add a wild amount of padding here to ensure that they fall into separate
 * cachelines.  There are very few zone structures in the machine, so space
 * consumption is not a concern here.
 */
#if defined(CONFIG_SMP)
struct zone_padding {
	char x[0];
} ____cacheline_internodealigned_in_smp;
#define ZONE_PADDING(name)	struct zone_padding name;
#else
#define ZONE_PADDING(name)
#endif

#ifdef CONFIG_NUMA
enum numa_stat_item {
	NUMA_HIT,		/* allocated in intended node */
	NUMA_MISS,		/* allocated in non intended node */
	NUMA_FOREIGN,		/* was intended here, hit elsewhere */
	NUMA_INTERLEAVE_HIT,	/* interleaver preferred this zone */
	NUMA_LOCAL,		/* allocation from local node */
	NUMA_OTHER,		/* allocation from other node */
	NR_VM_NUMA_STAT_ITEMS
};
#else
#define NR_VM_NUMA_STAT_ITEMS 0
#endif

enum zone_stat_item {
	/* First 128 byte cacheline (assuming 64 bit words) */
	NR_FREE_PAGES,
	NR_ZONE_LRU_BASE, /* Used only for compaction and reclaim retry */
	NR_ZONE_INACTIVE_ANON = NR_ZONE_LRU_BASE,
	NR_ZONE_ACTIVE_ANON,
	NR_ZONE_INACTIVE_FILE,
	NR_ZONE_ACTIVE_FILE,
	NR_ZONE_UNEVICTABLE,
	NR_ZONE_WRITE_PENDING,	/* Count of dirty, writeback and unstable pages */
	NR_MLOCK,		/* mlock()ed pages found and moved off LRU */
	NR_PAGETABLE,		/* used for pagetables */
	NR_KERNEL_STACK_KB,	/* measured in KiB */
	/* Second 128 byte cacheline */
	NR_BOUNCE,
#if IS_ENABLED(CONFIG_ZSMALLOC)
	NR_ZSPAGES,		/* allocated in zsmalloc */
#endif
	NR_FREE_CMA_PAGES,
	NR_VM_ZONE_STAT_ITEMS };

enum node_stat_item {
	NR_LRU_BASE,
	NR_INACTIVE_ANON = NR_LRU_BASE, /* must match order of LRU_[IN]ACTIVE */
	NR_ACTIVE_ANON,		/*  "     "     "   "       "         */
	NR_INACTIVE_FILE,	/*  "     "     "   "       "         */
	NR_ACTIVE_FILE,		/*  "     "     "   "       "         */
	NR_UNEVICTABLE,		/*  "     "     "   "       "         */
	NR_SLAB_RECLAIMABLE,
	NR_SLAB_UNRECLAIMABLE,
	NR_ISOLATED_ANON,	/* Temporary isolated pages from anon lru */
	NR_ISOLATED_FILE,	/* Temporary isolated pages from file lru */
	WORKINGSET_REFAULT,
	WORKINGSET_ACTIVATE,
	WORKINGSET_NODERECLAIM,
	NR_ANON_MAPPED,	/* Mapped anonymous pages */
	NR_FILE_MAPPED,	/* pagecache pages mapped into pagetables.
			   only modified from process context */
	NR_FILE_PAGES,
	NR_FILE_DIRTY,
	NR_WRITEBACK,
	NR_WRITEBACK_TEMP,	/* Writeback using temporary buffers */
	NR_SHMEM,		/* shmem pages (includes tmpfs/GEM pages) */
	NR_SHMEM_THPS,
	NR_SHMEM_PMDMAPPED,
	NR_ANON_THPS,
	NR_UNSTABLE_NFS,	/* NFS unstable pages */
	NR_VMSCAN_WRITE,
	NR_VMSCAN_IMMEDIATE,	/* Prioritise for reclaim when writeback ends */
	NR_DIRTIED,		/* page dirtyings since bootup */
	NR_WRITTEN,		/* page writings since bootup */
	NR_KERNEL_MISC_RECLAIMABLE,	/* reclaimable non-slab kernel pages */
	NR_VM_NODE_STAT_ITEMS
};

/*
 * We do arithmetic on the LRU lists in various places in the code,
 * so it is important to keep the active lists LRU_ACTIVE higher in
 * the array than the corresponding inactive lists, and to keep
 * the *_FILE lists LRU_FILE higher than the corresponding _ANON lists.
 *
 * This has to be kept in sync with the statistics in zone_stat_item
 * above and the descriptions in vmstat_text in mm/vmstat.c
 */
#define LRU_BASE 0
#define LRU_ACTIVE 1
#define LRU_FILE 2

enum lru_list {
	LRU_INACTIVE_ANON = LRU_BASE,
	LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE,
	LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE,
	LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE,
	LRU_UNEVICTABLE,
	NR_LRU_LISTS
};

#define for_each_lru(lru) for (lru = 0; lru < NR_LRU_LISTS; lru++)

#define for_each_evictable_lru(lru) for (lru = 0; lru <= LRU_ACTIVE_FILE; lru++)

static inline int is_file_lru(enum lru_list lru)
{
	return (lru == LRU_INACTIVE_FILE || lru == LRU_ACTIVE_FILE);
}

static inline int is_active_lru(enum lru_list lru)
{
	return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE);
}

struct zone_reclaim_stat {
	/*
	 * The pageout code in vmscan.c keeps track of how many of the
	 * mem/swap backed and file backed pages are referenced.
	 * The higher the rotated/scanned ratio, the more valuable
	 * that cache is.
	 *
	 * The anon LRU stats live in [0], file LRU stats in [1]
	 */
	unsigned long		recent_rotated[2];
	unsigned long		recent_scanned[2];
};

struct lruvec {
	struct list_head		lists[NR_LRU_LISTS];
	struct zone_reclaim_stat	reclaim_stat;
	/* Evictions & activations on the inactive file list */
	atomic_long_t			inactive_age;
	/* Refaults at the time of last reclaim cycle */
	unsigned long			refaults;
#ifdef CONFIG_MEMCG
	struct pglist_data *pgdat;
#endif
};

/* Mask used at gathering information at once (see memcontrol.c) */
#define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))
#define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
#define LRU_ALL	     ((1 << NR_LRU_LISTS) - 1)

/* Isolate unmapped file */
#define ISOLATE_UNMAPPED	((__force isolate_mode_t)0x2)
/* Isolate for asynchronous migration */
#define ISOLATE_ASYNC_MIGRATE	((__force isolate_mode_t)0x4)
/* Isolate unevictable pages */
#define ISOLATE_UNEVICTABLE	((__force isolate_mode_t)0x8)

/* LRU Isolation modes. */
typedef unsigned __bitwise isolate_mode_t;

enum zone_watermarks {
	WMARK_MIN,
	WMARK_LOW,
	WMARK_HIGH,
	NR_WMARK
};

#define min_wmark_pages(z) (z->watermark[WMARK_MIN])
#define low_wmark_pages(z) (z->watermark[WMARK_LOW])
#define high_wmark_pages(z) (z->watermark[WMARK_HIGH])
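
/*
 * Illustrative sketch (not part of the original header): a simplified
 * watermark check in the spirit of the page allocator.  zone_page_state()
 * comes from <linux/vmstat.h>; the real check is zone_watermark_ok(),
 * declared later in this file, which also honours lowmem_reserve and the
 * requested order:
 *
 *	bool below_min = zone_page_state(zone, NR_FREE_PAGES) <
 *			 min_wmark_pages(zone);
 */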

struct per_cpu_pages {
	int count;		/* number of pages in the list */
	int high;		/* high watermark, emptying needed */
	int batch;		/* chunk size for buddy add/remove */

	/* Lists of pages, one per migrate type stored on the pcp-lists */
	struct list_head lists[MIGRATE_PCPTYPES];
};

struct per_cpu_pageset {
	struct per_cpu_pages pcp;
#ifdef CONFIG_NUMA
	s8 expire;
	u16 vm_numa_stat_diff[NR_VM_NUMA_STAT_ITEMS];
#endif
#ifdef CONFIG_SMP
	s8 stat_threshold;
	s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
#endif
};

struct per_cpu_nodestat {
	s8 stat_threshold;
	s8 vm_node_stat_diff[NR_VM_NODE_STAT_ITEMS];
};

#endif /* !__GENERATING_BOUNDS_H */

enum zone_type {
#ifdef CONFIG_ZONE_DMA
	/*
	 * ZONE_DMA is used when there are devices that are not able
	 * to do DMA to all of addressable memory (ZONE_NORMAL). Then we
	 * carve out the portion of memory that is needed for these devices.
	 * The range is arch specific.
	 *
	 * Some examples
	 *
	 * Architecture		Limit
	 * ---------------------------
	 * parisc, ia64, sparc	<4G
	 * s390			<2G
	 * arm			Various
	 * alpha		Unlimited or 0-16MB.
	 *
	 * i386, x86_64 and multiple other arches
	 * 			<16M.
	 */
	ZONE_DMA,
#endif
#ifdef CONFIG_ZONE_DMA32
	/*
	 * x86_64 needs two ZONE_DMAs because it supports devices that are
	 * only able to do DMA to the lower 16M but also 32 bit devices that
	 * can only do DMA to areas below 4G.
	 */
	ZONE_DMA32,
#endif
	/*
	 * Normal addressable memory is in ZONE_NORMAL. DMA operations can be
	 * performed on pages in ZONE_NORMAL if the DMA devices support
	 * transfers to all addressable memory.
	 */
	ZONE_NORMAL,
#ifdef CONFIG_HIGHMEM
	/*
	 * A memory area that is only addressable by the kernel through
	 * mapping portions into its own address space. This is for example
	 * used by i386 to allow the kernel to address the memory beyond
	 * 900MB. The kernel will set up special mappings (page
	 * table entries on i386) for each page that the kernel needs to
	 * access.
	 */
	ZONE_HIGHMEM,
#endif
	ZONE_MOVABLE,
#ifdef CONFIG_ZONE_DEVICE
	ZONE_DEVICE,
#endif
	__MAX_NR_ZONES

};

#ifndef __GENERATING_BOUNDS_H

struct zone {
	/* Read-mostly fields */

	/* zone watermarks, access with *_wmark_pages(zone) macros */
	unsigned long watermark[NR_WMARK];

	unsigned long nr_reserved_highatomic;

	/*
	 * We don't know if the memory that we're going to allocate will be
	 * freeable and/or whether it will be released eventually, so to avoid
	 * totally wasting several GB of ram we must reserve some of the lower
	 * zone memory (otherwise we risk running OOM on the lower zones despite
	 * there being tons of freeable ram on the higher zones).  This array is
	 * recalculated at runtime if the sysctl_lowmem_reserve_ratio sysctl
	 * changes.
	 */
	long lowmem_reserve[MAX_NR_ZONES];

#ifdef CONFIG_NUMA
	int node;
#endif
	struct pglist_data	*zone_pgdat;
	struct per_cpu_pageset __percpu *pageset;

#ifndef CONFIG_SPARSEMEM
	/*
	 * Flags for a pageblock_nr_pages block. See pageblock-flags.h.
	 * In SPARSEMEM, this map is stored in struct mem_section
	 */
	unsigned long		*pageblock_flags;
#endif /* CONFIG_SPARSEMEM */

	/* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
	unsigned long		zone_start_pfn;

	/*
	 * spanned_pages is the total pages spanned by the zone, including
	 * holes, which is calculated as:
	 * 	spanned_pages = zone_end_pfn - zone_start_pfn;
	 *
	 * present_pages is physical pages existing within the zone, which
	 * is calculated as:
	 *	present_pages = spanned_pages - absent_pages(pages in holes);
	 *
	 * managed_pages is present pages managed by the buddy system, which
	 * is calculated as (reserved_pages includes pages allocated by the
	 * bootmem allocator):
	 *	managed_pages = present_pages - reserved_pages;
	 *
	 * So present_pages may be used by memory hotplug or memory power
	 * management logic to figure out unmanaged pages by checking
	 * (present_pages - managed_pages). And managed_pages should be used
	 * by page allocator and vm scanner to calculate all kinds of watermarks
	 * and thresholds.
	 *
	 * Locking rules:
	 *
	 * zone_start_pfn and spanned_pages are protected by span_seqlock.
	 * It is a seqlock because it has to be read outside of zone->lock,
	 * and it is done in the main allocator path.  But, it is written
	 * quite infrequently.
	 *
	 * The span_seq lock is declared along with zone->lock because it is
	 * frequently read in proximity to zone->lock.  It's good to
	 * give them a chance of being in the same cacheline.
	 *
	 * Write access to present_pages at runtime should be protected by
	 * mem_hotplug_begin/end(). Any reader who can't tolerate drift of
	 * present_pages should use get_online_mems() to get a stable value.
	 *
	 * Read access to managed_pages should be safe because it's unsigned
	 * long. Write access to zone->managed_pages and totalram_pages are
	 * protected by managed_page_count_lock at runtime. Ideally only
	 * adjust_managed_page_count() should be used instead of directly
	 * touching zone->managed_pages and totalram_pages.
	 */
	unsigned long		managed_pages;
	unsigned long		spanned_pages;
	unsigned long		present_pages;

	const char		*name;

#ifdef CONFIG_MEMORY_ISOLATION
	/*
	 * Number of isolated pageblock. It is used to solve incorrect
	 * freepage counting problem due to racy retrieving migratetype
	 * of pageblock. Protected by zone->lock.
	 */
	unsigned long		nr_isolate_pageblock;
#endif

#ifdef CONFIG_MEMORY_HOTPLUG
	/* see spanned/present_pages for more description */
	seqlock_t		span_seqlock;
#endif

	int initialized;

	/* Write-intensive fields used from the page allocator */
	ZONE_PADDING(_pad1_)

	/* free areas of different sizes */
	struct free_area	free_area[MAX_ORDER];

	/* zone flags, see below */
	unsigned long		flags;

	/* Primarily protects free_area */
	spinlock_t		lock;

	/* Write-intensive fields used by compaction and vmstats. */
	ZONE_PADDING(_pad2_)

	/*
	 * When free pages are below this point, additional steps are taken
	 * when reading the number of free pages to avoid per-cpu counter
	 * drift allowing watermarks to be breached
	 */
	unsigned long percpu_drift_mark;

#if defined CONFIG_COMPACTION || defined CONFIG_CMA
	/* pfn where compaction free scanner should start */
	unsigned long		compact_cached_free_pfn;
	/* pfn where async and sync compaction migration scanner should start */
	unsigned long		compact_cached_migrate_pfn[2];
#endif

#ifdef CONFIG_COMPACTION
	/*
	 * On compaction failure, 1<<compact_defer_shift compactions
	 * are skipped before trying again. The number attempted since
	 * last failure is tracked with compact_considered.
	 */
	unsigned int		compact_considered;
	unsigned int		compact_defer_shift;
	int			compact_order_failed;
#endif

#if defined CONFIG_COMPACTION || defined CONFIG_CMA
	/* Set to true when the PG_migrate_skip bits should be cleared */
	bool			compact_blockskip_flush;
#endif

	bool			contiguous;

	ZONE_PADDING(_pad3_)
	/* Zone statistics */
	atomic_long_t		vm_stat[NR_VM_ZONE_STAT_ITEMS];
	atomic_long_t		vm_numa_stat[NR_VM_NUMA_STAT_ITEMS];
} ____cacheline_internodealigned_in_smp;
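
/*
 * Illustrative example (not part of the original header) of the counter
 * relationships documented in struct zone above, for a hypothetical zone
 * spanning 1 GiB with a 16 MiB hole and 8 MiB of bootmem-reserved memory,
 * assuming 4 KiB pages:
 *
 *	spanned_pages == 262144                          // includes the hole
 *	present_pages == spanned_pages - 4096 == 258048  // holes excluded
 *	managed_pages == present_pages - 2048 == 256000  // reserved excluded
 */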

enum pgdat_flags {
	PGDAT_CONGESTED,		/* pgdat has many dirty pages backed by
					 * a congested BDI
					 */
	PGDAT_DIRTY,			/* reclaim scanning has recently found
					 * many dirty file pages at the tail
					 * of the LRU.
					 */
	PGDAT_WRITEBACK,		/* reclaim scanning has recently found
					 * many pages under writeback
					 */
	PGDAT_RECLAIM_LOCKED,		/* prevents concurrent reclaim */
};

static inline unsigned long zone_end_pfn(const struct zone *zone)
{
	return zone->zone_start_pfn + zone->spanned_pages;
}

static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn)
{
	return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone);
}

static inline bool zone_is_initialized(struct zone *zone)
{
	return zone->initialized;
}

static inline bool zone_is_empty(struct zone *zone)
{
	return zone->spanned_pages == 0;
}

/*
 * Return true if [start_pfn, start_pfn + nr_pages) range has a non-empty
 * intersection with the given zone
 */
static inline bool zone_intersects(struct zone *zone,
		unsigned long start_pfn, unsigned long nr_pages)
{
	if (zone_is_empty(zone))
		return false;
	if (start_pfn >= zone_end_pfn(zone) ||
	    start_pfn + nr_pages <= zone->zone_start_pfn)
		return false;

	return true;
}

/*
 * The "priority" of VM scanning is how much of the queues we will scan in one
 * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the
 * queues ("queue_length >> 12") during an aging round.
 */
#define DEF_PRIORITY 12

/* Maximum number of zones on a zonelist */
#define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES)

enum {
	ZONELIST_FALLBACK,	/* zonelist with fallback */
#ifdef CONFIG_NUMA
	/*
	 * The NUMA zonelists are doubled because we need zonelists that
	 * restrict the allocations to a single node for __GFP_THISNODE.
	 */
	ZONELIST_NOFALLBACK,	/* zonelist without fallback (__GFP_THISNODE) */
#endif
	MAX_ZONELISTS
};

/*
 * This struct contains information about a zone in a zonelist. It is stored
 * here to avoid dereferences into large structures and lookups of tables
 */
struct zoneref {
	struct zone *zone;	/* Pointer to actual zone */
	int zone_idx;		/* zone_idx(zoneref->zone) */
};

/*
 * One allocation request operates on a zonelist. A zonelist
 * is a list of zones, the first one is the 'goal' of the
 * allocation, the other zones are fallback zones, in decreasing
 * priority.
 *
 * To speed the reading of the zonelist, the zonerefs contain the zone index
 * of the entry being read. Helper functions to access information given
 * a struct zoneref are
 *
 * zonelist_zone()	- Return the struct zone * for an entry in _zonerefs
 * zonelist_zone_idx()	- Return the index of the zone for an entry
 * zonelist_node_idx()	- Return the index of the node for an entry
 */
struct zonelist {
	struct zoneref _zonerefs[MAX_ZONES_PER_ZONELIST + 1];
};

#ifndef CONFIG_DISCONTIGMEM
/* The array of struct pages - for discontigmem use pgdat->lmem_map */
extern struct page *mem_map;
#endif

/*
 * On NUMA machines, each NUMA node would have a pg_data_t to describe
 * its memory layout. On UMA machines there is a single pglist_data which
 * describes the whole memory.
 *
 * Memory statistics and page replacement data structures are maintained on a
 * per-zone basis.
 */
struct bootmem_data;
typedef struct pglist_data {
	struct zone node_zones[MAX_NR_ZONES];
	struct zonelist node_zonelists[MAX_ZONELISTS];
	int nr_zones;
#ifdef CONFIG_FLAT_NODE_MEM_MAP	/* means !SPARSEMEM */
	struct page *node_mem_map;
#ifdef CONFIG_PAGE_EXTENSION
	struct page_ext *node_page_ext;
#endif
#endif
#ifndef CONFIG_NO_BOOTMEM
	struct bootmem_data *bdata;
#endif
#if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT)
	/*
	 * Must be held any time you expect node_start_pfn, node_present_pages
	 * or node_spanned_pages stay constant.  Holding this will also
	 * guarantee that any pfn_valid() stays that way.
	 *
	 * pgdat_resize_lock() and pgdat_resize_unlock() are provided to
	 * manipulate node_size_lock without checking for CONFIG_MEMORY_HOTPLUG
	 * or CONFIG_DEFERRED_STRUCT_PAGE_INIT.
	 *
	 * Nests above zone->lock and zone->span_seqlock
	 */
	spinlock_t node_size_lock;
#endif
	unsigned long node_start_pfn;
	unsigned long node_present_pages; /* total number of physical pages */
	unsigned long node_spanned_pages; /* total size of physical page
					     range, including holes */
	int node_id;
	wait_queue_head_t kswapd_wait;
	wait_queue_head_t pfmemalloc_wait;
	struct task_struct *kswapd;	/* Protected by
					   mem_hotplug_begin/end() */
	int kswapd_order;
	enum zone_type kswapd_classzone_idx;

	int kswapd_failures;		/* Number of 'reclaimed == 0' runs */

#ifdef CONFIG_COMPACTION
	int kcompactd_max_order;
	enum zone_type kcompactd_classzone_idx;
	wait_queue_head_t kcompactd_wait;
	struct task_struct *kcompactd;
#endif
	/*
	 * This is a per-node reserve of pages that are not available
	 * to userspace allocations.
	 */
	unsigned long		totalreserve_pages;

#ifdef CONFIG_NUMA
	/*
	 * zone reclaim becomes active if more unmapped pages exist.
	 */
	unsigned long		min_unmapped_pages;
	unsigned long		min_slab_pages;
#endif /* CONFIG_NUMA */

	/* Write-intensive fields used by page reclaim */
	ZONE_PADDING(_pad1_)
	spinlock_t		lru_lock;

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
	/*
	 * If memory initialisation on large machines is deferred then this
	 * is the first PFN that needs to be initialised.
	 */
	unsigned long first_deferred_pfn;
	/* Number of non-deferred pages */
	unsigned long static_init_pgcnt;
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	spinlock_t split_queue_lock;
	struct list_head split_queue;
	unsigned long split_queue_len;
#endif

	/* Fields commonly accessed by the page reclaim scanner */
	struct lruvec		lruvec;

	unsigned long		flags;

	ZONE_PADDING(_pad2_)

	/* Per-node vmstats */
	struct per_cpu_nodestat __percpu *per_cpu_nodestats;
	atomic_long_t		vm_stat[NR_VM_NODE_STAT_ITEMS];
} pg_data_t;

#define node_present_pages(nid)	(NODE_DATA(nid)->node_present_pages)
#define node_spanned_pages(nid)	(NODE_DATA(nid)->node_spanned_pages)
#ifdef CONFIG_FLAT_NODE_MEM_MAP
#define pgdat_page_nr(pgdat, pagenr)	((pgdat)->node_mem_map + (pagenr))
#else
#define pgdat_page_nr(pgdat, pagenr)	pfn_to_page((pgdat)->node_start_pfn + (pagenr))
#endif
#define nid_page_nr(nid, pagenr) 	pgdat_page_nr(NODE_DATA(nid),(pagenr))

#define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)
#define node_end_pfn(nid) pgdat_end_pfn(NODE_DATA(nid))
static inline spinlock_t *zone_lru_lock(struct zone *zone)
{
	return &zone->zone_pgdat->lru_lock;
}

static inline struct lruvec *node_lruvec(struct pglist_data *pgdat)
{
	return &pgdat->lruvec;
}

static inline unsigned long pgdat_end_pfn(pg_data_t *pgdat)
{
	return pgdat->node_start_pfn + pgdat->node_spanned_pages;
}

static inline bool pgdat_is_empty(pg_data_t *pgdat)
{
	return !pgdat->node_start_pfn && !pgdat->node_spanned_pages;
}

#include <linux/memory_hotplug.h>

void build_all_zonelists(pg_data_t *pgdat);
void wakeup_kswapd(struct zone *zone, gfp_t gfp_mask, int order,
		   enum zone_type classzone_idx);
bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
			 int classzone_idx, unsigned int alloc_flags,
			 long free_pages);
bool zone_watermark_ok(struct zone *z, unsigned int order,
		unsigned long mark, int classzone_idx,
		unsigned int alloc_flags);
bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
		unsigned long mark, int classzone_idx);
enum memmap_context {
	MEMMAP_EARLY,
	MEMMAP_HOTPLUG,
};
extern void init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
				     unsigned long size);

extern void lruvec_init(struct lruvec *lruvec);

static inline struct pglist_data *lruvec_pgdat(struct lruvec *lruvec)
{
#ifdef CONFIG_MEMCG
	return lruvec->pgdat;
#else
	return container_of(lruvec, struct pglist_data, lruvec);
#endif
}

extern unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx);

#ifdef CONFIG_HAVE_MEMORY_PRESENT
void memory_present(int nid, unsigned long start, unsigned long end);
#else
static inline void memory_present(int nid, unsigned long start, unsigned long end) {}
#endif

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
int local_memory_node(int node_id);
#else
static inline int local_memory_node(int node_id) { return node_id; };
#endif

/*
 * zone_idx() returns 0 for the ZONE_DMA zone, 1 for the ZONE_NORMAL zone, etc.
 */
#define zone_idx(zone)		((zone) - (zone)->zone_pgdat->node_zones)

#ifdef CONFIG_ZONE_DEVICE
static inline bool is_dev_zone(const struct zone *zone)
{
	return zone_idx(zone) == ZONE_DEVICE;
}
#else
static inline bool is_dev_zone(const struct zone *zone)
{
	return false;
}
#endif

/*
 * Returns true if a zone has pages managed by the buddy allocator.
 * All the reclaim decisions have to use this function rather than
 * populated_zone(). If the whole zone is reserved then we can easily
 * end up with populated_zone() && !managed_zone().
 */
static inline bool managed_zone(struct zone *zone)
{
	return zone->managed_pages;
}

/* Returns true if a zone has memory */
static inline bool populated_zone(struct zone *zone)
823
{
824
	return zone->present_pages;
825 826
}

827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845
#ifdef CONFIG_NUMA
static inline int zone_to_nid(struct zone *zone)
{
	return zone->node;
}

static inline void zone_set_nid(struct zone *zone, int nid)
{
	zone->node = nid;
}
#else
static inline int zone_to_nid(struct zone *zone)
{
	return 0;
}

static inline void zone_set_nid(struct zone *zone, int nid) {}
#endif

extern int movable_zone;

#ifdef CONFIG_HIGHMEM
static inline int zone_movable_is_highmem(void)
{
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
	return movable_zone == ZONE_HIGHMEM;
#else
	return (ZONE_MOVABLE - 1) == ZONE_HIGHMEM;
#endif
}
#endif

static inline int is_highmem_idx(enum zone_type idx)
{
#ifdef CONFIG_HIGHMEM
	return (idx == ZONE_HIGHMEM ||
		(idx == ZONE_MOVABLE && zone_movable_is_highmem()));
#else
	return 0;
#endif
}

/**
 * is_highmem - helper function to quickly check if a struct zone is a 
 *              highmem zone or not.  This is an attempt to keep references
 *              to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum.
 * @zone - pointer to struct zone variable
 */
static inline int is_highmem(struct zone *zone)
{
#ifdef CONFIG_HIGHMEM
	return is_highmem_idx(zone_idx(zone));
#else
	return 0;
#endif
}

/* These two functions are used to setup the per zone pages min values */
struct ctl_table;
int min_free_kbytes_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int watermark_scale_factor_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES];
int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
			void __user *, size_t *, loff_t *);
int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int,
			void __user *, size_t *, loff_t *);

extern int numa_zonelist_order_handler(struct ctl_table *, int,
			void __user *, size_t *, loff_t *);
extern char numa_zonelist_order[];
#define NUMA_ZONELIST_ORDER_LEN	16

#ifndef CONFIG_NEED_MULTIPLE_NODES

extern struct pglist_data contig_page_data;
#define NODE_DATA(nid)		(&contig_page_data)
#define NODE_MEM_MAP(nid)	mem_map

#else /* CONFIG_NEED_MULTIPLE_NODES */

#include <asm/mmzone.h>

#endif /* !CONFIG_NEED_MULTIPLE_NODES */

extern struct pglist_data *first_online_pgdat(void);
extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
extern struct zone *next_zone(struct zone *zone);

/**
 * for_each_online_pgdat - helper macro to iterate over all online nodes
 * @pgdat - pointer to a pg_data_t variable
 */
#define for_each_online_pgdat(pgdat)			\
	for (pgdat = first_online_pgdat();		\
	     pgdat;					\
	     pgdat = next_online_pgdat(pgdat))
/**
 * for_each_zone - helper macro to iterate over all memory zones
 * @zone - pointer to struct zone variable
 *
 * The user only needs to declare the zone variable, for_each_zone
 * fills it in.
 */
#define for_each_zone(zone)			        \
	for (zone = (first_online_pgdat())->node_zones; \
	     zone;					\
	     zone = next_zone(zone))

#define for_each_populated_zone(zone)		        \
	for (zone = (first_online_pgdat())->node_zones; \
	     zone;					\
	     zone = next_zone(zone))			\
		if (!populated_zone(zone))		\
			; /* do nothing */		\
		else
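
/*
 * Illustrative sketch (not part of the original header): typical use of
 * the iterators above, e.g. summing present pages over populated zones:
 *
 *	struct zone *zone;
 *	unsigned long present = 0;
 *
 *	for_each_populated_zone(zone)
 *		present += zone->present_pages;
 */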

static inline struct zone *zonelist_zone(struct zoneref *zoneref)
{
	return zoneref->zone;
}

static inline int zonelist_zone_idx(struct zoneref *zoneref)
{
	return zoneref->zone_idx;
}

static inline int zonelist_node_idx(struct zoneref *zoneref)
{
	return zone_to_nid(zoneref->zone);
}

struct zoneref *__next_zones_zonelist(struct zoneref *z,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes);

/**
 * next_zones_zonelist - Returns the next zone at or below highest_zoneidx within the allowed nodemask using a cursor within a zonelist as a starting point
 * @z - The cursor used as a starting point for the search
 * @highest_zoneidx - The zone index of the highest zone to return
 * @nodes - An optional nodemask to filter the zonelist with
 *
 * This function returns the next zone at or below a given zone index that is
 * within the allowed nodemask using a cursor as the starting point for the
 * search. The zoneref returned is a cursor that represents the current zone
 * being examined. It should be advanced by one before calling
 * next_zones_zonelist again.
 */
static __always_inline struct zoneref *next_zones_zonelist(struct zoneref *z,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes)
{
	if (likely(!nodes && zonelist_zone_idx(z) <= highest_zoneidx))
		return z;
	return __next_zones_zonelist(z, highest_zoneidx, nodes);
}

/**
 * first_zones_zonelist - Returns the first zone at or below highest_zoneidx within the allowed nodemask in a zonelist
 * @zonelist - The zonelist to search for a suitable zone
 * @highest_zoneidx - The zone index of the highest zone to return
 * @nodes - An optional nodemask to filter the zonelist with
 * @return - Zoneref pointer for the first suitable zone found (see below)
 *
 * This function returns the first zone at or below a given zone index that is
 * within the allowed nodemask. The zoneref returned is a cursor that can be
 * used to iterate the zonelist with next_zones_zonelist by advancing it by
 * one before calling.
 *
 * When no eligible zone is found, zoneref->zone is NULL (zoneref itself is
 * never NULL). This may happen either genuinely, or due to concurrent nodemask
 * update due to cpuset modification.
 */
static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes)
{
	return next_zones_zonelist(zonelist->_zonerefs,
							highest_zoneidx, nodes);
}

/**
 * for_each_zone_zonelist_nodemask - helper macro to iterate over valid zones in a zonelist at or below a given zone index and within a nodemask
 * @zone - The current zone in the iterator
 * @z - The current pointer within zonelist->zones being iterated
 * @zlist - The zonelist being iterated
 * @highidx - The zone index of the highest zone to return
 * @nodemask - Nodemask allowed by the allocator
 *
 * This iterator iterates through all zones at or below a given zone index and
 * within a given nodemask
 */
#define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
	for (z = first_zones_zonelist(zlist, highidx, nodemask), zone = zonelist_zone(z);	\
		zone;							\
		z = next_zones_zonelist(++z, highidx, nodemask),	\
			zone = zonelist_zone(z))

#define for_next_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
	for (zone = z->zone;	\
		zone;							\
		z = next_zones_zonelist(++z, highidx, nodemask),	\
			zone = zonelist_zone(z))


/**
 * for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a given zone index
 * @zone - The current zone in the iterator
 * @z - The current pointer within zonelist->zones being iterated
 * @zlist - The zonelist being iterated
 * @highidx - The zone index of the highest zone to return
 *
 * This iterator iterates through all zones at or below a given zone index.
 */
#define for_each_zone_zonelist(zone, z, zlist, highidx) \
	for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, NULL)
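
/*
 * Illustrative sketch (not part of the original header): walking a node's
 * fallback zonelist in allocation-preference order, roughly as the page
 * allocator does.  node_zonelist() and gfp_zone() live in <linux/gfp.h>:
 *
 *	struct zonelist *zonelist = node_zonelist(nid, GFP_KERNEL);
 *	struct zoneref *z;
 *	struct zone *zone;
 *
 *	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(GFP_KERNEL))
 *		if (managed_zone(zone))
 *			break;	// first usable zone for this request
 */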

#ifdef CONFIG_SPARSEMEM
#include <asm/sparsemem.h>
#endif

#if !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) && \
	!defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
static inline unsigned long early_pfn_to_nid(unsigned long pfn)
{
	BUILD_BUG_ON(IS_ENABLED(CONFIG_NUMA));
	return 0;
}
#endif

#ifdef CONFIG_FLATMEM
#define pfn_to_nid(pfn)		(0)
#endif

#ifdef CONFIG_SPARSEMEM

/*
 * SECTION_SHIFT    		#bits space required to store a section #
 *
 * PA_SECTION_SHIFT		physical address to/from section number
 * PFN_SECTION_SHIFT		pfn to/from section number
 */
#define PA_SECTION_SHIFT	(SECTION_SIZE_BITS)
#define PFN_SECTION_SHIFT	(SECTION_SIZE_BITS - PAGE_SHIFT)

#define NR_MEM_SECTIONS		(1UL << SECTIONS_SHIFT)

#define PAGES_PER_SECTION       (1UL << PFN_SECTION_SHIFT)
#define PAGE_SECTION_MASK	(~(PAGES_PER_SECTION-1))

#define SECTION_BLOCKFLAGS_BITS \
	((1UL << (PFN_SECTION_SHIFT - pageblock_order)) * NR_PAGEBLOCK_BITS)

#if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS
#error Allocator MAX_ORDER exceeds SECTION_SIZE
#endif

static inline unsigned long pfn_to_section_nr(unsigned long pfn)
{
	return pfn >> PFN_SECTION_SHIFT;
}
static inline unsigned long section_nr_to_pfn(unsigned long sec)
{
	return sec << PFN_SECTION_SHIFT;
}

#define SECTION_ALIGN_UP(pfn)	(((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK)
#define SECTION_ALIGN_DOWN(pfn)	((pfn) & PAGE_SECTION_MASK)
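
/*
 * Illustrative example (not part of the original header), assuming the
 * x86_64 value SECTION_SIZE_BITS == 27 and 4 KiB pages, i.e.
 * PFN_SECTION_SHIFT == 15 and PAGES_PER_SECTION == 32768:
 *
 *	pfn_to_section_nr(0x48000)  == 9
 *	section_nr_to_pfn(9)        == 0x48000
 *	SECTION_ALIGN_DOWN(0x48123) == 0x48000
 *	SECTION_ALIGN_UP(0x48123)   == 0x50000
 */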

struct page;
struct page_ext;
struct mem_section {
	/*
	 * This is, logically, a pointer to an array of struct
	 * pages.  However, it is stored with some other magic.
	 * (see sparse.c::sparse_init_one_section())
	 *
	 * Additionally during early boot we encode node id of
	 * the location of the section here to guide allocation.
	 * (see sparse.c::memory_present())
	 *
	 * Making it a UL at least makes someone do a cast
	 * before using it wrong.
	 */
	unsigned long section_mem_map;

	/* See declaration of similar field in struct zone */
	unsigned long *pageblock_flags;
#ifdef CONFIG_PAGE_EXTENSION
	/*
	 * If SPARSEMEM, pgdat doesn't have page_ext pointer. We use
	 * section. (see page_ext.h about this.)
	 */
	struct page_ext *page_ext;
	unsigned long pad;
#endif
	/*
	 * WARNING: mem_section must be a power-of-2 in size for the
	 * calculation and use of SECTION_ROOT_MASK to make sense.
	 */
};

#ifdef CONFIG_SPARSEMEM_EXTREME
#define SECTIONS_PER_ROOT       (PAGE_SIZE / sizeof (struct mem_section))
#else
#define SECTIONS_PER_ROOT	1
#endif

#define SECTION_NR_TO_ROOT(sec)	((sec) / SECTIONS_PER_ROOT)
#define NR_SECTION_ROOTS	DIV_ROUND_UP(NR_MEM_SECTIONS, SECTIONS_PER_ROOT)
#define SECTION_ROOT_MASK	(SECTIONS_PER_ROOT - 1)

#ifdef CONFIG_SPARSEMEM_EXTREME
extern struct mem_section **mem_section;
#else
extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
#endif

static inline struct mem_section *__nr_to_section(unsigned long nr)
{
#ifdef CONFIG_SPARSEMEM_EXTREME
	if (!mem_section)
		return NULL;
#endif
	if (!mem_section[SECTION_NR_TO_ROOT(nr)])
		return NULL;
	return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
}
extern int __section_nr(struct mem_section* ms);
extern unsigned long usemap_size(void);

/*
 * We use the lower bits of the mem_map pointer to store
 * a little bit of information.  The pointer is calculated
 * as mem_map - section_nr_to_pfn(pnum).  The result is
 * aligned to the minimum alignment of the two values:
 *   1. All mem_map arrays are page-aligned.
 *   2. section_nr_to_pfn() always clears PFN_SECTION_SHIFT
 *      lowest bits.  PFN_SECTION_SHIFT is arch-specific
 *      (equal SECTION_SIZE_BITS - PAGE_SHIFT), and the
 *      worst combination is powerpc with 256k pages,
 *      which results in PFN_SECTION_SHIFT equal 6.
 * To sum it up, at least 6 bits are available.
 */
#define	SECTION_MARKED_PRESENT	(1UL<<0)
#define SECTION_HAS_MEM_MAP	(1UL<<1)
#define SECTION_IS_ONLINE	(1UL<<2)
#define SECTION_MAP_LAST_BIT	(1UL<<3)
#define SECTION_MAP_MASK	(~(SECTION_MAP_LAST_BIT-1))
#define SECTION_NID_SHIFT	3

static inline struct page *__section_mem_map_addr(struct mem_section *section)
{
	unsigned long map = section->section_mem_map;
	map &= SECTION_MAP_MASK;
	return (struct page *)map;
}

static inline int present_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_MARKED_PRESENT));
}

static inline int present_section_nr(unsigned long nr)
{
	return present_section(__nr_to_section(nr));
}

static inline int valid_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP));
}

static inline int valid_section_nr(unsigned long nr)
{
	return valid_section(__nr_to_section(nr));
}

static inline int online_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_IS_ONLINE));
}

static inline int online_section_nr(unsigned long nr)
{
	return online_section(__nr_to_section(nr));
}

#ifdef CONFIG_MEMORY_HOTPLUG
void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn);
#ifdef CONFIG_MEMORY_HOTREMOVE
void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn);
#endif
#endif

static inline struct mem_section *__pfn_to_section(unsigned long pfn)
{
	return __nr_to_section(pfn_to_section_nr(pfn));
}

extern int __highest_present_section_nr;

#ifndef CONFIG_HAVE_ARCH_PFN_VALID
static inline int pfn_valid(unsigned long pfn)
{
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	return valid_section(__nr_to_section(pfn_to_section_nr(pfn)));
}
#endif

static inline int pfn_present(unsigned long pfn)
{
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	return present_section(__nr_to_section(pfn_to_section_nr(pfn)));
}

/*
 * These are _only_ used during initialisation, therefore they
 * can use __initdata ...  They could have names to indicate
 * this restriction.
 */
#ifdef CONFIG_NUMA
#define pfn_to_nid(pfn)							\
({									\
	unsigned long __pfn_to_nid_pfn = (pfn);				\
	page_to_nid(pfn_to_page(__pfn_to_nid_pfn));			\
})
#else
#define pfn_to_nid(pfn)		(0)
#endif

#define early_pfn_valid(pfn)	pfn_valid(pfn)
void sparse_init(void);
#else
#define sparse_init()	do {} while (0)
#define sparse_index_init(_sec, _nid)  do {} while (0)
#endif /* CONFIG_SPARSEMEM */

/*
 * During memory init memblocks map pfns to nids. The search is expensive and
 * this caches recent lookups. The implementation of __early_pfn_to_nid
 * may treat start/end as pfns or sections.
 */
struct mminit_pfnnid_cache {
	unsigned long last_start;
	unsigned long last_end;
	int last_nid;
};

#ifndef early_pfn_valid
#define early_pfn_valid(pfn)	(1)
#endif

void memory_present(int nid, unsigned long start, unsigned long end);

/*
 * If it is possible to have holes within a MAX_ORDER_NR_PAGES, then we
 * need to check pfn validity within that MAX_ORDER_NR_PAGES block.
 * pfn_valid_within() should be used in this case; we optimise this away
 * when we have no holes within a MAX_ORDER_NR_PAGES block.
 */
#ifdef CONFIG_HOLES_IN_ZONE
#define pfn_valid_within(pfn) pfn_valid(pfn)
#else
#define pfn_valid_within(pfn) (1)
#endif
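
/*
 * Illustrative sketch (not part of the original header): a pageblock
 * walker guarding each pfn with pfn_valid_within(), in the style of the
 * compaction and page-isolation code:
 *
 *	for (pfn = start_pfn; pfn < start_pfn + pageblock_nr_pages; pfn++) {
 *		if (!pfn_valid_within(pfn))
 *			continue;
 *		page = pfn_to_page(pfn);
 *		...
 *	}
 */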

#ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL
/*
 * pfn_valid() is meant to be able to tell if a given PFN has valid memmap
 * associated with it or not. This means that a struct page exists for this
 * pfn. The caller cannot assume the page is fully initialized in general.
 * Hotpluggable pages might not have been onlined yet. pfn_to_online_page()
 * will ensure the struct page is fully online and initialized. Special pages
 * (e.g. ZONE_DEVICE) are never onlined and should be treated accordingly.
 *
 * In FLATMEM, it is expected that holes always have valid memmap as long as
 * there is valid PFNs either side of the hole. In SPARSEMEM, it is assumed
 * that a valid section has a memmap for the entire section.
 *
 * However, ARM, and maybe other embedded architectures in the future,
 * free memmap backing holes to save memory on the assumption the memmap is
 * never used. The page_zone linkages are then broken even though pfn_valid()
 * returns true. A walker of the full memmap must then do this additional
 * check to ensure the memmap they are looking at is sane by making sure
 * the zone and PFN linkages are still valid. This is expensive, but walkers
 * of the full memmap are extremely rare.
 */
bool memmap_valid_within(unsigned long pfn,
					struct page *page, struct zone *zone);
#else
static inline bool memmap_valid_within(unsigned long pfn,
					struct page *page, struct zone *zone)
{
	return true;
}
#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */

#endif /* !__GENERATING_BOUNDS_H */
#endif /* !__ASSEMBLY__ */
#endif /* _LINUX_MMZONE_H */