#ifndef _LINUX_MMZONE_H
#define _LINUX_MMZONE_H

#ifndef __ASSEMBLY__
#ifndef __GENERATING_BOUNDS_H

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/numa.h>
#include <linux/init.h>
#include <linux/seqlock.h>
#include <linux/nodemask.h>
#include <linux/pageblock-flags.h>
#include <linux/page-flags-layout.h>
#include <linux/atomic.h>
#include <asm/page.h>

/* Free memory management - zoned buddy allocator.  */
#ifndef CONFIG_FORCE_MAX_ZONEORDER
#define MAX_ORDER 11
#else
#define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER
#endif
#define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1))
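
/*
 * Example (illustrative): with the default MAX_ORDER of 11 and 4 KiB pages,
 * the largest buddy allocation is order MAX_ORDER - 1 = 10, so
 * MAX_ORDER_NR_PAGES = 1 << 10 = 1024 pages, i.e. 4 MiB of contiguous memory.
 */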

/*
 * PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed
 * costly to service.  That is between allocation orders which should
 * coalesce naturally under reasonable reclaim pressure and those which
 * will not.
 */
#define PAGE_ALLOC_COSTLY_ORDER 3

enum {
	MIGRATE_UNMOVABLE,
	MIGRATE_MOVABLE,
	MIGRATE_RECLAIMABLE,
	MIGRATE_PCPTYPES,	/* the number of types on the pcp lists */
	MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES,
#ifdef CONFIG_CMA
	/*
	 * MIGRATE_CMA migration type is designed to mimic the way
	 * ZONE_MOVABLE works.  Only movable pages can be allocated
	 * from MIGRATE_CMA pageblocks and the page allocator never
	 * implicitly changes the migration type of a MIGRATE_CMA pageblock.
	 *
	 * The way to use it is to change the migratetype of a range of
	 * pageblocks to MIGRATE_CMA, which can be done by the
	 * __free_pageblock_cma() function.  What is important though is
	 * that a range of pageblocks must be aligned to MAX_ORDER_NR_PAGES
	 * should the biggest page be bigger than a single pageblock.
	 */
	MIGRATE_CMA,
#endif
#ifdef CONFIG_MEMORY_ISOLATION
	MIGRATE_ISOLATE,	/* can't allocate from here */
#endif
	MIGRATE_TYPES
};

/* In mm/page_alloc.c; keep in sync also with show_migration_types() there */
extern char * const migratetype_names[MIGRATE_TYPES];

#ifdef CONFIG_CMA
#  define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
#  define is_migrate_cma_page(_page) (get_pageblock_migratetype(_page) == MIGRATE_CMA)
#else
#  define is_migrate_cma(migratetype) false
#  define is_migrate_cma_page(_page) false
#endif

#define for_each_migratetype_order(order, type) \
	for (order = 0; order < MAX_ORDER; order++) \
		for (type = 0; type < MIGRATE_TYPES; type++)

extern int page_group_by_mobility_disabled;

#define NR_MIGRATETYPE_BITS (PB_migrate_end - PB_migrate + 1)
#define MIGRATETYPE_MASK ((1UL << NR_MIGRATETYPE_BITS) - 1)

#define get_pageblock_migratetype(page)					\
	get_pfnblock_flags_mask(page, page_to_pfn(page),		\
			PB_migrate_end, MIGRATETYPE_MASK)
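
/*
 * Usage sketch (illustrative): the migratetype of the pageblock holding a
 * page can be read and tested like this,
 *
 *	int mt = get_pageblock_migratetype(page);
 *
 *	if (is_migrate_cma(mt))
 *		...;	// only movable allocations may use this block
 *
 * The underlying bits live in zone->pageblock_flags (or in the mem_section
 * for SPARSEMEM) and are fetched by get_pfnblock_flags_mask().
 */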

struct free_area {
	struct list_head	free_list[MIGRATE_TYPES];
	unsigned long		nr_free;
};

struct pglist_data;

/*
 * zone->lock and the zone lru_lock are two of the hottest locks in the kernel.
 * So add a wild amount of padding here to ensure that they fall into separate
 * cachelines.  There are very few zone structures in the machine, so space
 * consumption is not a concern here.
 */
#if defined(CONFIG_SMP)
struct zone_padding {
	char x[0];
} ____cacheline_internodealigned_in_smp;
#define ZONE_PADDING(name)	struct zone_padding name;
#else
#define ZONE_PADDING(name)
#endif

enum zone_stat_item {
	/* First 128 byte cacheline (assuming 64 bit words) */
	NR_FREE_PAGES,
	NR_ZONE_LRU_BASE, /* Used only for compaction and reclaim retry */
	NR_ZONE_INACTIVE_ANON = NR_ZONE_LRU_BASE,
	NR_ZONE_ACTIVE_ANON,
	NR_ZONE_INACTIVE_FILE,
	NR_ZONE_ACTIVE_FILE,
	NR_ZONE_UNEVICTABLE,
	NR_ZONE_WRITE_PENDING,	/* Count of dirty, writeback and unstable pages */
	NR_MLOCK,		/* mlock()ed pages found and moved off LRU */
	NR_SLAB_RECLAIMABLE,
	NR_SLAB_UNRECLAIMABLE,
	NR_PAGETABLE,		/* used for pagetables */
	NR_KERNEL_STACK_KB,	/* measured in KiB */
	/* Second 128 byte cacheline */
	NR_BOUNCE,
#if IS_ENABLED(CONFIG_ZSMALLOC)
	NR_ZSPAGES,		/* allocated in zsmalloc */
#endif
#ifdef CONFIG_NUMA
	NUMA_HIT,		/* allocated in intended node */
	NUMA_MISS,		/* allocated in non intended node */
	NUMA_FOREIGN,		/* was intended here, hit elsewhere */
	NUMA_INTERLEAVE_HIT,	/* interleaver preferred this zone */
	NUMA_LOCAL,		/* allocation from local node */
	NUMA_OTHER,		/* allocation from other node */
#endif
	NR_FREE_CMA_PAGES,
	NR_VM_ZONE_STAT_ITEMS };

enum node_stat_item {
	NR_LRU_BASE,
	NR_INACTIVE_ANON = NR_LRU_BASE, /* must match order of LRU_[IN]ACTIVE */
	NR_ACTIVE_ANON,		/*  "     "     "   "       "         */
	NR_INACTIVE_FILE,	/*  "     "     "   "       "         */
	NR_ACTIVE_FILE,		/*  "     "     "   "       "         */
	NR_UNEVICTABLE,		/*  "     "     "   "       "         */
	NR_ISOLATED_ANON,	/* Temporary isolated pages from anon lru */
	NR_ISOLATED_FILE,	/* Temporary isolated pages from file lru */
	WORKINGSET_REFAULT,
	WORKINGSET_ACTIVATE,
	WORKINGSET_NODERECLAIM,
	NR_ANON_MAPPED,	/* Mapped anonymous pages */
	NR_FILE_MAPPED,	/* pagecache pages mapped into pagetables.
			   only modified from process context */
	NR_FILE_PAGES,
	NR_FILE_DIRTY,
	NR_WRITEBACK,
	NR_WRITEBACK_TEMP,	/* Writeback using temporary buffers */
	NR_SHMEM,		/* shmem pages (includes tmpfs/GEM pages) */
	NR_SHMEM_THPS,
	NR_SHMEM_PMDMAPPED,
	NR_ANON_THPS,
	NR_UNSTABLE_NFS,	/* NFS unstable pages */
	NR_VMSCAN_WRITE,
	NR_VMSCAN_IMMEDIATE,	/* Prioritise for reclaim when writeback ends */
	NR_DIRTIED,		/* page dirtyings since bootup */
	NR_WRITTEN,		/* page writings since bootup */
	NR_VM_NODE_STAT_ITEMS
};

/*
 * We do arithmetic on the LRU lists in various places in the code,
 * so it is important to keep the active lists LRU_ACTIVE higher in
 * the array than the corresponding inactive lists, and to keep
 * the *_FILE lists LRU_FILE higher than the corresponding _ANON lists.
 *
 * This has to be kept in sync with the statistics in zone_stat_item
 * above and the descriptions in vmstat_text in mm/vmstat.c
 */
#define LRU_BASE 0
#define LRU_ACTIVE 1
#define LRU_FILE 2

enum lru_list {
	LRU_INACTIVE_ANON = LRU_BASE,
	LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE,
	LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE,
	LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE,
	LRU_UNEVICTABLE,
	NR_LRU_LISTS
};
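
/*
 * Example of the LRU index arithmetic (illustrative): an active file page
 * sits on list LRU_BASE + LRU_FILE + LRU_ACTIVE = 3 == LRU_ACTIVE_FILE, so
 * helpers can derive the list from two flags, e.g.
 *
 *	enum lru_list lru = LRU_BASE + (file ? LRU_FILE : 0) +
 *				(active ? LRU_ACTIVE : 0);
 */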

#define for_each_lru(lru) for (lru = 0; lru < NR_LRU_LISTS; lru++)

#define for_each_evictable_lru(lru) for (lru = 0; lru <= LRU_ACTIVE_FILE; lru++)

static inline int is_file_lru(enum lru_list lru)
{
	return (lru == LRU_INACTIVE_FILE || lru == LRU_ACTIVE_FILE);
}

static inline int is_active_lru(enum lru_list lru)
{
	return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE);
}

struct zone_reclaim_stat {
	/*
	 * The pageout code in vmscan.c keeps track of how many of the
	 * mem/swap backed and file backed pages are referenced.
	 * The higher the rotated/scanned ratio, the more valuable
	 * that cache is.
	 *
	 * The anon LRU stats live in [0], file LRU stats in [1]
	 */
	unsigned long		recent_rotated[2];
	unsigned long		recent_scanned[2];
};

struct lruvec {
	struct list_head		lists[NR_LRU_LISTS];
	struct zone_reclaim_stat	reclaim_stat;
	/* Evictions & activations on the inactive file list */
	atomic_long_t			inactive_age;
#ifdef CONFIG_MEMCG
	struct pglist_data *pgdat;
#endif
};

/* Mask used at gathering information at once (see memcontrol.c) */
#define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))
#define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
#define LRU_ALL	     ((1 << NR_LRU_LISTS) - 1)

/* Isolate unmapped file */
#define ISOLATE_UNMAPPED	((__force isolate_mode_t)0x2)
/* Isolate for asynchronous migration */
#define ISOLATE_ASYNC_MIGRATE	((__force isolate_mode_t)0x4)
/* Isolate unevictable pages */
#define ISOLATE_UNEVICTABLE	((__force isolate_mode_t)0x8)

/* LRU Isolation modes. */
typedef unsigned __bitwise isolate_mode_t;

enum zone_watermarks {
	WMARK_MIN,
	WMARK_LOW,
	WMARK_HIGH,
	NR_WMARK
};

#define min_wmark_pages(z) (z->watermark[WMARK_MIN])
#define low_wmark_pages(z) (z->watermark[WMARK_LOW])
#define high_wmark_pages(z) (z->watermark[WMARK_HIGH])
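
/*
 * Usage sketch (illustrative): a much simplified version of the test done
 * by zone_watermark_ok() in mm/page_alloc.c,
 *
 *	if (zone_page_state(z, NR_FREE_PAGES) > high_wmark_pages(z))
 *		;	// plenty of free pages, no need to wake kswapd
 *
 * The real check also subtracts lowmem_reserve[] and scales per order.
 */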

struct per_cpu_pages {
	int count;		/* number of pages in the list */
	int high;		/* high watermark, emptying needed */
	int batch;		/* chunk size for buddy add/remove */

	/* Lists of pages, one per migrate type stored on the pcp-lists */
	struct list_head lists[MIGRATE_PCPTYPES];
};

struct per_cpu_pageset {
	struct per_cpu_pages pcp;
#ifdef CONFIG_NUMA
	s8 expire;
#endif
#ifdef CONFIG_SMP
	s8 stat_threshold;
	s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
#endif
};
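
/*
 * Illustrative note: these fields are driven by mm/page_alloc.c. Roughly,
 * the per-cpu lists are refilled from the buddy allocator in pcp->batch
 * sized chunks, and once pcp->count rises above pcp->high, about pcp->batch
 * pages are drained back to the buddy free lists.
 */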

struct per_cpu_nodestat {
	s8 stat_threshold;
	s8 vm_node_stat_diff[NR_VM_NODE_STAT_ITEMS];
};

#endif /* !__GENERATING_BOUNDS_H */

enum zone_type {
#ifdef CONFIG_ZONE_DMA
	/*
	 * ZONE_DMA is used when there are devices that are not able
	 * to do DMA to all of addressable memory (ZONE_NORMAL). Then we
	 * carve out the portion of memory that is needed for these devices.
	 * The range is arch specific.
	 *
	 * Some examples
	 *
	 * Architecture		Limit
	 * ---------------------------
	 * parisc, ia64, sparc	<4G
	 * s390			<2G
	 * arm			Various
	 * alpha		Unlimited or 0-16MB.
	 *
	 * i386, x86_64 and multiple other arches
	 * 			<16M.
	 */
	ZONE_DMA,
#endif
#ifdef CONFIG_ZONE_DMA32
	/*
	 * x86_64 needs two ZONE_DMAs because it supports devices that are
	 * only able to do DMA to the lower 16M but also 32 bit devices that
	 * can only do DMA areas below 4G.
	 */
	ZONE_DMA32,
#endif
	/*
	 * Normal addressable memory is in ZONE_NORMAL. DMA operations can be
	 * performed on pages in ZONE_NORMAL if the DMA devices support
	 * transfers to all addressable memory.
	 */
	ZONE_NORMAL,
#ifdef CONFIG_HIGHMEM
	/*
	 * A memory area that is only addressable by the kernel through
	 * mapping portions into its own address space. This is for example
	 * used by i386 to allow the kernel to address the memory beyond
	 * 900MB. The kernel will set up special mappings (page
	 * table entries on i386) for each page that the kernel needs to
	 * access.
	 */
	ZONE_HIGHMEM,
#endif
	ZONE_MOVABLE,
#ifdef CONFIG_ZONE_DEVICE
	ZONE_DEVICE,
#endif
	__MAX_NR_ZONES

};

#ifndef __GENERATING_BOUNDS_H

struct zone {
	/* Read-mostly fields */

	/* zone watermarks, access with *_wmark_pages(zone) macros */
	unsigned long watermark[NR_WMARK];

	unsigned long nr_reserved_highatomic;

	/*
	 * We don't know if the memory that we're going to allocate will be
	 * freeable or/and it will be released eventually, so to avoid totally
	 * wasting several GB of ram we must reserve some of the lower zone
	 * memory (otherwise we risk running OOM on the lower zones despite
	 * there being tons of freeable ram on the higher zones).  This array is
	 * recalculated at runtime if the sysctl_lowmem_reserve_ratio sysctl
	 * changes.
	 */
	long lowmem_reserve[MAX_NR_ZONES];

#ifdef CONFIG_NUMA
	int node;
#endif
	struct pglist_data	*zone_pgdat;
	struct per_cpu_pageset __percpu *pageset;

#ifndef CONFIG_SPARSEMEM
	/*
	 * Flags for a pageblock_nr_pages block. See pageblock-flags.h.
	 * In SPARSEMEM, this map is stored in struct mem_section
	 */
	unsigned long		*pageblock_flags;
#endif /* CONFIG_SPARSEMEM */

	/* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
	unsigned long		zone_start_pfn;

	/*
	 * spanned_pages is the total pages spanned by the zone, including
	 * holes, which is calculated as:
	 * 	spanned_pages = zone_end_pfn - zone_start_pfn;
	 *
	 * present_pages is physical pages existing within the zone, which
	 * is calculated as:
	 *	present_pages = spanned_pages - absent_pages(pages in holes);
	 *
	 * managed_pages is present pages managed by the buddy system, which
	 * is calculated as (reserved_pages includes pages allocated by the
	 * bootmem allocator):
	 *	managed_pages = present_pages - reserved_pages;
	 *
	 * So present_pages may be used by memory hotplug or memory power
	 * management logic to figure out unmanaged pages by checking
	 * (present_pages - managed_pages). And managed_pages should be used
	 * by page allocator and vm scanner to calculate all kinds of watermarks
	 * and thresholds.
	 *
	 * Locking rules:
	 *
	 * zone_start_pfn and spanned_pages are protected by span_seqlock.
	 * It is a seqlock because it has to be read outside of zone->lock,
	 * and it is done in the main allocator path.  But, it is written
	 * quite infrequently.
	 *
	 * The span_seq lock is declared along with zone->lock because it is
	 * frequently read in proximity to zone->lock.  It's good to
	 * give them a chance of being in the same cacheline.
	 *
	 * Write access to present_pages at runtime should be protected by
	 * mem_hotplug_begin/end(). Any reader who can't tolerate drift of
	 * present_pages should use get_online_mems() to get a stable value.
	 *
	 * Read access to managed_pages should be safe because it's unsigned
	 * long. Write access to zone->managed_pages and totalram_pages are
	 * protected by managed_page_count_lock at runtime. Ideally only
	 * adjust_managed_page_count() should be used instead of directly
	 * touching zone->managed_pages and totalram_pages.
	 */
	unsigned long		managed_pages;
	unsigned long		spanned_pages;
	unsigned long		present_pages;

	const char		*name;

#ifdef CONFIG_MEMORY_ISOLATION
	/*
	 * Number of isolated pageblocks. It is used to solve the incorrect
	 * freepage counting problem caused by racy retrieval of the
	 * migratetype of a pageblock. Protected by zone->lock.
	 */
	unsigned long		nr_isolate_pageblock;
#endif

435 436 437 438 439
#ifdef CONFIG_MEMORY_HOTPLUG
	/* see spanned/present_pages for more description */
	seqlock_t		span_seqlock;
#endif

	int initialized;

	/* Write-intensive fields used from the page allocator */
	ZONE_PADDING(_pad1_)

	/* free areas of different sizes */
	struct free_area	free_area[MAX_ORDER];

	/* zone flags, see below */
	unsigned long		flags;

	/* Primarily protects free_area */
	spinlock_t		lock;

	/* Write-intensive fields used by compaction and vmstats. */
	ZONE_PADDING(_pad2_)

	/*
	 * When free pages are below this point, additional steps are taken
	 * when reading the number of free pages to avoid per-cpu counter
	 * drift allowing watermarks to be breached
	 */
	unsigned long percpu_drift_mark;

#if defined CONFIG_COMPACTION || defined CONFIG_CMA
	/* pfn where compaction free scanner should start */
	unsigned long		compact_cached_free_pfn;
	/* pfn where async and sync compaction migration scanner should start */
	unsigned long		compact_cached_migrate_pfn[2];
#endif

#ifdef CONFIG_COMPACTION
	/*
	 * On compaction failure, 1<<compact_defer_shift compactions
	 * are skipped before trying again. The number attempted since
	 * last failure is tracked with compact_considered.
	 */
	unsigned int		compact_considered;
	unsigned int		compact_defer_shift;
	int			compact_order_failed;
#endif

#if defined CONFIG_COMPACTION || defined CONFIG_CMA
	/* Set to true when the PG_migrate_skip bits should be cleared */
	bool			compact_blockskip_flush;
#endif

	bool			contiguous;

	ZONE_PADDING(_pad3_)
	/* Zone statistics */
	atomic_long_t		vm_stat[NR_VM_ZONE_STAT_ITEMS];
} ____cacheline_internodealigned_in_smp;

enum pgdat_flags {
	PGDAT_CONGESTED,		/* pgdat has many dirty pages backed by
					 * a congested BDI
					 */
	PGDAT_DIRTY,			/* reclaim scanning has recently found
					 * many dirty file pages at the tail
					 * of the LRU.
					 */
	PGDAT_WRITEBACK,		/* reclaim scanning has recently found
					 * many pages under writeback
					 */
	PGDAT_RECLAIM_LOCKED,		/* prevents concurrent reclaim */
};

static inline unsigned long zone_end_pfn(const struct zone *zone)
{
	return zone->zone_start_pfn + zone->spanned_pages;
}

static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn)
{
	return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone);
}
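
/*
 * Worked example (illustrative): for a zone with zone_start_pfn == 0x10000
 * and spanned_pages == 0x8000, zone_end_pfn() returns 0x18000, so
 * zone_spans_pfn(zone, 0x17fff) is true while zone_spans_pfn(zone, 0x18000)
 * is false (the end pfn is exclusive).
 */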

static inline bool zone_is_initialized(struct zone *zone)
{
	return zone->initialized;
}

static inline bool zone_is_empty(struct zone *zone)
{
	return zone->spanned_pages == 0;
}

/*
 * The "priority" of VM scanning is how much of the queues we will scan in one
 * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the
 * queues ("queue_length >> 12") during an aging round.
 */
#define DEF_PRIORITY 12

/* Maximum number of zones on a zonelist */
#define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES)

enum {
	ZONELIST_FALLBACK,	/* zonelist with fallback */
540
#ifdef CONFIG_NUMA
541 542 543 544 545
	/*
	 * The NUMA zonelists are doubled because we need zonelists that
	 * restrict the allocations to a single node for __GFP_THISNODE.
	 */
	ZONELIST_NOFALLBACK,	/* zonelist without fallback (__GFP_THISNODE) */
546
#endif
547 548
	MAX_ZONELISTS
};
549

550 551 552 553 554 555 556 557 558
/*
 * This struct contains information about a zone in a zonelist. It is stored
 * here to avoid dereferences into large structures and lookups of tables
 */
struct zoneref {
	struct zone *zone;	/* Pointer to actual zone */
	int zone_idx;		/* zone_idx(zoneref->zone) */
};

/*
 * One allocation request operates on a zonelist. A zonelist
 * is a list of zones, the first one is the 'goal' of the
 * allocation, the other zones are fallback zones, in decreasing
 * priority.
 *
 * To speed the reading of the zonelist, the zonerefs contain the zone index
 * of the entry being read. Helper functions to access information given
 * a struct zoneref are
 *
 * zonelist_zone()	- Return the struct zone * for an entry in _zonerefs
 * zonelist_zone_idx()	- Return the index of the zone for an entry
 * zonelist_node_idx()	- Return the index of the node for an entry
 */
struct zonelist {
	struct zoneref _zonerefs[MAX_ZONES_PER_ZONELIST + 1];
};

#ifndef CONFIG_DISCONTIGMEM
/* The array of struct pages - for discontigmem use pgdat->lmem_map */
extern struct page *mem_map;
#endif

/*
 * The pg_data_t structure is used in machines with CONFIG_DISCONTIGMEM
 * (mostly NUMA machines?) to denote a higher-level memory zone than the
 * zone denotes.
 *
 * On NUMA machines, each NUMA node would have a pg_data_t to describe
 * its memory layout.
 *
 * Memory statistics and page replacement data structures are maintained on a
 * per-zone basis.
 */
struct bootmem_data;
typedef struct pglist_data {
	struct zone node_zones[MAX_NR_ZONES];
	struct zonelist node_zonelists[MAX_ZONELISTS];
	int nr_zones;
#ifdef CONFIG_FLAT_NODE_MEM_MAP	/* means !SPARSEMEM */
	struct page *node_mem_map;
#ifdef CONFIG_PAGE_EXTENSION
	struct page_ext *node_page_ext;
#endif
#endif
#ifndef CONFIG_NO_BOOTMEM
	struct bootmem_data *bdata;
#endif
#ifdef CONFIG_MEMORY_HOTPLUG
	/*
	 * Must be held any time you expect node_start_pfn, node_present_pages
	 * or node_spanned_pages stay constant.  Holding this will also
	 * guarantee that any pfn_valid() stays that way.
	 *
	 * pgdat_resize_lock() and pgdat_resize_unlock() are provided to
	 * manipulate node_size_lock without checking for CONFIG_MEMORY_HOTPLUG.
	 *
	 * Nests above zone->lock and zone->span_seqlock
	 */
	spinlock_t node_size_lock;
#endif
	unsigned long node_start_pfn;
	unsigned long node_present_pages; /* total number of physical pages */
	unsigned long node_spanned_pages; /* total size of physical page
					     range, including holes */
	int node_id;
	wait_queue_head_t kswapd_wait;
	wait_queue_head_t pfmemalloc_wait;
	struct task_struct *kswapd;	/* Protected by
					   mem_hotplug_begin/end() */
	int kswapd_order;
	enum zone_type kswapd_classzone_idx;

	int kswapd_failures;		/* Number of 'reclaimed == 0' runs */

#ifdef CONFIG_COMPACTION
	int kcompactd_max_order;
	enum zone_type kcompactd_classzone_idx;
	wait_queue_head_t kcompactd_wait;
	struct task_struct *kcompactd;
#endif
#ifdef CONFIG_NUMA_BALANCING
	/* Lock serializing the migrate rate limiting window */
	spinlock_t numabalancing_migrate_lock;

	/* Rate limiting time interval */
	unsigned long numabalancing_migrate_next_window;

	/* Number of pages migrated during the rate limiting time interval */
	unsigned long numabalancing_migrate_nr_pages;
#endif
	/*
	 * This is a per-node reserve of pages that are not available
	 * to userspace allocations.
	 */
	unsigned long		totalreserve_pages;

#ifdef CONFIG_NUMA
	/*
	 * zone reclaim becomes active if more unmapped pages exist.
	 */
	unsigned long		min_unmapped_pages;
	unsigned long		min_slab_pages;
#endif /* CONFIG_NUMA */

	/* Write-intensive fields used by page reclaim */
	ZONE_PADDING(_pad1_)
	spinlock_t		lru_lock;

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
	/*
	 * If memory initialisation on large machines is deferred then this
	 * is the first PFN that needs to be initialised.
	 */
	unsigned long first_deferred_pfn;
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	spinlock_t split_queue_lock;
	struct list_head split_queue;
	unsigned long split_queue_len;
#endif

	/* Fields commonly accessed by the page reclaim scanner */
	struct lruvec		lruvec;

	/*
	 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
	 * this node's LRU.  Maintained by the pageout code.
	 */
	unsigned int inactive_ratio;

	unsigned long		flags;

	ZONE_PADDING(_pad2_)

	/* Per-node vmstats */
	struct per_cpu_nodestat __percpu *per_cpu_nodestats;
	atomic_long_t		vm_stat[NR_VM_NODE_STAT_ITEMS];
} pg_data_t;

#define node_present_pages(nid)	(NODE_DATA(nid)->node_present_pages)
#define node_spanned_pages(nid)	(NODE_DATA(nid)->node_spanned_pages)
#ifdef CONFIG_FLAT_NODE_MEM_MAP
#define pgdat_page_nr(pgdat, pagenr)	((pgdat)->node_mem_map + (pagenr))
#else
#define pgdat_page_nr(pgdat, pagenr)	pfn_to_page((pgdat)->node_start_pfn + (pagenr))
#endif
#define nid_page_nr(nid, pagenr) 	pgdat_page_nr(NODE_DATA(nid),(pagenr))

#define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)
#define node_end_pfn(nid) pgdat_end_pfn(NODE_DATA(nid))
static inline spinlock_t *zone_lru_lock(struct zone *zone)
{
	return &zone->zone_pgdat->lru_lock;
}

static inline struct lruvec *node_lruvec(struct pglist_data *pgdat)
{
	return &pgdat->lruvec;
}

static inline unsigned long pgdat_end_pfn(pg_data_t *pgdat)
{
	return pgdat->node_start_pfn + pgdat->node_spanned_pages;
}

static inline bool pgdat_is_empty(pg_data_t *pgdat)
{
	return !pgdat->node_start_pfn && !pgdat->node_spanned_pages;
}

static inline int zone_id(const struct zone *zone)
{
	struct pglist_data *pgdat = zone->zone_pgdat;

	return zone - pgdat->node_zones;
}

#ifdef CONFIG_ZONE_DEVICE
static inline bool is_dev_zone(const struct zone *zone)
{
	return zone_id(zone) == ZONE_DEVICE;
}
#else
static inline bool is_dev_zone(const struct zone *zone)
{
	return false;
}
#endif

#include <linux/memory_hotplug.h>

extern struct mutex zonelists_mutex;
void build_all_zonelists(pg_data_t *pgdat, struct zone *zone);
void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx);
bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
			 int classzone_idx, unsigned int alloc_flags,
			 long free_pages);
bool zone_watermark_ok(struct zone *z, unsigned int order,
		unsigned long mark, int classzone_idx,
		unsigned int alloc_flags);
bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
		unsigned long mark, int classzone_idx);
enum memmap_context {
	MEMMAP_EARLY,
	MEMMAP_HOTPLUG,
};
extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
				     unsigned long size);

extern void lruvec_init(struct lruvec *lruvec);

static inline struct pglist_data *lruvec_pgdat(struct lruvec *lruvec)
{
#ifdef CONFIG_MEMCG
	return lruvec->pgdat;
#else
	return container_of(lruvec, struct pglist_data, lruvec);
#endif
}

extern unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx);

#ifdef CONFIG_HAVE_MEMORY_PRESENT
void memory_present(int nid, unsigned long start, unsigned long end);
#else
static inline void memory_present(int nid, unsigned long start, unsigned long end) {}
#endif

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
int local_memory_node(int node_id);
#else
static inline int local_memory_node(int node_id) { return node_id; };
#endif

#ifdef CONFIG_NEED_NODE_MEMMAP_SIZE
unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
#endif

/*
 * zone_idx() returns 0 for the ZONE_DMA zone, 1 for the ZONE_NORMAL zone, etc.
 */
#define zone_idx(zone)		((zone) - (zone)->zone_pgdat->node_zones)
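
/*
 * Example (illustrative): zone_idx(&pgdat->node_zones[ZONE_NORMAL]) is
 * ZONE_NORMAL, since every zone is embedded in its node's node_zones[]
 * array and the macro is plain pointer subtraction.
 */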

/*
 * Returns true if a zone has pages managed by the buddy allocator.
 * All the reclaim decisions have to use this function rather than
 * populated_zone(). If the whole zone is reserved then we can easily
 * end up with populated_zone() && !managed_zone().
 */
static inline bool managed_zone(struct zone *zone)
{
	return zone->managed_pages;
}

/* Returns true if a zone has memory */
static inline bool populated_zone(struct zone *zone)
{
	return zone->present_pages;
}

extern int movable_zone;

#ifdef CONFIG_HIGHMEM
static inline int zone_movable_is_highmem(void)
{
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
	return movable_zone == ZONE_HIGHMEM;
#else
	return (ZONE_MOVABLE - 1) == ZONE_HIGHMEM;
#endif
}
#endif

static inline int is_highmem_idx(enum zone_type idx)
{
#ifdef CONFIG_HIGHMEM
	return (idx == ZONE_HIGHMEM ||
		(idx == ZONE_MOVABLE && zone_movable_is_highmem()));
#else
	return 0;
#endif
}

/**
 * is_highmem - helper function to quickly check if a struct zone is a 
 *              highmem zone or not.  This is an attempt to keep references
 *              to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum.
 * @zone - pointer to struct zone variable
 */
static inline int is_highmem(struct zone *zone)
{
#ifdef CONFIG_HIGHMEM
	return is_highmem_idx(zone_idx(zone));
#else
	return 0;
#endif
}

/* These two functions are used to setup the per zone pages min values */
struct ctl_table;
int min_free_kbytes_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int watermark_scale_factor_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1];
int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
			void __user *, size_t *, loff_t *);
int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int,
			void __user *, size_t *, loff_t *);

extern int numa_zonelist_order_handler(struct ctl_table *, int,
			void __user *, size_t *, loff_t *);
extern char numa_zonelist_order[];
#define NUMA_ZONELIST_ORDER_LEN 16	/* string buffer size */

#ifndef CONFIG_NEED_MULTIPLE_NODES

extern struct pglist_data contig_page_data;
#define NODE_DATA(nid)		(&contig_page_data)
#define NODE_MEM_MAP(nid)	mem_map

#else /* CONFIG_NEED_MULTIPLE_NODES */

#include <asm/mmzone.h>

#endif /* !CONFIG_NEED_MULTIPLE_NODES */

extern struct pglist_data *first_online_pgdat(void);
extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
extern struct zone *next_zone(struct zone *zone);

/**
 * for_each_online_pgdat - helper macro to iterate over all online nodes
 * @pgdat - pointer to a pg_data_t variable
 */
#define for_each_online_pgdat(pgdat)			\
	for (pgdat = first_online_pgdat();		\
	     pgdat;					\
	     pgdat = next_online_pgdat(pgdat))
/**
 * for_each_zone - helper macro to iterate over all memory zones
 * @zone - pointer to struct zone variable
 *
 * The user only needs to declare the zone variable, for_each_zone
 * fills it in.
 */
#define for_each_zone(zone)			        \
	for (zone = (first_online_pgdat())->node_zones; \
	     zone;					\
	     zone = next_zone(zone))

#define for_each_populated_zone(zone)		        \
	for (zone = (first_online_pgdat())->node_zones; \
	     zone;					\
	     zone = next_zone(zone))			\
		if (!populated_zone(zone))		\
			; /* do nothing */		\
		else

static inline struct zone *zonelist_zone(struct zoneref *zoneref)
{
	return zoneref->zone;
}

static inline int zonelist_zone_idx(struct zoneref *zoneref)
{
	return zoneref->zone_idx;
}

static inline int zonelist_node_idx(struct zoneref *zoneref)
{
#ifdef CONFIG_NUMA
	/* zone_to_nid not available in this context */
	return zoneref->zone->node;
#else
	return 0;
#endif /* CONFIG_NUMA */
}

struct zoneref *__next_zones_zonelist(struct zoneref *z,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes);

/**
 * next_zones_zonelist - Returns the next zone at or below highest_zoneidx within the allowed nodemask using a cursor within a zonelist as a starting point
 * @z - The cursor used as a starting point for the search
 * @highest_zoneidx - The zone index of the highest zone to return
 * @nodes - An optional nodemask to filter the zonelist with
 *
 * This function returns the next zone at or below a given zone index that is
 * within the allowed nodemask using a cursor as the starting point for the
 * search. The zoneref returned is a cursor that represents the current zone
 * being examined. It should be advanced by one before calling
 * next_zones_zonelist again.
 */
static __always_inline struct zoneref *next_zones_zonelist(struct zoneref *z,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes)
{
	if (likely(!nodes && zonelist_zone_idx(z) <= highest_zoneidx))
		return z;
	return __next_zones_zonelist(z, highest_zoneidx, nodes);
}

/**
 * first_zones_zonelist - Returns the first zone at or below highest_zoneidx within the allowed nodemask in a zonelist
 * @zonelist - The zonelist to search for a suitable zone
 * @highest_zoneidx - The zone index of the highest zone to return
 * @nodes - An optional nodemask to filter the zonelist with
 * @return - Zoneref pointer for the first suitable zone found (see below)
 *
 * This function returns the first zone at or below a given zone index that is
 * within the allowed nodemask. The zoneref returned is a cursor that can be
 * used to iterate the zonelist with next_zones_zonelist by advancing it by
 * one before calling.
 *
 * When no eligible zone is found, zoneref->zone is NULL (zoneref itself is
 * never NULL). This may happen either genuinely, or due to concurrent nodemask
 * update due to cpuset modification.
 */
static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes)
{
	return next_zones_zonelist(zonelist->_zonerefs,
							highest_zoneidx, nodes);
}

/**
 * for_each_zone_zonelist_nodemask - helper macro to iterate over valid zones in a zonelist at or below a given zone index and within a nodemask
 * @zone - The current zone in the iterator
 * @z - The current pointer within zonelist->zones being iterated
 * @zlist - The zonelist being iterated
 * @highidx - The zone index of the highest zone to return
 * @nodemask - Nodemask allowed by the allocator
 *
 * This iterator iterates though all zones at or below a given zone index and
 * within a given nodemask
 */
#define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
	for (z = first_zones_zonelist(zlist, highidx, nodemask), zone = zonelist_zone(z);	\
		zone;							\
		z = next_zones_zonelist(++z, highidx, nodemask),	\
			zone = zonelist_zone(z))

#define for_next_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
	for (zone = z->zone;	\
		zone;							\
		z = next_zones_zonelist(++z, highidx, nodemask),	\
			zone = zonelist_zone(z))


/**
 * for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a given zone index
 * @zone - The current zone in the iterator
 * @z - The current pointer within zonelist->zones being iterated
 * @zlist - The zonelist being iterated
 * @highidx - The zone index of the highest zone to return
 *
 * This iterator iterates though all zones at or below a given zone index.
 */
#define for_each_zone_zonelist(zone, z, zlist, highidx) \
	for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, NULL)
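
/*
 * Usage sketch (illustrative): walking a zonelist the way the allocator
 * does, visiting every zone at or below a ceiling index,
 *
 *	struct zoneref *z;
 *	struct zone *zone;
 *
 *	for_each_zone_zonelist(zone, z, zonelist, ZONE_NORMAL) {
 *		if (!managed_zone(zone))
 *			continue;
 *		// consider this zone for the allocation
 *	}
 */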

#ifdef CONFIG_SPARSEMEM
#include <asm/sparsemem.h>
#endif

#if !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) && \
	!defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
static inline unsigned long early_pfn_to_nid(unsigned long pfn)
{
	return 0;
}
#endif

#ifdef CONFIG_FLATMEM
#define pfn_to_nid(pfn)		(0)
#endif

#ifdef CONFIG_SPARSEMEM

/*
 * SECTION_SHIFT    		#bits space required to store a section #
 *
 * PA_SECTION_SHIFT		physical address to/from section number
 * PFN_SECTION_SHIFT		pfn to/from section number
 */
#define PA_SECTION_SHIFT	(SECTION_SIZE_BITS)
#define PFN_SECTION_SHIFT	(SECTION_SIZE_BITS - PAGE_SHIFT)

#define NR_MEM_SECTIONS		(1UL << SECTIONS_SHIFT)

#define PAGES_PER_SECTION       (1UL << PFN_SECTION_SHIFT)
#define PAGE_SECTION_MASK	(~(PAGES_PER_SECTION-1))
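
/*
 * Example (illustrative): on x86_64, SECTION_SIZE_BITS is 27 and PAGE_SHIFT
 * is 12, so PFN_SECTION_SHIFT = 15 and PAGES_PER_SECTION = 1 << 15 = 32768
 * pages, i.e. each mem_section covers 128 MiB of physical address space.
 */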

#define SECTION_BLOCKFLAGS_BITS \
	((1UL << (PFN_SECTION_SHIFT - pageblock_order)) * NR_PAGEBLOCK_BITS)

#if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS
#error Allocator MAX_ORDER exceeds SECTION_SIZE
#endif

#define pfn_to_section_nr(pfn) ((pfn) >> PFN_SECTION_SHIFT)
#define section_nr_to_pfn(sec) ((sec) << PFN_SECTION_SHIFT)

#define SECTION_ALIGN_UP(pfn)	(((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK)
#define SECTION_ALIGN_DOWN(pfn)	((pfn) & PAGE_SECTION_MASK)

struct page;
struct page_ext;
struct mem_section {
	/*
	 * This is, logically, a pointer to an array of struct
	 * pages.  However, it is stored with some other magic.
	 * (see sparse.c::sparse_init_one_section())
	 *
	 * Additionally during early boot we encode node id of
	 * the location of the section here to guide allocation.
	 * (see sparse.c::memory_present())
	 *
	 * Making it a UL at least makes someone do a cast
	 * before using it wrong.
	 */
	unsigned long section_mem_map;

	/* See declaration of similar field in struct zone */
	unsigned long *pageblock_flags;
#ifdef CONFIG_PAGE_EXTENSION
	/*
	 * If SPARSEMEM, pgdat doesn't have page_ext pointer. We use
	 * section. (see page_ext.h about this.)
	 */
	struct page_ext *page_ext;
	unsigned long pad;
#endif
	/*
	 * WARNING: mem_section must be a power-of-2 in size for the
	 * calculation and use of SECTION_ROOT_MASK to make sense.
	 */
};

#ifdef CONFIG_SPARSEMEM_EXTREME
#define SECTIONS_PER_ROOT       (PAGE_SIZE / sizeof (struct mem_section))
#else
#define SECTIONS_PER_ROOT	1
#endif

#define SECTION_NR_TO_ROOT(sec)	((sec) / SECTIONS_PER_ROOT)
#define NR_SECTION_ROOTS	DIV_ROUND_UP(NR_MEM_SECTIONS, SECTIONS_PER_ROOT)
#define SECTION_ROOT_MASK	(SECTIONS_PER_ROOT - 1)

#ifdef CONFIG_SPARSEMEM_EXTREME
extern struct mem_section *mem_section[NR_SECTION_ROOTS];
#else
extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
#endif

static inline struct mem_section *__nr_to_section(unsigned long nr)
{
	if (!mem_section[SECTION_NR_TO_ROOT(nr)])
		return NULL;
	return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
}
extern int __section_nr(struct mem_section* ms);
extern unsigned long usemap_size(void);

/*
 * We use the lower bits of the mem_map pointer to store
 * a little bit of information.  There should be at least
 * 3 bits here due to 32-bit alignment.
 */
#define	SECTION_MARKED_PRESENT	(1UL<<0)
#define SECTION_HAS_MEM_MAP	(1UL<<1)
#define SECTION_MAP_LAST_BIT	(1UL<<2)
#define SECTION_MAP_MASK	(~(SECTION_MAP_LAST_BIT-1))
#define SECTION_NID_SHIFT	2

static inline struct page *__section_mem_map_addr(struct mem_section *section)
{
	unsigned long map = section->section_mem_map;
	map &= SECTION_MAP_MASK;
	return (struct page *)map;
}

static inline int present_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_MARKED_PRESENT));
}

static inline int present_section_nr(unsigned long nr)
{
	return present_section(__nr_to_section(nr));
}

static inline int valid_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP));
}

static inline int valid_section_nr(unsigned long nr)
{
	return valid_section(__nr_to_section(nr));
}

static inline struct mem_section *__pfn_to_section(unsigned long pfn)
{
	return __nr_to_section(pfn_to_section_nr(pfn));
}

#ifndef CONFIG_HAVE_ARCH_PFN_VALID
static inline int pfn_valid(unsigned long pfn)
{
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	return valid_section(__nr_to_section(pfn_to_section_nr(pfn)));
}
#endif

static inline int pfn_present(unsigned long pfn)
{
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	return present_section(__nr_to_section(pfn_to_section_nr(pfn)));
}

/*
 * These are _only_ used during initialisation, therefore they
 * can use __initdata ...  They could have names to indicate
 * this restriction.
 */
#ifdef CONFIG_NUMA
#define pfn_to_nid(pfn)							\
({									\
	unsigned long __pfn_to_nid_pfn = (pfn);				\
	page_to_nid(pfn_to_page(__pfn_to_nid_pfn));			\
})
#else
#define pfn_to_nid(pfn)		(0)
#endif

#define early_pfn_valid(pfn)	pfn_valid(pfn)
void sparse_init(void);
#else
#define sparse_init()	do {} while (0)
#define sparse_index_init(_sec, _nid)  do {} while (0)
#endif /* CONFIG_SPARSEMEM */

/*
 * During memory init memblocks map pfns to nids. The search is expensive and
 * this caches recent lookups. The implementation of __early_pfn_to_nid
 * may treat start/end as pfns or sections.
 */
struct mminit_pfnnid_cache {
	unsigned long last_start;
	unsigned long last_end;
	int last_nid;
};

#ifndef early_pfn_valid
#define early_pfn_valid(pfn)	(1)
#endif

void memory_present(int nid, unsigned long start, unsigned long end);
unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);

/*
 * If it is possible to have holes within a MAX_ORDER_NR_PAGES, then we
 * need to check pfn validity within that MAX_ORDER_NR_PAGES block.
 * pfn_valid_within() should be used in this case; we optimise this away
 * when we have no holes within a MAX_ORDER_NR_PAGES block.
 */
#ifdef CONFIG_HOLES_IN_ZONE
#define pfn_valid_within(pfn) pfn_valid(pfn)
#else
#define pfn_valid_within(pfn) (1)
#endif

#ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL
/*
 * pfn_valid() is meant to be able to tell if a given PFN has valid memmap
 * associated with it or not. In FLATMEM, it is expected that holes always
 * have valid memmap as long as there is valid PFNs either side of the hole.
 * In SPARSEMEM, it is assumed that a valid section has a memmap for the
 * entire section.
 *
 * However, ARM, and maybe other embedded architectures in the future,
 * free memmap backing holes to save memory on the assumption the memmap is
 * never used. The page_zone linkages are then broken even though pfn_valid()
 * returns true. A walker of the full memmap must then do this additional
 * check to ensure the memmap they are looking at is sane by making sure
 * the zone and PFN linkages are still valid. This is expensive, but walkers
 * of the full memmap are extremely rare.
 */
bool memmap_valid_within(unsigned long pfn,
					struct page *page, struct zone *zone);
#else
static inline bool memmap_valid_within(unsigned long pfn,
					struct page *page, struct zone *zone)
{
	return true;
}
#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */

#endif /* !__GENERATING_BOUNDS_H */
#endif /* !__ASSEMBLY__ */
#endif /* _LINUX_MMZONE_H */