#ifndef _LINUX_MMZONE_H
#define _LINUX_MMZONE_H

#ifndef __ASSEMBLY__
#ifndef __GENERATING_BOUNDS_H

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/numa.h>
#include <linux/init.h>
#include <linux/seqlock.h>
#include <linux/nodemask.h>
#include <linux/pageblock-flags.h>
#include <linux/page-flags-layout.h>
#include <linux/atomic.h>
#include <asm/page.h>

/* Free memory management - zoned buddy allocator.  */
#ifndef CONFIG_FORCE_MAX_ZONEORDER
#define MAX_ORDER 11
#else
#define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER
#endif
#define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1))
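
/*
 * Worked example (assuming the default MAX_ORDER of 11 and 4KiB pages):
 * the largest buddy allocation has order MAX_ORDER - 1 == 10, so
 * MAX_ORDER_NR_PAGES == 1 << 10 == 1024 pages, i.e. 4MiB of physically
 * contiguous memory per allocation.
 */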

/*
 * PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed
 * costly to service.  That is between allocation orders which should
 * coalesce naturally under reasonable reclaim pressure and those which
 * will not.
 */
#define PAGE_ALLOC_COSTLY_ORDER 3

enum {
	MIGRATE_UNMOVABLE,
	MIGRATE_MOVABLE,
	MIGRATE_RECLAIMABLE,
	MIGRATE_PCPTYPES,	/* the number of types on the pcp lists */
	MIGRATE_HIGHATOMIC = MIGRATE_PCPTYPES,
#ifdef CONFIG_CMA
	/*
	 * The MIGRATE_CMA migration type is designed to mimic the way
	 * ZONE_MOVABLE works.  Only movable pages can be allocated
	 * from MIGRATE_CMA pageblocks and the page allocator never
	 * implicitly changes the migration type of a MIGRATE_CMA pageblock.
	 *
	 * The way to use it is to change the migratetype of a range of
	 * pageblocks to MIGRATE_CMA, which can be done with the
	 * __free_pageblock_cma() function.  What is important though
	 * is that the range of pageblocks must be aligned to
	 * MAX_ORDER_NR_PAGES should the biggest page be bigger than
	 * a single pageblock.
	 */
	MIGRATE_CMA,
#endif
#ifdef CONFIG_MEMORY_ISOLATION
	MIGRATE_ISOLATE,	/* can't allocate from here */
#endif
	MIGRATE_TYPES
};

/* In mm/page_alloc.c; keep in sync also with show_migration_types() there */
extern char * const migratetype_names[MIGRATE_TYPES];

#ifdef CONFIG_CMA
#  define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
#else
#  define is_migrate_cma(migratetype) false
#endif

#define for_each_migratetype_order(order, type) \
	for (order = 0; order < MAX_ORDER; order++) \
		for (type = 0; type < MIGRATE_TYPES; type++)

extern int page_group_by_mobility_disabled;

#define NR_MIGRATETYPE_BITS (PB_migrate_end - PB_migrate + 1)
#define MIGRATETYPE_MASK ((1UL << NR_MIGRATETYPE_BITS) - 1)

#define get_pageblock_migratetype(page)					\
	get_pfnblock_flags_mask(page, page_to_pfn(page),		\
			PB_migrate_end, MIGRATETYPE_MASK)
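
/*
 * Usage sketch (hypothetical caller, not part of this header): the
 * pageblock's migratetype is read straight from the pageblock bitmap,
 * and only movable allocations may be served from a CMA block:
 *
 *	int mt = get_pageblock_migratetype(page);
 *	bool movable_only = is_migrate_cma(mt);
 */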

struct free_area {
	struct list_head	free_list[MIGRATE_TYPES];
	unsigned long		nr_free;
};

struct pglist_data;

/*
 * zone->lock and the zone lru_lock are two of the hottest locks in the kernel.
 * So add a wild amount of padding here to ensure that they fall into separate
 * cachelines.  There are very few zone structures in the machine, so space
 * consumption is not a concern here.
 */
#if defined(CONFIG_SMP)
struct zone_padding {
	char x[0];
} ____cacheline_internodealigned_in_smp;
#define ZONE_PADDING(name)	struct zone_padding name;
#else
#define ZONE_PADDING(name)
#endif

enum zone_stat_item {
	/* First 128 byte cacheline (assuming 64 bit words) */
	NR_FREE_PAGES,
	NR_MLOCK,		/* mlock()ed pages found and moved off LRU */
	NR_SLAB_RECLAIMABLE,
	NR_SLAB_UNRECLAIMABLE,
	NR_PAGETABLE,		/* used for pagetables */
	NR_KERNEL_STACK,
	/* Second 128 byte cacheline */
	NR_BOUNCE,
#if IS_ENABLED(CONFIG_ZSMALLOC)
	NR_ZSPAGES,		/* allocated in zsmalloc */
#endif
#ifdef CONFIG_NUMA
	NUMA_HIT,		/* allocated in intended node */
	NUMA_MISS,		/* allocated in a non-intended node */
	NUMA_FOREIGN,		/* was intended here, hit elsewhere */
	NUMA_INTERLEAVE_HIT,	/* interleaver preferred this zone */
	NUMA_LOCAL,		/* allocation from local node */
	NUMA_OTHER,		/* allocation from other node */
#endif
	NR_FREE_CMA_PAGES,
	NR_VM_ZONE_STAT_ITEMS };

enum node_stat_item {
	NR_LRU_BASE,
	NR_INACTIVE_ANON = NR_LRU_BASE, /* must match order of LRU_[IN]ACTIVE */
	NR_ACTIVE_ANON,		/*  "     "     "   "       "         */
	NR_INACTIVE_FILE,	/*  "     "     "   "       "         */
	NR_ACTIVE_FILE,		/*  "     "     "   "       "         */
	NR_UNEVICTABLE,		/*  "     "     "   "       "         */
	NR_ISOLATED_ANON,	/* Temporarily isolated pages from the anon lru */
	NR_ISOLATED_FILE,	/* Temporarily isolated pages from the file lru */
	NR_PAGES_SCANNED,	/* pages scanned since last reclaim */
	WORKINGSET_REFAULT,
	WORKINGSET_ACTIVATE,
	WORKINGSET_NODERECLAIM,
	NR_ANON_MAPPED,	/* Mapped anonymous pages */
	NR_FILE_MAPPED,	/* pagecache pages mapped into pagetables.
			   only modified from process context */
	NR_FILE_PAGES,
	NR_FILE_DIRTY,
	NR_WRITEBACK,
	NR_WRITEBACK_TEMP,	/* Writeback using temporary buffers */
	NR_SHMEM,		/* shmem pages (includes tmpfs/GEM pages) */
	NR_SHMEM_THPS,
	NR_SHMEM_PMDMAPPED,
	NR_ANON_THPS,
	NR_UNSTABLE_NFS,	/* NFS unstable pages */
	NR_VMSCAN_WRITE,
	NR_VMSCAN_IMMEDIATE,	/* Prioritise for reclaim when writeback ends */
	NR_DIRTIED,		/* page dirtyings since bootup */
	NR_WRITTEN,		/* page writings since bootup */
	NR_VM_NODE_STAT_ITEMS
};

/*
 * We do arithmetic on the LRU lists in various places in the code,
 * so it is important to keep the active lists LRU_ACTIVE higher in
 * the array than the corresponding inactive lists, and to keep
 * the *_FILE lists LRU_FILE higher than the corresponding _ANON lists.
 *
 * This has to be kept in sync with the statistics in zone_stat_item
 * above and the descriptions in vmstat_text in mm/vmstat.c
 */
#define LRU_BASE 0
#define LRU_ACTIVE 1
#define LRU_FILE 2

enum lru_list {
	LRU_INACTIVE_ANON = LRU_BASE,
	LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE,
	LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE,
	LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE,
	LRU_UNEVICTABLE,
	NR_LRU_LISTS
};
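
/*
 * Illustrative sketch of the arithmetic described above (the real helpers
 * live in include/linux/mm_inline.h): a page's LRU index is composed by
 * adding LRU_FILE for file-backed pages and LRU_ACTIVE for active pages,
 * so, e.g., an active file page ends up on LRU_ACTIVE_FILE (index 3):
 *
 *	enum lru_list lru = LRU_BASE;
 *	if (file)
 *		lru += LRU_FILE;
 *	if (active)
 *		lru += LRU_ACTIVE;
 */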

#define for_each_lru(lru) for (lru = 0; lru < NR_LRU_LISTS; lru++)

#define for_each_evictable_lru(lru) for (lru = 0; lru <= LRU_ACTIVE_FILE; lru++)

static inline int is_file_lru(enum lru_list lru)
{
	return (lru == LRU_INACTIVE_FILE || lru == LRU_ACTIVE_FILE);
}

static inline int is_active_lru(enum lru_list lru)
{
	return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE);
}

struct zone_reclaim_stat {
	/*
	 * The pageout code in vmscan.c keeps track of how many of the
	 * mem/swap backed and file backed pages are referenced.
	 * The higher the rotated/scanned ratio, the more valuable
	 * that cache is.
	 *
	 * The anon LRU stats live in [0], file LRU stats in [1]
	 */
	unsigned long		recent_rotated[2];
	unsigned long		recent_scanned[2];
};

struct lruvec {
	struct list_head		lists[NR_LRU_LISTS];
	struct zone_reclaim_stat	reclaim_stat;
	/* Evictions & activations on the inactive file list */
	atomic_long_t			inactive_age;
#ifdef CONFIG_MEMCG
	struct pglist_data *pgdat;
#endif
};

/* Mask used at gathering information at once (see memcontrol.c) */
#define LRU_ALL_FILE (BIT(LRU_INACTIVE_FILE) | BIT(LRU_ACTIVE_FILE))
#define LRU_ALL_ANON (BIT(LRU_INACTIVE_ANON) | BIT(LRU_ACTIVE_ANON))
#define LRU_ALL	     ((1 << NR_LRU_LISTS) - 1)

/* Isolate clean file */
#define ISOLATE_CLEAN		((__force isolate_mode_t)0x1)
/* Isolate unmapped file */
#define ISOLATE_UNMAPPED	((__force isolate_mode_t)0x2)
/* Isolate for asynchronous migration */
#define ISOLATE_ASYNC_MIGRATE	((__force isolate_mode_t)0x4)
/* Isolate unevictable pages */
#define ISOLATE_UNEVICTABLE	((__force isolate_mode_t)0x8)

/* LRU Isolation modes. */
typedef unsigned __bitwise__ isolate_mode_t;

enum zone_watermarks {
	WMARK_MIN,
	WMARK_LOW,
	WMARK_HIGH,
	NR_WMARK
};

#define min_wmark_pages(z) (z->watermark[WMARK_MIN])
#define low_wmark_pages(z) (z->watermark[WMARK_LOW])
#define high_wmark_pages(z) (z->watermark[WMARK_HIGH])
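
/*
 * Usage sketch (hypothetical check; zone_page_state() is declared in
 * linux/vmstat.h): compare the zone's current free page count against
 * its low watermark, roughly what decides whether kswapd needs waking.
 *
 *	if (zone_page_state(zone, NR_FREE_PAGES) <= low_wmark_pages(zone))
 *		wake_kswapd = true;
 */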

struct per_cpu_pages {
	int count;		/* number of pages in the list */
	int high;		/* high watermark, emptying needed */
	int batch;		/* chunk size for buddy add/remove */

	/* Lists of pages, one per migrate type stored on the pcp-lists */
	struct list_head lists[MIGRATE_PCPTYPES];
};
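
/*
 * Illustrative note (a sketch of behaviour implemented in mm/page_alloc.c,
 * e.g. free_hot_cold_page()): once a CPU's pcp->count climbs above
 * pcp->high, a pcp->batch sized chunk is drained back to the buddy lists.
 *
 *	if (pcp->count >= pcp->high)
 *		free_pcppages_bulk(zone, pcp->batch, pcp);
 */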

struct per_cpu_pageset {
	struct per_cpu_pages pcp;
#ifdef CONFIG_NUMA
	s8 expire;
#endif
#ifdef CONFIG_SMP
	s8 stat_threshold;
	s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
#endif
};

struct per_cpu_nodestat {
	s8 stat_threshold;
	s8 vm_node_stat_diff[NR_VM_NODE_STAT_ITEMS];
};

#endif /* !__GENERATING_BOUNDS_H */

enum zone_type {
#ifdef CONFIG_ZONE_DMA
	/*
	 * ZONE_DMA is used when there are devices that are not able
	 * to do DMA to all of addressable memory (ZONE_NORMAL). Then we
	 * carve out the portion of memory that is needed for these devices.
	 * The range is arch specific.
	 *
	 * Some examples
	 *
	 * Architecture		Limit
	 * ---------------------------
	 * parisc, ia64, sparc	<4G
	 * s390			<2G
	 * arm			Various
	 * alpha		Unlimited or 0-16MB.
	 *
	 * i386, x86_64 and multiple other arches
	 * 			<16M.
	 */
	ZONE_DMA,
#endif
#ifdef CONFIG_ZONE_DMA32
	/*
	 * x86_64 needs two ZONE_DMAs because it supports devices that are
	 * only able to do DMA to the lower 16M, but also 32-bit devices that
	 * can only do DMA to areas below 4G.
	 */
	ZONE_DMA32,
#endif
	/*
	 * Normal addressable memory is in ZONE_NORMAL. DMA operations can be
	 * performed on pages in ZONE_NORMAL if the DMA devices support
	 * transfers to all addressable memory.
	 */
	ZONE_NORMAL,
#ifdef CONFIG_HIGHMEM
	/*
	 * A memory area that is only addressable by the kernel through
	 * mapping portions into its own address space. This is for example
	 * used by i386 to allow the kernel to address the memory beyond
	 * 900MB. The kernel will set up special mappings (page
	 * table entries on i386) for each page that the kernel needs to
	 * access.
	 */
	ZONE_HIGHMEM,
#endif
	ZONE_MOVABLE,
#ifdef CONFIG_ZONE_DEVICE
	ZONE_DEVICE,
#endif
	__MAX_NR_ZONES

};

#ifndef __GENERATING_BOUNDS_H

struct zone {
	/* Read-mostly fields */

	/* zone watermarks, access with *_wmark_pages(zone) macros */
	unsigned long watermark[NR_WMARK];

	unsigned long nr_reserved_highatomic;

	/*
	 * We don't know if the memory that we're going to allocate will be
	 * freeable or/and it will be released eventually, so to avoid totally
	 * wasting several GB of ram we must reserve some of the lower zone
	 * memory (otherwise we risk running OOM on the lower zones despite
	 * there being tons of freeable ram on the higher zones).  This array is
	 * recalculated at runtime if the sysctl_lowmem_reserve_ratio sysctl
	 * changes.
	 */
	long lowmem_reserve[MAX_NR_ZONES];

#ifdef CONFIG_NUMA
	int node;
#endif
	struct pglist_data	*zone_pgdat;
	struct per_cpu_pageset __percpu *pageset;

#ifndef CONFIG_SPARSEMEM
	/*
	 * Flags for a pageblock_nr_pages block. See pageblock-flags.h.
	 * In SPARSEMEM, this map is stored in struct mem_section
	 */
	unsigned long		*pageblock_flags;
#endif /* CONFIG_SPARSEMEM */

	/* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
	unsigned long		zone_start_pfn;

	/*
	 * spanned_pages is the total pages spanned by the zone, including
	 * holes, which is calculated as:
	 * 	spanned_pages = zone_end_pfn - zone_start_pfn;
	 *
	 * present_pages is physical pages existing within the zone, which
	 * is calculated as:
	 *	present_pages = spanned_pages - absent_pages(pages in holes);
	 *
	 * managed_pages is present pages managed by the buddy system, which
	 * is calculated as (reserved_pages includes pages allocated by the
	 * bootmem allocator):
	 *	managed_pages = present_pages - reserved_pages;
	 *
	 * So present_pages may be used by memory hotplug or memory power
	 * management logic to figure out unmanaged pages by checking
	 * (present_pages - managed_pages). And managed_pages should be used
	 * by page allocator and vm scanner to calculate all kinds of watermarks
	 * and thresholds.
	 *
	 * Locking rules:
	 *
	 * zone_start_pfn and spanned_pages are protected by span_seqlock.
	 * It is a seqlock because it has to be read outside of zone->lock,
	 * and it is done in the main allocator path.  But, it is written
	 * quite infrequently.
	 *
	 * The span_seq lock is declared along with zone->lock because it is
	 * frequently read in proximity to zone->lock.  It's good to
	 * give them a chance of being in the same cacheline.
	 *
	 * Write access to present_pages at runtime should be protected by
	 * mem_hotplug_begin/end(). Any reader who can't tolerate drift of
	 * present_pages should use get_online_mems() to get a stable value.
	 *
	 * Read access to managed_pages should be safe because it's unsigned
	 * long. Write access to zone->managed_pages and totalram_pages is
	 * protected by managed_page_count_lock at runtime. Ideally only
	 * adjust_managed_page_count() should be used instead of directly
	 * touching zone->managed_pages and totalram_pages.
	 */
	unsigned long		managed_pages;
	unsigned long		spanned_pages;
	unsigned long		present_pages;

	const char		*name;

#ifdef CONFIG_MEMORY_ISOLATION
	/*
	 * Number of isolated pageblocks. It is used to solve an incorrect
	 * freepage counting problem due to racy retrieval of the migratetype
	 * of a pageblock. Protected by zone->lock.
	 */
	unsigned long		nr_isolate_pageblock;
#endif

#ifdef CONFIG_MEMORY_HOTPLUG
	/* see spanned/present_pages for more description */
	seqlock_t		span_seqlock;
#endif

	/*
	 * wait_table		-- the array holding the hash table
	 * wait_table_hash_nr_entries	-- the size of the hash table array
	 * wait_table_bits	-- wait_table_size == (1 << wait_table_bits)
	 *
	 * The purpose of all these is to keep track of the people
	 * waiting for a page to become available and make them
	 * runnable again when possible. The trouble is that this
	 * consumes a lot of space, especially when so few things
	 * wait on pages at a given time. So instead of using
	 * per-page waitqueues, we use a waitqueue hash table.
	 *
	 * The bucket discipline is to sleep on the same queue when
	 * colliding and wake all in that wait queue when removing.
	 * When something wakes, it must check to be sure its page is
	 * truly available, a la thundering herd. The cost of a
	 * collision is great, but given the expected load of the
	 * table, they should be so rare as to be outweighed by the
	 * benefits from the saved space.
	 *
	 * __wait_on_page_locked() and unlock_page() in mm/filemap.c are the
	 * primary users of these fields, and in mm/page_alloc.c
	 * free_area_init_core() performs the initialization of them.
	 */
	wait_queue_head_t	*wait_table;
	unsigned long		wait_table_hash_nr_entries;
	unsigned long		wait_table_bits;
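
	/*
	 * Illustrative sketch of the bucket lookup described above (the real
	 * helper is page_waitqueue() in mm/filemap.c; hash_ptr() comes from
	 * linux/hash.h):
	 *
	 *	wait_queue_head_t *wq =
	 *		&zone->wait_table[hash_ptr(page, zone->wait_table_bits)];
	 */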

	/* Write-intensive fields used from the page allocator */
	ZONE_PADDING(_pad1_)

	/* free areas of different sizes */
	struct free_area	free_area[MAX_ORDER];

	/* zone flags, see below */
	unsigned long		flags;

	/* Primarily protects free_area */
	spinlock_t		lock;

	/* Write-intensive fields used by compaction and vmstats. */
	ZONE_PADDING(_pad2_)

	/*
	 * When free pages are below this point, additional steps are taken
	 * when reading the number of free pages to avoid per-cpu counter
	 * drift allowing watermarks to be breached
	 */
	unsigned long percpu_drift_mark;

#if defined CONFIG_COMPACTION || defined CONFIG_CMA
	/* pfn where compaction free scanner should start */
	unsigned long		compact_cached_free_pfn;
	/* pfn where async and sync compaction migration scanner should start */
	unsigned long		compact_cached_migrate_pfn[2];
#endif

#ifdef CONFIG_COMPACTION
	/*
	 * On compaction failure, 1<<compact_defer_shift compactions
	 * are skipped before trying again. The number attempted since
	 * last failure is tracked with compact_considered.
	 */
	unsigned int		compact_considered;
	unsigned int		compact_defer_shift;
	int			compact_order_failed;
#endif

#if defined CONFIG_COMPACTION || defined CONFIG_CMA
	/* Set to true when the PG_migrate_skip bits should be cleared */
	bool			compact_blockskip_flush;
#endif

	bool			contiguous;

	ZONE_PADDING(_pad3_)
	/* Zone statistics */
	atomic_long_t		vm_stat[NR_VM_ZONE_STAT_ITEMS];
} ____cacheline_internodealigned_in_smp;

enum pgdat_flags {
	PGDAT_CONGESTED,		/* pgdat has many dirty pages backed by
					 * a congested BDI
					 */
	PGDAT_DIRTY,			/* reclaim scanning has recently found
					 * many dirty file pages at the tail
					 * of the LRU.
					 */
	PGDAT_WRITEBACK,		/* reclaim scanning has recently found
					 * many pages under writeback
					 */
	PGDAT_RECLAIM_LOCKED,		/* prevents concurrent reclaim */
};

static inline unsigned long zone_end_pfn(const struct zone *zone)
{
	return zone->zone_start_pfn + zone->spanned_pages;
}

static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn)
{
	return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone);
}
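
/*
 * Usage sketch (hypothetical loop): walk every pfn spanned by a zone; holes
 * inside the span still have to be filtered out with pfn_valid_within()
 * (defined near the end of this header) or pfn_valid().
 *
 *	unsigned long pfn;
 *
 *	for (pfn = zone->zone_start_pfn; pfn < zone_end_pfn(zone); pfn++)
 *		if (pfn_valid_within(pfn))
 *			nr_checked++;
 */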

static inline bool zone_is_initialized(struct zone *zone)
{
	return !!zone->wait_table;
}

static inline bool zone_is_empty(struct zone *zone)
{
	return zone->spanned_pages == 0;
}

/*
 * The "priority" of VM scanning is how much of the queues we will scan in one
 * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the
 * queues ("queue_length >> 12") during an aging round.
 */
#define DEF_PRIORITY 12

/* Maximum number of zones on a zonelist */
#define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES)

enum {
	ZONELIST_FALLBACK,	/* zonelist with fallback */
#ifdef CONFIG_NUMA
	/*
	 * The NUMA zonelists are doubled because we need zonelists that
	 * restrict the allocations to a single node for __GFP_THISNODE.
	 */
	ZONELIST_NOFALLBACK,	/* zonelist without fallback (__GFP_THISNODE) */
#endif
	MAX_ZONELISTS
};

/*
 * This struct contains information about a zone in a zonelist. It is stored
 * here to avoid dereferences into large structures and lookups of tables
 */
struct zoneref {
	struct zone *zone;	/* Pointer to actual zone */
	int zone_idx;		/* zone_idx(zoneref->zone) */
};

/*
 * One allocation request operates on a zonelist. A zonelist
 * is a list of zones, the first one is the 'goal' of the
 * allocation, the other zones are fallback zones, in decreasing
 * priority.
 *
 * To speed the reading of the zonelist, the zonerefs contain the zone index
 * of the entry being read. Helper functions to access information given
 * a struct zoneref are
 *
 * zonelist_zone()	- Return the struct zone * for an entry in _zonerefs
 * zonelist_zone_idx()	- Return the index of the zone for an entry
 * zonelist_node_idx()	- Return the index of the node for an entry
 */
struct zonelist {
	struct zoneref _zonerefs[MAX_ZONES_PER_ZONELIST + 1];
};

#ifndef CONFIG_DISCONTIGMEM
/* The array of struct pages - for discontigmem use pgdat->lmem_map */
extern struct page *mem_map;
#endif

/*
 * The pg_data_t structure is used in machines with CONFIG_DISCONTIGMEM
 * (mostly NUMA machines?) to denote a higher-level memory unit than the
 * individual zones it contains.
 *
 * On NUMA machines, each NUMA node would have a pg_data_t to describe
 * its memory layout.
 *
 * Memory statistics and page replacement data structures are maintained on a
 * per-zone basis.
 */
struct bootmem_data;
typedef struct pglist_data {
	struct zone node_zones[MAX_NR_ZONES];
	struct zonelist node_zonelists[MAX_ZONELISTS];
	int nr_zones;
#ifdef CONFIG_FLAT_NODE_MEM_MAP	/* means !SPARSEMEM */
	struct page *node_mem_map;
#ifdef CONFIG_PAGE_EXTENSION
	struct page_ext *node_page_ext;
#endif
#endif
#ifndef CONFIG_NO_BOOTMEM
	struct bootmem_data *bdata;
#endif
#ifdef CONFIG_MEMORY_HOTPLUG
	/*
	 * Must be held any time you expect node_start_pfn, node_present_pages
	 * or node_spanned_pages to stay constant.  Holding this will also
	 * guarantee that any pfn_valid() stays that way.
	 *
	 * pgdat_resize_lock() and pgdat_resize_unlock() are provided to
	 * manipulate node_size_lock without checking for CONFIG_MEMORY_HOTPLUG.
	 *
	 * Nests above zone->lock and zone->span_seqlock
	 */
	spinlock_t node_size_lock;
#endif
	unsigned long node_start_pfn;
	unsigned long node_present_pages; /* total number of physical pages */
	unsigned long node_spanned_pages; /* total size of physical page
					     range, including holes */
	int node_id;
	wait_queue_head_t kswapd_wait;
	wait_queue_head_t pfmemalloc_wait;
	struct task_struct *kswapd;	/* Protected by
					   mem_hotplug_begin/end() */
	int kswapd_order;
	enum zone_type kswapd_classzone_idx;

#ifdef CONFIG_COMPACTION
	int kcompactd_max_order;
	enum zone_type kcompactd_classzone_idx;
	wait_queue_head_t kcompactd_wait;
	struct task_struct *kcompactd;
#endif
#ifdef CONFIG_NUMA_BALANCING
	/* Lock serializing the migrate rate limiting window */
	spinlock_t numabalancing_migrate_lock;

	/* Rate limiting time interval */
	unsigned long numabalancing_migrate_next_window;

	/* Number of pages migrated during the rate limiting time interval */
	unsigned long numabalancing_migrate_nr_pages;
#endif
	/*
	 * This is a per-node reserve of pages that are not available
	 * to userspace allocations.
	 */
	unsigned long		totalreserve_pages;

#ifdef CONFIG_NUMA
	/*
	 * zone reclaim becomes active if more unmapped pages exist.
	 */
	unsigned long		min_unmapped_pages;
	unsigned long		min_slab_pages;
#endif /* CONFIG_NUMA */

	/* Write-intensive fields used by page reclaim */
	ZONE_PADDING(_pad1_)
	spinlock_t		lru_lock;

#ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
	/*
	 * If memory initialisation on large machines is deferred then this
	 * is the first PFN that needs to be initialised.
	 */
	unsigned long first_deferred_pfn;
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	spinlock_t split_queue_lock;
	struct list_head split_queue;
	unsigned long split_queue_len;
#endif

	/* Fields commonly accessed by the page reclaim scanner */
	struct lruvec		lruvec;

	/*
	 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
	 * this node's LRU.  Maintained by the pageout code.
	 */
	unsigned int inactive_ratio;

	unsigned long		flags;

	ZONE_PADDING(_pad2_)

	/* Per-node vmstats */
	struct per_cpu_nodestat __percpu *per_cpu_nodestats;
	atomic_long_t		vm_stat[NR_VM_NODE_STAT_ITEMS];
} pg_data_t;

#define node_present_pages(nid)	(NODE_DATA(nid)->node_present_pages)
#define node_spanned_pages(nid)	(NODE_DATA(nid)->node_spanned_pages)
#ifdef CONFIG_FLAT_NODE_MEM_MAP
#define pgdat_page_nr(pgdat, pagenr)	((pgdat)->node_mem_map + (pagenr))
#else
#define pgdat_page_nr(pgdat, pagenr)	pfn_to_page((pgdat)->node_start_pfn + (pagenr))
#endif
#define nid_page_nr(nid, pagenr) 	pgdat_page_nr(NODE_DATA(nid),(pagenr))

#define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)
#define node_end_pfn(nid) pgdat_end_pfn(NODE_DATA(nid))
static inline spinlock_t *zone_lru_lock(struct zone *zone)
{
	return &zone->zone_pgdat->lru_lock;
}

static inline struct lruvec *node_lruvec(struct pglist_data *pgdat)
{
	return &pgdat->lruvec;
}

static inline unsigned long pgdat_end_pfn(pg_data_t *pgdat)
{
	return pgdat->node_start_pfn + pgdat->node_spanned_pages;
}

static inline bool pgdat_is_empty(pg_data_t *pgdat)
{
	return !pgdat->node_start_pfn && !pgdat->node_spanned_pages;
}

static inline int zone_id(const struct zone *zone)
{
	struct pglist_data *pgdat = zone->zone_pgdat;

	return zone - pgdat->node_zones;
}

#ifdef CONFIG_ZONE_DEVICE
static inline bool is_dev_zone(const struct zone *zone)
{
	return zone_id(zone) == ZONE_DEVICE;
}
#else
static inline bool is_dev_zone(const struct zone *zone)
{
	return false;
}
#endif

#include <linux/memory_hotplug.h>

extern struct mutex zonelists_mutex;
void build_all_zonelists(pg_data_t *pgdat, struct zone *zone);
void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx);
bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
			 int classzone_idx, unsigned int alloc_flags,
			 long free_pages);
bool zone_watermark_ok(struct zone *z, unsigned int order,
		unsigned long mark, int classzone_idx,
		unsigned int alloc_flags);
bool zone_watermark_ok_safe(struct zone *z, unsigned int order,
		unsigned long mark, int classzone_idx);
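
/*
 * Usage sketch (hypothetical caller): check whether @zone can still satisfy
 * an order-0 allocation above its low watermark and, if not, ask kswapd to
 * start reclaiming.
 *
 *	if (!zone_watermark_ok(zone, 0, low_wmark_pages(zone),
 *			       zone_idx(zone), 0))
 *		wakeup_kswapd(zone, 0, zone_idx(zone));
 */
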
enum memmap_context {
	MEMMAP_EARLY,
	MEMMAP_HOTPLUG,
};
extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
				     unsigned long size);

extern void lruvec_init(struct lruvec *lruvec);

static inline struct pglist_data *lruvec_pgdat(struct lruvec *lruvec)
{
#ifdef CONFIG_MEMCG
	return lruvec->pgdat;
#else
	return container_of(lruvec, struct pglist_data, lruvec);
#endif
}

extern unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru);

#ifdef CONFIG_HAVE_MEMORY_PRESENT
void memory_present(int nid, unsigned long start, unsigned long end);
#else
static inline void memory_present(int nid, unsigned long start, unsigned long end) {}
#endif

#ifdef CONFIG_HAVE_MEMORYLESS_NODES
int local_memory_node(int node_id);
#else
static inline int local_memory_node(int node_id) { return node_id; };
#endif

#ifdef CONFIG_NEED_NODE_MEMMAP_SIZE
unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
#endif

/*
 * zone_idx() returns 0 for the ZONE_DMA zone, 1 for the ZONE_NORMAL zone, etc.
 */
#define zone_idx(zone)		((zone) - (zone)->zone_pgdat->node_zones)

static inline int populated_zone(struct zone *zone)
{
	return (!!zone->present_pages);
}

extern int movable_zone;

#ifdef CONFIG_HIGHMEM
static inline int zone_movable_is_highmem(void)
{
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
	return movable_zone == ZONE_HIGHMEM;
#else
	return (ZONE_MOVABLE - 1) == ZONE_HIGHMEM;
#endif
}
#endif

static inline int is_highmem_idx(enum zone_type idx)
{
#ifdef CONFIG_HIGHMEM
	return (idx == ZONE_HIGHMEM ||
		(idx == ZONE_MOVABLE && zone_movable_is_highmem()));
#else
	return 0;
#endif
}

/**
 * is_highmem - helper function to quickly check if a struct zone is a 
 *              highmem zone or not.  This is an attempt to keep references
 *              to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum.
 * @zone - pointer to struct zone variable
 */
static inline int is_highmem(struct zone *zone)
{
#ifdef CONFIG_HIGHMEM
	return is_highmem_idx(zone_idx(zone));
#else
	return 0;
#endif
}

/* These two functions are used to setup the per zone pages min values */
struct ctl_table;
int min_free_kbytes_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int watermark_scale_factor_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1];
int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int,
					void __user *, size_t *, loff_t *);
int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
			void __user *, size_t *, loff_t *);
int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int,
			void __user *, size_t *, loff_t *);

extern int numa_zonelist_order_handler(struct ctl_table *, int,
			void __user *, size_t *, loff_t *);
extern char numa_zonelist_order[];
#define NUMA_ZONELIST_ORDER_LEN 16	/* string buffer size */

#ifndef CONFIG_NEED_MULTIPLE_NODES

extern struct pglist_data contig_page_data;
#define NODE_DATA(nid)		(&contig_page_data)
#define NODE_MEM_MAP(nid)	mem_map

#else /* CONFIG_NEED_MULTIPLE_NODES */

#include <asm/mmzone.h>

#endif /* !CONFIG_NEED_MULTIPLE_NODES */

extern struct pglist_data *first_online_pgdat(void);
extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
extern struct zone *next_zone(struct zone *zone);

/**
 * for_each_online_pgdat - helper macro to iterate over all online nodes
 * @pgdat - pointer to a pg_data_t variable
 */
#define for_each_online_pgdat(pgdat)			\
	for (pgdat = first_online_pgdat();		\
	     pgdat;					\
	     pgdat = next_online_pgdat(pgdat))
/**
 * for_each_zone - helper macro to iterate over all memory zones
 * @zone - pointer to struct zone variable
 *
 * The user only needs to declare the zone variable, for_each_zone
 * fills it in.
 */
#define for_each_zone(zone)			        \
	for (zone = (first_online_pgdat())->node_zones; \
	     zone;					\
	     zone = next_zone(zone))

#define for_each_populated_zone(zone)		        \
	for (zone = (first_online_pgdat())->node_zones; \
	     zone;					\
	     zone = next_zone(zone))			\
		if (!populated_zone(zone))		\
			; /* do nothing */		\
		else

static inline struct zone *zonelist_zone(struct zoneref *zoneref)
{
	return zoneref->zone;
}

static inline int zonelist_zone_idx(struct zoneref *zoneref)
{
	return zoneref->zone_idx;
}

static inline int zonelist_node_idx(struct zoneref *zoneref)
{
#ifdef CONFIG_NUMA
	/* zone_to_nid not available in this context */
	return zoneref->zone->node;
#else
	return 0;
#endif /* CONFIG_NUMA */
}

struct zoneref *__next_zones_zonelist(struct zoneref *z,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes);

/**
 * next_zones_zonelist - Returns the next zone at or below highest_zoneidx within the allowed nodemask using a cursor within a zonelist as a starting point
 * @z - The cursor used as a starting point for the search
 * @highest_zoneidx - The zone index of the highest zone to return
 * @nodes - An optional nodemask to filter the zonelist with
 *
 * This function returns the next zone at or below a given zone index that is
 * within the allowed nodemask using a cursor as the starting point for the
 * search. The zoneref returned is a cursor that represents the current zone
 * being examined. It should be advanced by one before calling
 * next_zones_zonelist again.
 */
static __always_inline struct zoneref *next_zones_zonelist(struct zoneref *z,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes)
{
	if (likely(!nodes && zonelist_zone_idx(z) <= highest_zoneidx))
		return z;
	return __next_zones_zonelist(z, highest_zoneidx, nodes);
}

/**
 * first_zones_zonelist - Returns the first zone at or below highest_zoneidx within the allowed nodemask in a zonelist
 * @zonelist - The zonelist to search for a suitable zone
 * @highest_zoneidx - The zone index of the highest zone to return
 * @nodes - An optional nodemask to filter the zonelist with
 * @zone - The first suitable zone found is returned via this parameter
 *
 * This function returns the first zone at or below a given zone index that is
 * within the allowed nodemask. The zoneref returned is a cursor that can be
 * used to iterate the zonelist with next_zones_zonelist by advancing it by
 * one before calling.
 */
static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes)
{
	return next_zones_zonelist(zonelist->_zonerefs,
							highest_zoneidx, nodes);
}

/**
 * for_each_zone_zonelist_nodemask - helper macro to iterate over valid zones in a zonelist at or below a given zone index and within a nodemask
 * @zone - The current zone in the iterator
 * @z - The current pointer within zonelist->zones being iterated
 * @zlist - The zonelist being iterated
 * @highidx - The zone index of the highest zone to return
 * @nodemask - Nodemask allowed by the allocator
 *
 * This iterator iterates through all zones at or below a given zone index and
 * within a given nodemask.
 */
#define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
	for (z = first_zones_zonelist(zlist, highidx, nodemask), zone = zonelist_zone(z);	\
		zone;							\
		z = next_zones_zonelist(++z, highidx, nodemask),	\
			zone = zonelist_zone(z))

#define for_next_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
	for (zone = z->zone;	\
		zone;							\
		z = next_zones_zonelist(++z, highidx, nodemask),	\
			zone = zonelist_zone(z))


/**
 * for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a given zone index
 * @zone - The current zone in the iterator
 * @z - The current pointer within zonelist->zones being iterated
 * @zlist - The zonelist being iterated
 * @highidx - The zone index of the highest zone to return
 *
 * This iterator iterates through all zones at or below a given zone index.
 */
#define for_each_zone_zonelist(zone, z, zlist, highidx) \
	for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, NULL)
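
/*
 * Usage sketch (hypothetical caller; node_zonelist() and gfp_zone() are
 * declared in linux/gfp.h): walk the zones eligible for a GFP_KERNEL
 * allocation on the current node, in fallback order.
 *
 *	struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
 *	struct zoneref *z;
 *	struct zone *zone;
 *
 *	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(GFP_KERNEL))
 *		pr_info("candidate zone %s\n", zone->name);
 */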

#ifdef CONFIG_SPARSEMEM
#include <asm/sparsemem.h>
#endif

#if !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) && \
	!defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
static inline unsigned long early_pfn_to_nid(unsigned long pfn)
{
	return 0;
}
#endif

#ifdef CONFIG_FLATMEM
#define pfn_to_nid(pfn)		(0)
#endif

#ifdef CONFIG_SPARSEMEM

/*
 * SECTION_SHIFT    		#bits space required to store a section #
 *
 * PA_SECTION_SHIFT		physical address to/from section number
 * PFN_SECTION_SHIFT		pfn to/from section number
 */
#define PA_SECTION_SHIFT	(SECTION_SIZE_BITS)
#define PFN_SECTION_SHIFT	(SECTION_SIZE_BITS - PAGE_SHIFT)

#define NR_MEM_SECTIONS		(1UL << SECTIONS_SHIFT)

#define PAGES_PER_SECTION       (1UL << PFN_SECTION_SHIFT)
#define PAGE_SECTION_MASK	(~(PAGES_PER_SECTION-1))
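
/*
 * Worked example (assuming SECTION_SIZE_BITS == 27, as on x86_64, and
 * PAGE_SHIFT == 12): PFN_SECTION_SHIFT == 15, so each mem_section covers
 * PAGES_PER_SECTION == 1 << 15 == 32768 pages, i.e. 128MiB of address space.
 */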

#define SECTION_BLOCKFLAGS_BITS \
	((1UL << (PFN_SECTION_SHIFT - pageblock_order)) * NR_PAGEBLOCK_BITS)

#if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS
#error Allocator MAX_ORDER exceeds SECTION_SIZE
#endif

#define pfn_to_section_nr(pfn) ((pfn) >> PFN_SECTION_SHIFT)
#define section_nr_to_pfn(sec) ((sec) << PFN_SECTION_SHIFT)

#define SECTION_ALIGN_UP(pfn)	(((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK)
#define SECTION_ALIGN_DOWN(pfn)	((pfn) & PAGE_SECTION_MASK)

struct page;
struct page_ext;
struct mem_section {
	/*
	 * This is, logically, a pointer to an array of struct
	 * pages.  However, it is stored with some other magic.
	 * (see sparse.c::sparse_init_one_section())
	 *
	 * Additionally during early boot we encode node id of
	 * the location of the section here to guide allocation.
	 * (see sparse.c::memory_present())
	 *
	 * Making it a UL at least makes someone do a cast
	 * before using it wrong.
	 */
	unsigned long section_mem_map;

	/* See declaration of similar field in struct zone */
	unsigned long *pageblock_flags;
#ifdef CONFIG_PAGE_EXTENSION
	/*
	 * If SPARSEMEM, pgdat doesn't have a page_ext pointer. We store
	 * it in the section instead. (see page_ext.h about this.)
	 */
	struct page_ext *page_ext;
	unsigned long pad;
#endif
	/*
	 * WARNING: mem_section must be a power-of-2 in size for the
	 * calculation and use of SECTION_ROOT_MASK to make sense.
	 */
};

#ifdef CONFIG_SPARSEMEM_EXTREME
#define SECTIONS_PER_ROOT       (PAGE_SIZE / sizeof (struct mem_section))
#else
#define SECTIONS_PER_ROOT	1
#endif

#define SECTION_NR_TO_ROOT(sec)	((sec) / SECTIONS_PER_ROOT)
#define NR_SECTION_ROOTS	DIV_ROUND_UP(NR_MEM_SECTIONS, SECTIONS_PER_ROOT)
#define SECTION_ROOT_MASK	(SECTIONS_PER_ROOT - 1)

#ifdef CONFIG_SPARSEMEM_EXTREME
extern struct mem_section *mem_section[NR_SECTION_ROOTS];
#else
extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
#endif

static inline struct mem_section *__nr_to_section(unsigned long nr)
{
	if (!mem_section[SECTION_NR_TO_ROOT(nr)])
		return NULL;
	return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
}
extern int __section_nr(struct mem_section* ms);
extern unsigned long usemap_size(void);

/*
 * We use the lower bits of the mem_map pointer to store
 * a little bit of information.  There should be at least
 * 3 bits here due to 32-bit alignment.
 */
#define	SECTION_MARKED_PRESENT	(1UL<<0)
#define SECTION_HAS_MEM_MAP	(1UL<<1)
#define SECTION_MAP_LAST_BIT	(1UL<<2)
#define SECTION_MAP_MASK	(~(SECTION_MAP_LAST_BIT-1))
#define SECTION_NID_SHIFT	2

static inline struct page *__section_mem_map_addr(struct mem_section *section)
{
	unsigned long map = section->section_mem_map;
	map &= SECTION_MAP_MASK;
	return (struct page *)map;
}

static inline int present_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_MARKED_PRESENT));
}

static inline int present_section_nr(unsigned long nr)
{
	return present_section(__nr_to_section(nr));
}

static inline int valid_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP));
}

static inline int valid_section_nr(unsigned long nr)
{
	return valid_section(__nr_to_section(nr));
}

static inline struct mem_section *__pfn_to_section(unsigned long pfn)
{
	return __nr_to_section(pfn_to_section_nr(pfn));
}

#ifndef CONFIG_HAVE_ARCH_PFN_VALID
static inline int pfn_valid(unsigned long pfn)
{
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	return valid_section(__nr_to_section(pfn_to_section_nr(pfn)));
}
#endif

static inline int pfn_present(unsigned long pfn)
{
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	return present_section(__nr_to_section(pfn_to_section_nr(pfn)));
}

/*
 * These are _only_ used during initialisation, therefore they
 * can use __initdata ...  They could have names to indicate
 * this restriction.
 */
#ifdef CONFIG_NUMA
#define pfn_to_nid(pfn)							\
({									\
	unsigned long __pfn_to_nid_pfn = (pfn);				\
	page_to_nid(pfn_to_page(__pfn_to_nid_pfn));			\
})
#else
#define pfn_to_nid(pfn)		(0)
#endif

#define early_pfn_valid(pfn)	pfn_valid(pfn)
void sparse_init(void);
#else
#define sparse_init()	do {} while (0)
#define sparse_index_init(_sec, _nid)  do {} while (0)
#endif /* CONFIG_SPARSEMEM */

/*
 * During memory init memblocks map pfns to nids. The search is expensive and
 * this caches recent lookups. The implementation of __early_pfn_to_nid
 * may treat start/end as pfns or sections.
 */
struct mminit_pfnnid_cache {
	unsigned long last_start;
	unsigned long last_end;
	int last_nid;
};

#ifndef early_pfn_valid
#define early_pfn_valid(pfn)	(1)
#endif

void memory_present(int nid, unsigned long start, unsigned long end);
unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);

/*
 * If it is possible to have holes within a MAX_ORDER_NR_PAGES block, then we
 * need to check pfn validity within that MAX_ORDER_NR_PAGES block.
 * pfn_valid_within() should be used in this case; we optimise this away
 * when we have no holes within a MAX_ORDER_NR_PAGES block.
 */
#ifdef CONFIG_HOLES_IN_ZONE
#define pfn_valid_within(pfn) pfn_valid(pfn)
#else
#define pfn_valid_within(pfn) (1)
#endif

#ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL
/*
 * pfn_valid() is meant to be able to tell if a given PFN has valid memmap
 * associated with it or not. In FLATMEM, it is expected that holes always
 * have valid memmap as long as there are valid PFNs on either side of the hole.
 * In SPARSEMEM, it is assumed that a valid section has a memmap for the
 * entire section.
 *
 * However, ARM, and maybe other embedded architectures in the future,
 * free memmap backing holes to save memory on the assumption that the memmap is
 * never used. The page_zone linkages are then broken even though pfn_valid()
 * returns true. A walker of the full memmap must then do this additional
 * check to ensure the memmap they are looking at is sane by making sure
 * the zone and PFN linkages are still valid. This is expensive, but walkers
 * of the full memmap are extremely rare.
 */
bool memmap_valid_within(unsigned long pfn,
					struct page *page, struct zone *zone);
#else
static inline bool memmap_valid_within(unsigned long pfn,
					struct page *page, struct zone *zone)
{
	return true;
}
#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */

#endif /* !__GENERATING_BOUNDS_H */
#endif /* !__ASSEMBLY__ */
#endif /* _LINUX_MMZONE_H */