#ifndef _LINUX_MMZONE_H
#define _LINUX_MMZONE_H

#ifndef __ASSEMBLY__
#ifndef __GENERATING_BOUNDS_H

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/numa.h>
#include <linux/init.h>
#include <linux/seqlock.h>
#include <linux/nodemask.h>
#include <linux/pageblock-flags.h>
#include <linux/bounds.h>
#include <asm/atomic.h>
#include <asm/page.h>

/* Free memory management - zoned buddy allocator.  */
#ifndef CONFIG_FORCE_MAX_ZONEORDER
#define MAX_ORDER 11
#else
#define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER
#endif
#define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1))

/*
 * PAGE_ALLOC_COSTLY_ORDER is the order at which allocations are deemed
 * costly to service.  That is between allocation orders which should
 * coalesce naturally under reasonable reclaim pressure and those which
 * will not.
 */
#define PAGE_ALLOC_COSTLY_ORDER 3

#define MIGRATE_UNMOVABLE     0
#define MIGRATE_RECLAIMABLE   1
#define MIGRATE_MOVABLE       2
#define MIGRATE_RESERVE       3
#define MIGRATE_ISOLATE       4 /* can't allocate from here */
#define MIGRATE_TYPES         5

#define for_each_migratetype_order(order, type) \
	for (order = 0; order < MAX_ORDER; order++) \
		for (type = 0; type < MIGRATE_TYPES; type++)

extern int page_group_by_mobility_disabled;

static inline int get_pageblock_migratetype(struct page *page)
{
	return get_pageblock_flags_group(page, PB_migrate, PB_migrate_end);
}

struct free_area {
	struct list_head	free_list[MIGRATE_TYPES];
	unsigned long		nr_free;
};
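
/*
 * Illustrative sketch (not part of this header): a hypothetical helper
 * that counts the free pages of a zone by walking every order/migratetype
 * free list with for_each_migratetype_order().  Free pages are linked
 * through page->lru, a block on free_area[order] covers 1 << order pages,
 * and zone->lock must be held while the lists are walked.
 *
 *	static unsigned long count_zone_free_pages(struct zone *zone)
 *	{
 *		unsigned long nr = 0;
 *		int order, t;
 *		struct page *page;
 *
 *		spin_lock(&zone->lock);
 *		for_each_migratetype_order(order, t)
 *			list_for_each_entry(page,
 *				&zone->free_area[order].free_list[t], lru)
 *				nr += 1UL << order;
 *		spin_unlock(&zone->lock);
 *		return nr;
 *	}
 *
 * The same information is normally read far more cheaply from the
 * NR_FREE_PAGES vmstat counter; this only illustrates the data layout.
 */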

struct pglist_data;

/*
 * zone->lock and zone->lru_lock are two of the hottest locks in the kernel.
 * So add a wild amount of padding here to ensure that they fall into separate
 * cachelines.  There are very few zone structures in the machine, so space
 * consumption is not a concern here.
 */
#if defined(CONFIG_SMP)
struct zone_padding {
	char x[0];
} ____cacheline_internodealigned_in_smp;
#define ZONE_PADDING(name)	struct zone_padding name;
#else
#define ZONE_PADDING(name)
#endif

enum zone_stat_item {
	/* First 128 byte cacheline (assuming 64 bit words) */
	NR_FREE_PAGES,
	NR_LRU_BASE,
	NR_INACTIVE_ANON = NR_LRU_BASE, /* must match order of LRU_[IN]ACTIVE */
	NR_ACTIVE_ANON,		/*  "     "     "   "       "         */
	NR_INACTIVE_FILE,	/*  "     "     "   "       "         */
	NR_ACTIVE_FILE,		/*  "     "     "   "       "         */
	NR_UNEVICTABLE,		/*  "     "     "   "       "         */
	NR_MLOCK,		/* mlock()ed pages found and moved off LRU */
	NR_ANON_PAGES,	/* Mapped anonymous pages */
	NR_FILE_MAPPED,	/* pagecache pages mapped into pagetables.
			   only modified from process context */
	NR_FILE_PAGES,
	NR_FILE_DIRTY,
	NR_WRITEBACK,
	NR_SLAB_RECLAIMABLE,
	NR_SLAB_UNRECLAIMABLE,
	NR_PAGETABLE,		/* used for pagetables */
	NR_KERNEL_STACK,
	/* Second 128 byte cacheline */
	NR_UNSTABLE_NFS,	/* NFS unstable pages */
	NR_BOUNCE,
	NR_VMSCAN_WRITE,
	NR_WRITEBACK_TEMP,	/* Writeback using temporary buffers */
	NR_ISOLATED_ANON,	/* Temporary isolated pages from anon lru */
	NR_ISOLATED_FILE,	/* Temporary isolated pages from file lru */
	NR_SHMEM,		/* shmem pages (includes tmpfs/GEM pages) */
#ifdef CONFIG_NUMA
	NUMA_HIT,		/* allocated in intended node */
	NUMA_MISS,		/* allocated in non intended node */
	NUMA_FOREIGN,		/* was intended here, hit elsewhere */
	NUMA_INTERLEAVE_HIT,	/* interleaver preferred this zone */
	NUMA_LOCAL,		/* allocation from local node */
	NUMA_OTHER,		/* allocation from other node */
#endif
	NR_VM_ZONE_STAT_ITEMS };

/*
 * We do arithmetic on the LRU lists in various places in the code,
 * so it is important to keep the active lists LRU_ACTIVE higher in
 * the array than the corresponding inactive lists, and to keep
 * the *_FILE lists LRU_FILE higher than the corresponding _ANON lists.
 *
 * This has to be kept in sync with the statistics in zone_stat_item
 * above and the descriptions in vmstat_text in mm/vmstat.c
 */
#define LRU_BASE 0
#define LRU_ACTIVE 1
#define LRU_FILE 2

enum lru_list {
	LRU_INACTIVE_ANON = LRU_BASE,
	LRU_ACTIVE_ANON = LRU_BASE + LRU_ACTIVE,
	LRU_INACTIVE_FILE = LRU_BASE + LRU_FILE,
	LRU_ACTIVE_FILE = LRU_BASE + LRU_FILE + LRU_ACTIVE,
	LRU_UNEVICTABLE,
	NR_LRU_LISTS
};

#define for_each_lru(l) for (l = 0; l < NR_LRU_LISTS; l++)

#define for_each_evictable_lru(l) for (l = 0; l <= LRU_ACTIVE_FILE; l++)

static inline int is_file_lru(enum lru_list l)
{
	return (l == LRU_INACTIVE_FILE || l == LRU_ACTIVE_FILE);
}

static inline int is_active_lru(enum lru_list l)
{
	return (l == LRU_ACTIVE_ANON || l == LRU_ACTIVE_FILE);
}

static inline int is_unevictable_lru(enum lru_list l)
{
	return (l == LRU_UNEVICTABLE);
}
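
/*
 * Illustrative sketch (not part of this header): walking the evictable
 * LRU lists of a zone with for_each_evictable_lru().  The NR_LRU_BASE + l
 * arithmetic relies on enum lru_list matching the order of the LRU items
 * in enum zone_stat_item above; zone_page_state() is from linux/vmstat.h.
 *
 *	enum lru_list l;
 *	unsigned long nr_evictable = 0;
 *
 *	for_each_evictable_lru(l)
 *		nr_evictable += zone_page_state(zone, NR_LRU_BASE + l);
 */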

enum zone_watermarks {
	WMARK_MIN,
	WMARK_LOW,
	WMARK_HIGH,
	NR_WMARK
};

#define min_wmark_pages(z) (z->watermark[WMARK_MIN])
#define low_wmark_pages(z) (z->watermark[WMARK_LOW])
#define high_wmark_pages(z) (z->watermark[WMARK_HIGH])
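
/*
 * Illustrative sketch (not part of this header): how the *_wmark_pages()
 * accessors are meant to be read.  A caller that notices a zone dipping
 * below its low watermark would typically poke kswapd:
 *
 *	if (zone_page_state(zone, NR_FREE_PAGES) < low_wmark_pages(zone))
 *		wakeup_kswapd(zone, order);
 *
 * The real policy lives in zone_watermark_ok() and the allocator slow
 * path in mm/page_alloc.c; this is only a reading aid.
 */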

struct per_cpu_pages {
	int count;		/* number of pages in the list */
	int high;		/* high watermark, emptying needed */
	int batch;		/* chunk size for buddy add/remove */
	struct list_head list;	/* the list of pages */
};

struct per_cpu_pageset {
	struct per_cpu_pages pcp;
#ifdef CONFIG_NUMA
	s8 expire;
#endif
#ifdef CONFIG_SMP
	s8 stat_threshold;
	s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
#endif
} ____cacheline_aligned_in_smp;

#ifdef CONFIG_NUMA
#define zone_pcp(__z, __cpu) ((__z)->pageset[(__cpu)])
#else
#define zone_pcp(__z, __cpu) (&(__z)->pageset[(__cpu)])
#endif
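
/*
 * Illustrative sketch (not part of this header): zone_pcp() yields a
 * pointer to the per-cpu pageset of a zone for a given CPU on both the
 * NUMA and !NUMA layouts.  Assumes preemption is disabled so the CPU
 * cannot change underneath us:
 *
 *	struct per_cpu_pageset *pset = zone_pcp(zone, smp_processor_id());
 *	int cached = pset->pcp.count;	// pages sitting on the hot list
 */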

#endif /* !__GENERATING_BOUNDS_H */

enum zone_type {
#ifdef CONFIG_ZONE_DMA
	/*
	 * ZONE_DMA is used when there are devices that are not able
	 * to do DMA to all of addressable memory (ZONE_NORMAL). Then we
	 * carve out the portion of memory that is needed for these devices.
	 * The range is arch specific.
	 *
	 * Some examples
	 *
	 * Architecture		Limit
	 * ---------------------------
	 * parisc, ia64, sparc	<4G
	 * s390			<2G
	 * arm			Various
	 * alpha		Unlimited or 0-16MB.
	 *
	 * i386, x86_64 and multiple other arches
	 * 			<16M.
	 */
	ZONE_DMA,
#endif
#ifdef CONFIG_ZONE_DMA32
	/*
	 * x86_64 needs two ZONE_DMAs because it supports devices that are
	 * only able to do DMA to the lower 16M but also 32 bit devices that
	 * can only do DMA areas below 4G.
	 */
	ZONE_DMA32,
#endif
	/*
	 * Normal addressable memory is in ZONE_NORMAL. DMA operations can be
	 * performed on pages in ZONE_NORMAL if the DMA devices support
	 * transfers to all addressable memory.
	 */
	ZONE_NORMAL,
#ifdef CONFIG_HIGHMEM
	/*
	 * A memory area that is only addressable by the kernel through
	 * mapping portions into its own address space. This is for example
	 * used by i386 to allow the kernel to address the memory beyond
	 * 900MB. The kernel will set up special mappings (page
	 * table entries on i386) for each page that the kernel needs to
	 * access.
	 */
	ZONE_HIGHMEM,
#endif
	ZONE_MOVABLE,
	__MAX_NR_ZONES
};

#ifndef __GENERATING_BOUNDS_H

/*
 * When a memory allocation must conform to specific limitations (such
 * as being suitable for DMA) the caller will pass in hints to the
 * allocator in the gfp_mask, in the zone modifier bits.  These bits
 * are used to select a priority ordered list of memory zones which
 * match the requested limits. See gfp_zone() in include/linux/gfp.h
 */

#if MAX_NR_ZONES < 2
#define ZONES_SHIFT 0
#elif MAX_NR_ZONES <= 2
#define ZONES_SHIFT 1
#elif MAX_NR_ZONES <= 4
#define ZONES_SHIFT 2
#else
#error ZONES_SHIFT -- too many zones configured, adjust calculation
#endif

struct zone_reclaim_stat {
	/*
	 * The pageout code in vmscan.c keeps track of how many of the
	 * mem/swap backed and file backed pages are referenced.
	 * The higher the rotated/scanned ratio, the more valuable
	 * that cache is.
	 *
	 * The anon LRU stats live in [0], file LRU stats in [1]
	 */
	unsigned long		recent_rotated[2];
	unsigned long		recent_scanned[2];
};

struct zone {
	/* Fields commonly accessed by the page allocator */

	/* zone watermarks, access with *_wmark_pages(zone) macros */
	unsigned long watermark[NR_WMARK];

	/*
	 * We don't know whether the memory we are about to allocate will be
	 * freeable and/or whether it will eventually be released, so to avoid
	 * wasting several GB of ram we must reserve some of the lower zone
	 * memory (otherwise we risk running OOM on the lower zones even
	 * though there is plenty of freeable ram on the higher zones).  This
	 * array is recalculated at runtime if the sysctl_lowmem_reserve_ratio
	 * sysctl changes.
	 */
	unsigned long		lowmem_reserve[MAX_NR_ZONES];

#ifdef CONFIG_NUMA
	int node;
	/*
	 * zone reclaim becomes active if more unmapped pages exist.
	 */
	unsigned long		min_unmapped_pages;
	unsigned long		min_slab_pages;
	struct per_cpu_pageset	*pageset[NR_CPUS];
#else
	struct per_cpu_pageset	pageset[NR_CPUS];
#endif
	/*
	 * free areas of different sizes
	 */
	spinlock_t		lock;
#ifdef CONFIG_MEMORY_HOTPLUG
	/* see spanned/present_pages for more description */
	seqlock_t		span_seqlock;
#endif
	struct free_area	free_area[MAX_ORDER];

#ifndef CONFIG_SPARSEMEM
	/*
	 * Flags for a pageblock_nr_pages block. See pageblock-flags.h.
	 * In SPARSEMEM, this map is stored in struct mem_section
	 */
	unsigned long		*pageblock_flags;
#endif /* CONFIG_SPARSEMEM */


	ZONE_PADDING(_pad1_)

	/* Fields commonly accessed by the page reclaim scanner */
	spinlock_t		lru_lock;
	struct zone_lru {
		struct list_head list;
		unsigned long nr_saved_scan;	/* accumulated for batching */
	} lru[NR_LRU_LISTS];

	struct zone_reclaim_stat reclaim_stat;

	unsigned long		pages_scanned;	   /* since last reclaim */
	unsigned long		flags;		   /* zone flags, see below */

	/* Zone statistics */
	atomic_long_t		vm_stat[NR_VM_ZONE_STAT_ITEMS];

	/*
	 * prev_priority holds the scanning priority for this zone.  It is
	 * defined as the scanning priority at which we achieved our reclaim
	 * target at the previous try_to_free_pages() or balance_pgdat()
	 * invocation.
	 *
	 * We use prev_priority as a measure of how much stress page reclaim is
	 * under - it drives the swappiness decision: whether to unmap mapped
	 * pages.
	 *
	 * Access to this field is quite racy even on a uniprocessor.  But
	 * it is expected to average out OK.
	 */
	int prev_priority;

	/*
	 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
	 * this zone's LRU.  Maintained by the pageout code.
	 */
	unsigned int inactive_ratio;


	ZONE_PADDING(_pad2_)
	/* Rarely used or read-mostly fields */

	/*
	 * wait_table		-- the array holding the hash table
	 * wait_table_hash_nr_entries	-- the size of the hash table array
	 * wait_table_bits	-- wait_table_size == (1 << wait_table_bits)
	 *
	 * The purpose of all these is to keep track of the people
	 * waiting for a page to become available and make them
	 * runnable again when possible. The trouble is that this
	 * consumes a lot of space, especially when so few things
	 * wait on pages at a given time. So instead of using
	 * per-page waitqueues, we use a waitqueue hash table.
	 *
	 * The bucket discipline is to sleep on the same queue when
	 * colliding and wake all in that wait queue when removing.
	 * When something wakes, it must check to be sure its page is
	 * truly available, a la thundering herd. The cost of a
	 * collision is great, but given the expected load of the
	 * table, they should be so rare as to be outweighed by the
	 * benefits from the saved space.
	 *
	 * __wait_on_page_locked() and unlock_page() in mm/filemap.c are the
	 * primary users of these fields, and in mm/page_alloc.c
	 * free_area_init_core() performs the initialization of them.
	 */
	wait_queue_head_t	* wait_table;
	unsigned long		wait_table_hash_nr_entries;
	unsigned long		wait_table_bits;

	/*
	 * Discontig memory support fields.
	 */
	struct pglist_data	*zone_pgdat;
	/* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
	unsigned long		zone_start_pfn;

	/*
	 * zone_start_pfn, spanned_pages and present_pages are all
	 * protected by span_seqlock.  It is a seqlock because it has
	 * to be read outside of zone->lock, and it is done in the main
	 * allocator path.  But, it is written quite infrequently.
	 *
	 * The lock is declared along with zone->lock because it is
	 * frequently read in proximity to zone->lock.  It's good to
	 * give them a chance of being in the same cacheline.
	 */
	unsigned long		spanned_pages;	/* total size, including holes */
	unsigned long		present_pages;	/* amount of memory (excluding holes) */

	/*
	 * rarely used fields:
	 */
	const char		*name;
} ____cacheline_internodealigned_in_smp;

typedef enum {
	ZONE_ALL_UNRECLAIMABLE,		/* all pages pinned */
	ZONE_RECLAIM_LOCKED,		/* prevents concurrent reclaim */
	ZONE_OOM_LOCKED,		/* zone is in OOM killer zonelist */
} zone_flags_t;

static inline void zone_set_flag(struct zone *zone, zone_flags_t flag)
{
	set_bit(flag, &zone->flags);
}

static inline int zone_test_and_set_flag(struct zone *zone, zone_flags_t flag)
{
	return test_and_set_bit(flag, &zone->flags);
}

static inline void zone_clear_flag(struct zone *zone, zone_flags_t flag)
{
	clear_bit(flag, &zone->flags);
}

static inline int zone_is_all_unreclaimable(const struct zone *zone)
{
	return test_bit(ZONE_ALL_UNRECLAIMABLE, &zone->flags);
}

static inline int zone_is_reclaim_locked(const struct zone *zone)
{
	return test_bit(ZONE_RECLAIM_LOCKED, &zone->flags);
}

static inline int zone_is_oom_locked(const struct zone *zone)
{
	return test_bit(ZONE_OOM_LOCKED, &zone->flags);
}
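
/*
 * Illustrative sketch (not part of this header): the intended
 * test-and-set usage of the flag helpers, loosely modelled on how the
 * OOM killer serialises per-zone work (see mm/oom_kill.c for the real
 * code):
 *
 *	if (zone_test_and_set_flag(zone, ZONE_OOM_LOCKED))
 *		return 0;	// somebody else already holds the zone
 *	// ... work that must not run concurrently for this zone ...
 *	zone_clear_flag(zone, ZONE_OOM_LOCKED);
 */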

/*
 * The "priority" of VM scanning is how much of the queues we will scan in one
 * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the
 * queues ("queue_length >> 12") during an aging round.
 */
#define DEF_PRIORITY 12

/* Maximum number of zones on a zonelist */
#define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES)

#ifdef CONFIG_NUMA

/*
 * The NUMA zonelists are doubled because we need zonelists that restrict the
 * allocations to a single node for GFP_THISNODE.
 *
 * [0]	: Zonelist with fallback
 * [1]	: No fallback (GFP_THISNODE)
 */
#define MAX_ZONELISTS 2


/*
 * We cache key information from each zonelist for smaller cache
 * footprint when scanning for free pages in get_page_from_freelist().
 *
 * 1) The BITMAP fullzones tracks which zones in a zonelist have come
 *    up short of free memory since the last time (last_fullzone_zap)
 *    we zero'd fullzones.
 * 2) The array z_to_n[] maps each zone in the zonelist to its node
 *    id, so that we can efficiently evaluate whether that node is
 *    set in the current task's mems_allowed.
 *
 * Both fullzones and z_to_n[] are one-to-one with the zonelist,
 * indexed by a zone's offset in the zonelist zones[] array.
 *
 * The get_page_from_freelist() routine does two scans.  During the
 * first scan, we skip zones whose corresponding bit in 'fullzones'
 * is set or whose corresponding node in current->mems_allowed (which
 * comes from cpusets) is not set.  During the second scan, we bypass
 * this zonelist_cache, to ensure we look methodically at each zone.
 *
 * Once per second, we zero out (zap) fullzones, forcing us to
 * reconsider nodes that might have regained more free memory.
 * The field last_full_zap is the time we last zapped fullzones.
 *
 * This mechanism reduces the amount of time we waste repeatedly
 * re-examining zones for free memory when they came up low on
 * memory just moments ago.
 *
 * The zonelist_cache struct members logically belong in struct
 * zonelist.  However, the mempolicy zonelists constructed for
 * MPOL_BIND are intentionally variable length (and usually much
 * shorter).  A general purpose mechanism for handling structs with
 * multiple variable length members is more mechanism than we want
 * here.  We resort to some special case hackery instead.
 *
 * The MPOL_BIND zonelists don't need this zonelist_cache (in good
 * part because they are shorter), so we put the fixed length stuff
 * at the front of the zonelist struct, ending in a variable length
 * zones[], as is needed by MPOL_BIND.
 *
 * Then we put the optional zonelist cache on the end of the zonelist
 * struct.  This optional stuff is found by a 'zlcache_ptr' pointer in
 * the fixed length portion at the front of the struct.  This pointer
 * both enables us to find the zonelist cache, and in the case of
 * MPOL_BIND zonelists, (which will just set the zlcache_ptr to NULL)
 * to know that the zonelist cache is not there.
 *
 * The end result is that struct zonelists come in two flavors:
 *  1) The full, fixed length version, shown below, and
 *  2) The custom zonelists for MPOL_BIND.
 * The custom MPOL_BIND zonelists have a NULL zlcache_ptr and no zlcache.
 *
 * Even though there may be multiple CPU cores on a node modifying
 * fullzones or last_full_zap in the same zonelist_cache at the same
 * time, we don't lock it.  This is just hint data - if it is wrong now
 * and then, the allocator will still function, perhaps a bit slower.
 */


struct zonelist_cache {
	unsigned short z_to_n[MAX_ZONES_PER_ZONELIST];		/* zone->nid */
	DECLARE_BITMAP(fullzones, MAX_ZONES_PER_ZONELIST);	/* zone full? */
	unsigned long last_full_zap;		/* when last zap'd (jiffies) */
};
#else
#define MAX_ZONELISTS 1
struct zonelist_cache;
#endif

/*
 * This struct contains information about a zone in a zonelist. It is stored
 * here to avoid dereferences into large structures and lookups of tables
 */
struct zoneref {
	struct zone *zone;	/* Pointer to actual zone */
	int zone_idx;		/* zone_idx(zoneref->zone) */
};

/*
 * One allocation request operates on a zonelist. A zonelist
 * is a list of zones, the first one is the 'goal' of the
 * allocation, the other zones are fallback zones, in decreasing
 * priority.
 *
 * If zlcache_ptr is not NULL, then it is just the address of zlcache,
 * as explained above.  If zlcache_ptr is NULL, there is no zlcache.
 *
 * To speed the reading of the zonelist, the zonerefs contain the zone index
 * of the entry being read. Helper functions to access information given
 * a struct zoneref are
 *
 * zonelist_zone()	- Return the struct zone * for an entry in _zonerefs
 * zonelist_zone_idx()	- Return the index of the zone for an entry
 * zonelist_node_idx()	- Return the index of the node for an entry
 */
struct zonelist {
	struct zonelist_cache *zlcache_ptr;		     // NULL or &zlcache
	struct zoneref _zonerefs[MAX_ZONES_PER_ZONELIST + 1];
#ifdef CONFIG_NUMA
	struct zonelist_cache zlcache;			     // optional ...
#endif
};

#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
struct node_active_region {
	unsigned long start_pfn;
	unsigned long end_pfn;
	int nid;
};
#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */

#ifndef CONFIG_DISCONTIGMEM
/* The array of struct pages - for discontigmem use pgdat->lmem_map */
extern struct page *mem_map;
#endif

/*
 * The pg_data_t structure is used in machines with CONFIG_DISCONTIGMEM
 * (mostly NUMA machines?) to denote a higher-level memory zone than the
 * zone denotes.
 *
 * On NUMA machines, each NUMA node would have a pg_data_t to describe
 * its memory layout.
 *
 * Memory statistics and page replacement data structures are maintained on a
 * per-zone basis.
 */
struct bootmem_data;
typedef struct pglist_data {
	struct zone node_zones[MAX_NR_ZONES];
	struct zonelist node_zonelists[MAX_ZONELISTS];
	int nr_zones;
#ifdef CONFIG_FLAT_NODE_MEM_MAP	/* means !SPARSEMEM */
	struct page *node_mem_map;
#ifdef CONFIG_CGROUP_MEM_RES_CTLR
	struct page_cgroup *node_page_cgroup;
#endif
#endif
	struct bootmem_data *bdata;
#ifdef CONFIG_MEMORY_HOTPLUG
	/*
	 * Must be held any time you expect node_start_pfn, node_present_pages
	 * or node_spanned_pages to stay constant.  Holding this will also
	 * guarantee that any pfn_valid() stays that way.
	 *
	 * Nests above zone->lock and zone->size_seqlock.
	 */
	spinlock_t node_size_lock;
#endif
	unsigned long node_start_pfn;
	unsigned long node_present_pages; /* total number of physical pages */
	unsigned long node_spanned_pages; /* total size of physical page
					     range, including holes */
	int node_id;
	wait_queue_head_t kswapd_wait;
	struct task_struct *kswapd;
	int kswapd_max_order;
} pg_data_t;

#define node_present_pages(nid)	(NODE_DATA(nid)->node_present_pages)
#define node_spanned_pages(nid)	(NODE_DATA(nid)->node_spanned_pages)
#ifdef CONFIG_FLAT_NODE_MEM_MAP
#define pgdat_page_nr(pgdat, pagenr)	((pgdat)->node_mem_map + (pagenr))
#else
#define pgdat_page_nr(pgdat, pagenr)	pfn_to_page((pgdat)->node_start_pfn + (pagenr))
#endif
#define nid_page_nr(nid, pagenr) 	pgdat_page_nr(NODE_DATA(nid),(pagenr))

#include <linux/memory_hotplug.h>

void get_zone_counts(unsigned long *active, unsigned long *inactive,
			unsigned long *free);
void build_all_zonelists(void);
void wakeup_kswapd(struct zone *zone, int order);
int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
		int classzone_idx, int alloc_flags);
enum memmap_context {
	MEMMAP_EARLY,
	MEMMAP_HOTPLUG,
};
extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
				     unsigned long size,
				     enum memmap_context context);

#ifdef CONFIG_HAVE_MEMORY_PRESENT
void memory_present(int nid, unsigned long start, unsigned long end);
#else
static inline void memory_present(int nid, unsigned long start, unsigned long end) {}
#endif

#ifdef CONFIG_NEED_NODE_MEMMAP_SIZE
unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
#endif

/*
 * zone_idx() returns 0 for the ZONE_DMA zone, 1 for the ZONE_NORMAL zone, etc.
 */
#define zone_idx(zone)		((zone) - (zone)->zone_pgdat->node_zones)

static inline int populated_zone(struct zone *zone)
{
	return (!!zone->present_pages);
}

extern int movable_zone;

static inline int zone_movable_is_highmem(void)
{
#if defined(CONFIG_HIGHMEM) && defined(CONFIG_ARCH_POPULATES_NODE_MAP)
	return movable_zone == ZONE_HIGHMEM;
#else
	return 0;
#endif
}

static inline int is_highmem_idx(enum zone_type idx)
{
#ifdef CONFIG_HIGHMEM
	return (idx == ZONE_HIGHMEM ||
		(idx == ZONE_MOVABLE && zone_movable_is_highmem()));
#else
	return 0;
#endif
}

static inline int is_normal_idx(enum zone_type idx)
{
	return (idx == ZONE_NORMAL);
}

/**
 * is_highmem - helper function to quickly check if a struct zone is a 
 *              highmem zone or not.  This is an attempt to keep references
 *              to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum.
 * @zone - pointer to struct zone variable
 */
static inline int is_highmem(struct zone *zone)
{
#ifdef CONFIG_HIGHMEM
	int zone_off = (char *)zone - (char *)zone->zone_pgdat->node_zones;
	return zone_off == ZONE_HIGHMEM * sizeof(*zone) ||
	       (zone_off == ZONE_MOVABLE * sizeof(*zone) &&
		zone_movable_is_highmem());
#else
	return 0;
#endif
}

static inline int is_normal(struct zone *zone)
{
	return zone == zone->zone_pgdat->node_zones + ZONE_NORMAL;
}

static inline int is_dma32(struct zone *zone)
{
#ifdef CONFIG_ZONE_DMA32
	return zone == zone->zone_pgdat->node_zones + ZONE_DMA32;
#else
	return 0;
#endif
}

static inline int is_dma(struct zone *zone)
{
#ifdef CONFIG_ZONE_DMA
	return zone == zone->zone_pgdat->node_zones + ZONE_DMA;
#else
	return 0;
#endif
}

/* These two functions are used to setup the per zone pages min values */
struct ctl_table;
struct file;
int min_free_kbytes_sysctl_handler(struct ctl_table *, int, struct file *,
					void __user *, size_t *, loff_t *);
extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1];
int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, struct file *,
					void __user *, size_t *, loff_t *);
int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int, struct file *,
					void __user *, size_t *, loff_t *);
int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
			struct file *, void __user *, size_t *, loff_t *);
int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int,
			struct file *, void __user *, size_t *, loff_t *);

extern int numa_zonelist_order_handler(struct ctl_table *, int,
			struct file *, void __user *, size_t *, loff_t *);
extern char numa_zonelist_order[];
#define NUMA_ZONELIST_ORDER_LEN 16	/* string buffer size */

#ifndef CONFIG_NEED_MULTIPLE_NODES

extern struct pglist_data contig_page_data;
#define NODE_DATA(nid)		(&contig_page_data)
#define NODE_MEM_MAP(nid)	mem_map

#else /* CONFIG_NEED_MULTIPLE_NODES */

#include <asm/mmzone.h>

#endif /* !CONFIG_NEED_MULTIPLE_NODES */

extern struct pglist_data *first_online_pgdat(void);
extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
extern struct zone *next_zone(struct zone *zone);

/**
 * for_each_online_pgdat - helper macro to iterate over all online nodes
 * @pgdat - pointer to a pg_data_t variable
 */
#define for_each_online_pgdat(pgdat)			\
	for (pgdat = first_online_pgdat();		\
	     pgdat;					\
	     pgdat = next_online_pgdat(pgdat))
/**
 * for_each_zone - helper macro to iterate over all memory zones
 * @zone - pointer to struct zone variable
 *
 * The user only needs to declare the zone variable, for_each_zone
 * fills it in.
 */
#define for_each_zone(zone)			        \
	for (zone = (first_online_pgdat())->node_zones; \
	     zone;					\
	     zone = next_zone(zone))

#define for_each_populated_zone(zone)		        \
	for (zone = (first_online_pgdat())->node_zones; \
	     zone;					\
	     zone = next_zone(zone))			\
		if (!populated_zone(zone))		\
			; /* do nothing */		\
		else
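
/*
 * Illustrative sketch (not part of this header): for_each_populated_zone()
 * declares nothing itself, so the caller supplies the cursor, e.g. to sum
 * the present pages of every populated zone in the system:
 *
 *	struct zone *zone;
 *	unsigned long present = 0;
 *
 *	for_each_populated_zone(zone)
 *		present += zone->present_pages;
 */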

static inline struct zone *zonelist_zone(struct zoneref *zoneref)
{
	return zoneref->zone;
}

static inline int zonelist_zone_idx(struct zoneref *zoneref)
{
	return zoneref->zone_idx;
}

static inline int zonelist_node_idx(struct zoneref *zoneref)
{
#ifdef CONFIG_NUMA
	/* zone_to_nid not available in this context */
	return zoneref->zone->node;
#else
	return 0;
#endif /* CONFIG_NUMA */
}

/**
 * next_zones_zonelist - Returns the next zone at or below highest_zoneidx within the allowed nodemask using a cursor within a zonelist as a starting point
 * @z - The cursor used as a starting point for the search
 * @highest_zoneidx - The zone index of the highest zone to return
 * @nodes - An optional nodemask to filter the zonelist with
 * @zone - The first suitable zone found is returned via this parameter
 *
 * This function returns the next zone at or below a given zone index that is
 * within the allowed nodemask using a cursor as the starting point for the
 * search. The zoneref returned is a cursor that represents the current zone
 * being examined. It should be advanced by one before calling
 * next_zones_zonelist again.
 */
struct zoneref *next_zones_zonelist(struct zoneref *z,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes,
					struct zone **zone);

/**
 * first_zones_zonelist - Returns the first zone at or below highest_zoneidx within the allowed nodemask in a zonelist
 * @zonelist - The zonelist to search for a suitable zone
 * @highest_zoneidx - The zone index of the highest zone to return
 * @nodes - An optional nodemask to filter the zonelist with
 * @zone - The first suitable zone found is returned via this parameter
 *
 * This function returns the first zone at or below a given zone index that is
 * within the allowed nodemask. The zoneref returned is a cursor that can be
 * used to iterate the zonelist with next_zones_zonelist by advancing it by
 * one before calling.
 */
static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
					enum zone_type highest_zoneidx,
					nodemask_t *nodes,
					struct zone **zone)
{
	return next_zones_zonelist(zonelist->_zonerefs, highest_zoneidx, nodes,
								zone);
}

/**
 * for_each_zone_zonelist_nodemask - helper macro to iterate over valid zones in a zonelist at or below a given zone index and within a nodemask
 * @zone - The current zone in the iterator
 * @z - The current pointer within zonelist->zones being iterated
 * @zlist - The zonelist being iterated
 * @highidx - The zone index of the highest zone to return
 * @nodemask - Nodemask allowed by the allocator
 *
 * This iterator iterates through all zones at or below a given zone index and
 * within a given nodemask
 */
#define for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, nodemask) \
	for (z = first_zones_zonelist(zlist, highidx, nodemask, &zone);	\
		zone;							\
		z = next_zones_zonelist(++z, highidx, nodemask, &zone))

/**
 * for_each_zone_zonelist - helper macro to iterate over valid zones in a zonelist at or below a given zone index
 * @zone - The current zone in the iterator
 * @z - The current pointer within zonelist->zones being iterated
 * @zlist - The zonelist being iterated
 * @highidx - The zone index of the highest zone to return
 *
 * This iterator iterates through all zones at or below a given zone index.
 */
#define for_each_zone_zonelist(zone, z, zlist, highidx) \
	for_each_zone_zonelist_nodemask(zone, z, zlist, highidx, NULL)
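
/*
 * Illustrative sketch (not part of this header): the allocator-style walk
 * over a zonelist, visiting each zone usable for a GFP_KERNEL request in
 * fallback order with no nodemask restriction.  node_zonelist() and
 * gfp_zone() come from linux/gfp.h; get_page_from_freelist() in
 * mm/page_alloc.c is the real user of this pattern.
 *
 *	struct zonelist *zonelist = node_zonelist(numa_node_id(), GFP_KERNEL);
 *	struct zoneref *z;
 *	struct zone *zone;
 *
 *	for_each_zone_zonelist(zone, z, zonelist, gfp_zone(GFP_KERNEL)) {
 *		// zone is the next candidate, in decreasing priority
 *	}
 */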

#ifdef CONFIG_SPARSEMEM
#include <asm/sparsemem.h>
#endif

#if !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) && \
	!defined(CONFIG_ARCH_POPULATES_NODE_MAP)
static inline unsigned long early_pfn_to_nid(unsigned long pfn)
{
	return 0;
}
#endif

#ifdef CONFIG_FLATMEM
#define pfn_to_nid(pfn)		(0)
#endif

#define pfn_to_section_nr(pfn) ((pfn) >> PFN_SECTION_SHIFT)
#define section_nr_to_pfn(sec) ((sec) << PFN_SECTION_SHIFT)

#ifdef CONFIG_SPARSEMEM

/*
 * SECTION_SHIFT    		#bits space required to store a section #
 *
 * PA_SECTION_SHIFT		physical address to/from section number
 * PFN_SECTION_SHIFT		pfn to/from section number
 */
#define SECTIONS_SHIFT		(MAX_PHYSMEM_BITS - SECTION_SIZE_BITS)

#define PA_SECTION_SHIFT	(SECTION_SIZE_BITS)
#define PFN_SECTION_SHIFT	(SECTION_SIZE_BITS - PAGE_SHIFT)

#define NR_MEM_SECTIONS		(1UL << SECTIONS_SHIFT)

#define PAGES_PER_SECTION       (1UL << PFN_SECTION_SHIFT)
#define PAGE_SECTION_MASK	(~(PAGES_PER_SECTION-1))

#define SECTION_BLOCKFLAGS_BITS \
	((1UL << (PFN_SECTION_SHIFT - pageblock_order)) * NR_PAGEBLOCK_BITS)

#if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS
#error Allocator MAX_ORDER exceeds SECTION_SIZE
#endif

struct page;
struct page_cgroup;
struct mem_section {
	/*
	 * This is, logically, a pointer to an array of struct
	 * pages.  However, it is stored with some other magic.
	 * (see sparse.c::sparse_init_one_section())
	 *
	 * Additionally during early boot we encode node id of
	 * the location of the section here to guide allocation.
	 * (see sparse.c::memory_present())
	 *
	 * Making it a UL at least makes someone do a cast
	 * before using it wrong.
	 */
	unsigned long section_mem_map;

	/* See declaration of similar field in struct zone */
	unsigned long *pageblock_flags;
#ifdef CONFIG_CGROUP_MEM_RES_CTLR
	/*
	 * If !SPARSEMEM, pgdat doesn't have page_cgroup pointer. We use
	 * section. (see memcontrol.h/page_cgroup.h about this.)
	 */
	struct page_cgroup *page_cgroup;
	unsigned long pad;
#endif
};

#ifdef CONFIG_SPARSEMEM_EXTREME
#define SECTIONS_PER_ROOT       (PAGE_SIZE / sizeof (struct mem_section))
#else
#define SECTIONS_PER_ROOT	1
#endif

#define SECTION_NR_TO_ROOT(sec)	((sec) / SECTIONS_PER_ROOT)
#define NR_SECTION_ROOTS	(NR_MEM_SECTIONS / SECTIONS_PER_ROOT)
#define SECTION_ROOT_MASK	(SECTIONS_PER_ROOT - 1)

#ifdef CONFIG_SPARSEMEM_EXTREME
extern struct mem_section *mem_section[NR_SECTION_ROOTS];
#else
extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
#endif

static inline struct mem_section *__nr_to_section(unsigned long nr)
{
	if (!mem_section[SECTION_NR_TO_ROOT(nr)])
		return NULL;
	return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
}
extern int __section_nr(struct mem_section* ms);
extern unsigned long usemap_size(void);

/*
 * We use the lower bits of the mem_map pointer to store
 * a little bit of information.  There should be at least
 * 3 bits here due to 32-bit alignment.
 */
#define	SECTION_MARKED_PRESENT	(1UL<<0)
#define SECTION_HAS_MEM_MAP	(1UL<<1)
#define SECTION_MAP_LAST_BIT	(1UL<<2)
#define SECTION_MAP_MASK	(~(SECTION_MAP_LAST_BIT-1))
#define SECTION_NID_SHIFT	2

static inline struct page *__section_mem_map_addr(struct mem_section *section)
{
	unsigned long map = section->section_mem_map;
	map &= SECTION_MAP_MASK;
	return (struct page *)map;
}

static inline int present_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_MARKED_PRESENT));
}

static inline int present_section_nr(unsigned long nr)
{
	return present_section(__nr_to_section(nr));
}

static inline int valid_section(struct mem_section *section)
{
	return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP));
}

static inline int valid_section_nr(unsigned long nr)
{
	return valid_section(__nr_to_section(nr));
}

static inline struct mem_section *__pfn_to_section(unsigned long pfn)
{
	return __nr_to_section(pfn_to_section_nr(pfn));
}

static inline int pfn_valid(unsigned long pfn)
{
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	return valid_section(__nr_to_section(pfn_to_section_nr(pfn)));
}

static inline int pfn_present(unsigned long pfn)
{
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
	return present_section(__nr_to_section(pfn_to_section_nr(pfn)));
}
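
/*
 * Illustrative sketch (not part of this header): a pfn walk that only
 * touches pfns backed by a memmap, as a SPARSEMEM-safe scan of a zone's
 * spanned range would do (start_pfn/end_pfn are whatever range the
 * caller cares about, e.g. zone->zone_start_pfn and
 * zone_start_pfn + zone->spanned_pages):
 *
 *	unsigned long pfn;
 *
 *	for (pfn = start_pfn; pfn < end_pfn; pfn++) {
 *		if (!pfn_valid(pfn))
 *			continue;
 *		// pfn_to_page(pfn) is safe to dereference here
 *	}
 */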

/*
 * These are _only_ used during initialisation, therefore they
 * can use __initdata ...  They could have names to indicate
 * this restriction.
 */
#ifdef CONFIG_NUMA
#define pfn_to_nid(pfn)							\
({									\
	unsigned long __pfn_to_nid_pfn = (pfn);				\
	page_to_nid(pfn_to_page(__pfn_to_nid_pfn));			\
})
#else
#define pfn_to_nid(pfn)		(0)
#endif

#define early_pfn_valid(pfn)	pfn_valid(pfn)
void sparse_init(void);
#else
#define sparse_init()	do {} while (0)
#define sparse_index_init(_sec, _nid)  do {} while (0)
#endif /* CONFIG_SPARSEMEM */

#ifdef CONFIG_NODES_SPAN_OTHER_NODES
bool early_pfn_in_nid(unsigned long pfn, int nid);
#else
#define early_pfn_in_nid(pfn, nid)	(1)
#endif

#ifndef early_pfn_valid
#define early_pfn_valid(pfn)	(1)
#endif

void memory_present(int nid, unsigned long start, unsigned long end);
unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);

/*
 * If it is possible to have holes within a MAX_ORDER_NR_PAGES, then we
 * need to check pfn validity within that MAX_ORDER_NR_PAGES block.
 * pfn_valid_within() should be used in this case; we optimise this away
 * when we have no holes within a MAX_ORDER_NR_PAGES block.
 */
#ifdef CONFIG_HOLES_IN_ZONE
#define pfn_valid_within(pfn) pfn_valid(pfn)
#else
#define pfn_valid_within(pfn) (1)
#endif

#ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL
/*
 * pfn_valid() is meant to be able to tell if a given PFN has valid memmap
 * associated with it or not. In FLATMEM, it is expected that holes always
 * have valid memmap as long as there is valid PFNs either side of the hole.
 * In SPARSEMEM, it is assumed that a valid section has a memmap for the
 * entire section.
 *
 * However, ARM, and maybe other embedded architectures in the future, may
 * free memmap backing holes to save memory on the assumption the memmap is
 * never used. The page_zone linkages are then broken even though pfn_valid()
 * returns true. A walker of the full memmap must then do this additional
 * check to ensure the memmap they are looking at is sane by making sure
 * the zone and PFN linkages are still valid. This is expensive, but walkers
 * of the full memmap are extremely rare.
 */
int memmap_valid_within(unsigned long pfn,
					struct page *page, struct zone *zone);
#else
static inline int memmap_valid_within(unsigned long pfn,
					struct page *page, struct zone *zone)
{
	return 1;
}
#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */

#endif /* !__GENERATING_BOUNDS_H */
#endif /* !__ASSEMBLY__ */
#endif /* _LINUX_MMZONE_H */