mmzone.h 23.2 KB
Newer Older
L
Linus Torvalds 已提交
1 2 3 4 5 6 7 8 9 10 11 12 13
#ifndef _LINUX_MMZONE_H
#define _LINUX_MMZONE_H

#ifdef __KERNEL__
#ifndef __ASSEMBLY__

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/wait.h>
#include <linux/cache.h>
#include <linux/threads.h>
#include <linux/numa.h>
#include <linux/init.h>
14
#include <linux/seqlock.h>
15
#include <linux/nodemask.h>
L
Linus Torvalds 已提交
16
#include <asm/atomic.h>
R
Ralf Baechle 已提交
17
#include <asm/page.h>
L
Linus Torvalds 已提交
18 19 20 21 22 23 24

/* Free memory management - zoned buddy allocator.  */
#ifndef CONFIG_FORCE_MAX_ZONEORDER
#define MAX_ORDER 11
#else
#define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER
#endif
25
#define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1))
L
Linus Torvalds 已提交
26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42

struct free_area {
	struct list_head	free_list;
	unsigned long		nr_free;
};

struct pglist_data;

/*
 * zone->lock and zone->lru_lock are two of the hottest locks in the kernel.
 * So add a wild amount of padding here to ensure that they fall into separate
 * cachelines.  There are very few zone structures in the machine, so space
 * consumption is not a concern here.
 */
#if defined(CONFIG_SMP)
struct zone_padding {
	char x[0];
43
} ____cacheline_internodealigned_in_smp;
L
Linus Torvalds 已提交
44 45 46 47 48
#define ZONE_PADDING(name)	struct zone_padding name;
#else
#define ZONE_PADDING(name)
#endif

49
enum zone_stat_item {
50 51
	NR_ANON_PAGES,	/* Mapped anonymous pages */
	NR_FILE_MAPPED,	/* pagecache pages mapped into pagetables.
52
			   only modified from process context */
53
	NR_FILE_PAGES,
54 55
	NR_SLAB_RECLAIMABLE,
	NR_SLAB_UNRECLAIMABLE,
56
	NR_PAGETABLE,	/* used for pagetables */
57
	NR_FILE_DIRTY,
58
	NR_WRITEBACK,
59
	NR_UNSTABLE_NFS,	/* NFS unstable pages */
60
	NR_BOUNCE,
61
	NR_VMSCAN_WRITE,
62 63 64 65 66 67 68 69
#ifdef CONFIG_NUMA
	NUMA_HIT,		/* allocated in intended node */
	NUMA_MISS,		/* allocated in non intended node */
	NUMA_FOREIGN,		/* was intended here, hit elsewhere */
	NUMA_INTERLEAVE_HIT,	/* interleaver preferred this zone */
	NUMA_LOCAL,		/* allocation from local node */
	NUMA_OTHER,		/* allocation from other node */
#endif
70 71
	NR_VM_ZONE_STAT_ITEMS };

L
Linus Torvalds 已提交
72 73 74 75 76 77 78 79 80
struct per_cpu_pages {
	int count;		/* number of pages in the list */
	int high;		/* high watermark, emptying needed */
	int batch;		/* chunk size for buddy add/remove */
	struct list_head list;	/* the list of pages */
};

struct per_cpu_pageset {
	struct per_cpu_pages pcp[2];	/* 0: hot.  1: cold */
81
#ifdef CONFIG_SMP
82
	s8 stat_threshold;
83 84
	s8 vm_stat_diff[NR_VM_ZONE_STAT_ITEMS];
#endif
L
Linus Torvalds 已提交
85 86
} ____cacheline_aligned_in_smp;

87 88 89 90 91 92
#ifdef CONFIG_NUMA
#define zone_pcp(__z, __cpu) ((__z)->pageset[(__cpu)])
#else
#define zone_pcp(__z, __cpu) (&(__z)->pageset[(__cpu)])
#endif

93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113
enum zone_type {
	/*
	 * ZONE_DMA is used when there are devices that are not able
	 * to do DMA to all of addressable memory (ZONE_NORMAL). Then we
	 * carve out the portion of memory that is needed for these devices.
	 * The range is arch specific.
	 *
	 * Some examples
	 *
	 * Architecture		Limit
	 * ---------------------------
	 * parisc, ia64, sparc	<4G
	 * s390			<2G
	 * arm26		<48M
	 * arm			Various
	 * alpha		Unlimited or 0-16MB.
	 *
	 * i386, x86_64 and multiple other arches
	 * 			<16M.
	 */
	ZONE_DMA,
114
#ifdef CONFIG_ZONE_DMA32
115 116 117 118 119 120
	/*
	 * x86_64 needs two ZONE_DMAs because it supports devices that are
	 * only able to do DMA to the lower 16M but also 32 bit devices that
	 * can only do DMA areas below 4G.
	 */
	ZONE_DMA32,
121
#endif
122 123 124 125 126 127
	/*
	 * Normal addressable memory is in ZONE_NORMAL. DMA operations can be
	 * performed on pages in ZONE_NORMAL if the DMA devices support
	 * transfers to all addressable memory.
	 */
	ZONE_NORMAL,
128
#ifdef CONFIG_HIGHMEM
129 130 131 132 133 134 135 136 137
	/*
	 * A memory area that is only addressable by the kernel through
	 * mapping portions into its own address space. This is for example
	 * used by i386 to allow the kernel to address the memory beyond
	 * 900MB. The kernel will set up special mappings (page
	 * table entries on i386) for each page that the kernel needs to
	 * access.
	 */
	ZONE_HIGHMEM,
138
#endif
139 140
	MAX_NR_ZONES
};
L
Linus Torvalds 已提交
141 142 143 144 145 146

/*
 * When a memory allocation must conform to specific limitations (such
 * as being suitable for DMA) the caller will pass in hints to the
 * allocator in the gfp_mask, in the zone modifier bits.  These bits
 * are used to select a priority ordered list of memory zones which
147
 * match the requested limits. See gfp_zone() in include/linux/gfp.h
L
Linus Torvalds 已提交
148
 */
149

150 151
#if !defined(CONFIG_ZONE_DMA32) && !defined(CONFIG_HIGHMEM)
#define ZONES_SHIFT 1
152
#else
153
#define ZONES_SHIFT 2
154
#endif
L
Linus Torvalds 已提交
155 156 157 158 159 160 161 162 163 164 165 166 167 168 169

struct zone {
	/* Fields commonly accessed by the page allocator */
	unsigned long		free_pages;
	unsigned long		pages_min, pages_low, pages_high;
	/*
	 * We don't know if the memory that we're going to allocate will be freeable
	 * or/and it will be released eventually, so to avoid totally wasting several
	 * GB of ram we must reserve some of the lower zone memory (otherwise we risk
	 * to run OOM on the lower zones despite there's tons of freeable ram
	 * on the higher zones). This array is recalculated at runtime if the
	 * sysctl_lowmem_reserve_ratio sysctl changes.
	 */
	unsigned long		lowmem_reserve[MAX_NR_ZONES];

170
#ifdef CONFIG_NUMA
171
	int node;
172 173 174
	/*
	 * zone reclaim becomes active if more unmapped pages exist.
	 */
175
	unsigned long		min_unmapped_pages;
176
	unsigned long		min_slab_pages;
177 178
	struct per_cpu_pageset	*pageset[NR_CPUS];
#else
L
Linus Torvalds 已提交
179
	struct per_cpu_pageset	pageset[NR_CPUS];
180
#endif
L
Linus Torvalds 已提交
181 182 183 184
	/*
	 * free areas of different sizes
	 */
	spinlock_t		lock;
185 186 187 188
#ifdef CONFIG_MEMORY_HOTPLUG
	/* see spanned/present_pages for more description */
	seqlock_t		span_seqlock;
#endif
L
Linus Torvalds 已提交
189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204
	struct free_area	free_area[MAX_ORDER];


	ZONE_PADDING(_pad1_)

	/* Fields commonly accessed by the page reclaim scanner */
	spinlock_t		lru_lock;	
	struct list_head	active_list;
	struct list_head	inactive_list;
	unsigned long		nr_scan_active;
	unsigned long		nr_scan_inactive;
	unsigned long		nr_active;
	unsigned long		nr_inactive;
	unsigned long		pages_scanned;	   /* since last reclaim */
	int			all_unreclaimable; /* All pages pinned */

205 206
	/* A count of how many reclaimers are scanning this zone */
	atomic_t		reclaim_in_progress;
M
Martin Hicks 已提交
207

208 209
	/* Zone statistics */
	atomic_long_t		vm_stat[NR_VM_ZONE_STAT_ITEMS];
210

L
Linus Torvalds 已提交
211 212 213 214 215 216 217 218 219 220
	/*
	 * prev_priority holds the scanning priority for this zone.  It is
	 * defined as the scanning priority at which we achieved our reclaim
	 * target at the previous try_to_free_pages() or balance_pgdat()
	 * invokation.
	 *
	 * We use prev_priority as a measure of how much stress page reclaim is
	 * under - it drives the swappiness decision: whether to unmap mapped
	 * pages.
	 *
221
	 * Access to both this field is quite racy even on uniprocessor.  But
L
Linus Torvalds 已提交
222 223 224 225 226 227 228 229 230 231
	 * it is expected to average out OK.
	 */
	int prev_priority;


	ZONE_PADDING(_pad2_)
	/* Rarely used or read-mostly fields */

	/*
	 * wait_table		-- the array holding the hash table
232
	 * wait_table_hash_nr_entries	-- the size of the hash table array
L
Linus Torvalds 已提交
233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254
	 * wait_table_bits	-- wait_table_size == (1 << wait_table_bits)
	 *
	 * The purpose of all these is to keep track of the people
	 * waiting for a page to become available and make them
	 * runnable again when possible. The trouble is that this
	 * consumes a lot of space, especially when so few things
	 * wait on pages at a given time. So instead of using
	 * per-page waitqueues, we use a waitqueue hash table.
	 *
	 * The bucket discipline is to sleep on the same queue when
	 * colliding and wake all in that wait queue when removing.
	 * When something wakes, it must check to be sure its page is
	 * truly available, a la thundering herd. The cost of a
	 * collision is great, but given the expected load of the
	 * table, they should be so rare as to be outweighed by the
	 * benefits from the saved space.
	 *
	 * __wait_on_page_locked() and unlock_page() in mm/filemap.c, are the
	 * primary users of these fields, and in mm/page_alloc.c
	 * free_area_init_core() performs the initialization of them.
	 */
	wait_queue_head_t	* wait_table;
255
	unsigned long		wait_table_hash_nr_entries;
L
Linus Torvalds 已提交
256 257 258 259 260 261 262 263 264
	unsigned long		wait_table_bits;

	/*
	 * Discontig memory support fields.
	 */
	struct pglist_data	*zone_pgdat;
	/* zone_start_pfn == zone_start_paddr >> PAGE_SHIFT */
	unsigned long		zone_start_pfn;

265 266 267 268 269 270 271 272 273 274
	/*
	 * zone_start_pfn, spanned_pages and present_pages are all
	 * protected by span_seqlock.  It is a seqlock because it has
	 * to be read outside of zone->lock, and it is done in the main
	 * allocator path.  But, it is written quite infrequently.
	 *
	 * The lock is declared along with zone->lock because it is
	 * frequently read in proximity to zone->lock.  It's good to
	 * give them a chance of being in the same cacheline.
	 */
L
Linus Torvalds 已提交
275 276 277 278 279 280 281
	unsigned long		spanned_pages;	/* total size, including holes */
	unsigned long		present_pages;	/* amount of memory (excluding holes) */

	/*
	 * rarely used fields:
	 */
	char			*name;
282
} ____cacheline_internodealigned_in_smp;
L
Linus Torvalds 已提交
283 284 285 286 287 288 289 290

/*
 * The "priority" of VM scanning is how much of the queues we will scan in one
 * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the
 * queues ("queue_length >> 12") during an aging round.
 */
#define DEF_PRIORITY 12

291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362
/* Maximum number of zones on a zonelist */
#define MAX_ZONES_PER_ZONELIST (MAX_NUMNODES * MAX_NR_ZONES)

#ifdef CONFIG_NUMA
/*
 * We cache key information from each zonelist for smaller cache
 * footprint when scanning for free pages in get_page_from_freelist().
 *
 * 1) The BITMAP fullzones tracks which zones in a zonelist have come
 *    up short of free memory since the last time (last_fullzone_zap)
 *    we zero'd fullzones.
 * 2) The array z_to_n[] maps each zone in the zonelist to its node
 *    id, so that we can efficiently evaluate whether that node is
 *    set in the current tasks mems_allowed.
 *
 * Both fullzones and z_to_n[] are one-to-one with the zonelist,
 * indexed by a zones offset in the zonelist zones[] array.
 *
 * The get_page_from_freelist() routine does two scans.  During the
 * first scan, we skip zones whose corresponding bit in 'fullzones'
 * is set or whose corresponding node in current->mems_allowed (which
 * comes from cpusets) is not set.  During the second scan, we bypass
 * this zonelist_cache, to ensure we look methodically at each zone.
 *
 * Once per second, we zero out (zap) fullzones, forcing us to
 * reconsider nodes that might have regained more free memory.
 * The field last_full_zap is the time we last zapped fullzones.
 *
 * This mechanism reduces the amount of time we waste repeatedly
 * reexaming zones for free memory when they just came up low on
 * memory momentarilly ago.
 *
 * The zonelist_cache struct members logically belong in struct
 * zonelist.  However, the mempolicy zonelists constructed for
 * MPOL_BIND are intentionally variable length (and usually much
 * shorter).  A general purpose mechanism for handling structs with
 * multiple variable length members is more mechanism than we want
 * here.  We resort to some special case hackery instead.
 *
 * The MPOL_BIND zonelists don't need this zonelist_cache (in good
 * part because they are shorter), so we put the fixed length stuff
 * at the front of the zonelist struct, ending in a variable length
 * zones[], as is needed by MPOL_BIND.
 *
 * Then we put the optional zonelist cache on the end of the zonelist
 * struct.  This optional stuff is found by a 'zlcache_ptr' pointer in
 * the fixed length portion at the front of the struct.  This pointer
 * both enables us to find the zonelist cache, and in the case of
 * MPOL_BIND zonelists, (which will just set the zlcache_ptr to NULL)
 * to know that the zonelist cache is not there.
 *
 * The end result is that struct zonelists come in two flavors:
 *  1) The full, fixed length version, shown below, and
 *  2) The custom zonelists for MPOL_BIND.
 * The custom MPOL_BIND zonelists have a NULL zlcache_ptr and no zlcache.
 *
 * Even though there may be multiple CPU cores on a node modifying
 * fullzones or last_full_zap in the same zonelist_cache at the same
 * time, we don't lock it.  This is just hint data - if it is wrong now
 * and then, the allocator will still function, perhaps a bit slower.
 */


struct zonelist_cache {
	DECLARE_BITMAP(fullzones, MAX_ZONES_PER_ZONELIST);	/* zone full? */
	unsigned short z_to_n[MAX_ZONES_PER_ZONELIST];		/* zone->nid */
	unsigned long last_full_zap;		/* when last zap'd (jiffies) */
};
#else
struct zonelist_cache;
#endif

L
Linus Torvalds 已提交
363 364 365 366 367 368
/*
 * One allocation request operates on a zonelist. A zonelist
 * is a list of zones, the first one is the 'goal' of the
 * allocation, the other zones are fallback zones, in decreasing
 * priority.
 *
369 370
 * If zlcache_ptr is not NULL, then it is just the address of zlcache,
 * as explained above.  If zlcache_ptr is NULL, there is no zlcache.
L
Linus Torvalds 已提交
371
 */
372

L
Linus Torvalds 已提交
373
struct zonelist {
374 375 376 377 378
	struct zonelist_cache *zlcache_ptr;		     // NULL or &zlcache
	struct zone *zones[MAX_ZONES_PER_ZONELIST + 1];      // NULL delimited
#ifdef CONFIG_NUMA
	struct zonelist_cache zlcache;			     // optional ...
#endif
L
Linus Torvalds 已提交
379 380
};

381 382 383 384 385 386 387
#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
struct node_active_region {
	unsigned long start_pfn;
	unsigned long end_pfn;
	int nid;
};
#endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
L
Linus Torvalds 已提交
388

389 390 391 392 393
#ifndef CONFIG_DISCONTIGMEM
/* The array of struct pages - for discontigmem use pgdat->lmem_map */
extern struct page *mem_map;
#endif

L
Linus Torvalds 已提交
394 395 396 397 398 399 400 401 402 403 404 405 406 407
/*
 * The pg_data_t structure is used in machines with CONFIG_DISCONTIGMEM
 * (mostly NUMA machines?) to denote a higher-level memory zone than the
 * zone denotes.
 *
 * On NUMA machines, each NUMA node would have a pg_data_t to describe
 * it's memory layout.
 *
 * Memory statistics and page replacement data structures are maintained on a
 * per-zone basis.
 */
struct bootmem_data;
typedef struct pglist_data {
	struct zone node_zones[MAX_NR_ZONES];
408
	struct zonelist node_zonelists[MAX_NR_ZONES];
L
Linus Torvalds 已提交
409
	int nr_zones;
A
Andy Whitcroft 已提交
410
#ifdef CONFIG_FLAT_NODE_MEM_MAP
L
Linus Torvalds 已提交
411
	struct page *node_mem_map;
A
Andy Whitcroft 已提交
412
#endif
L
Linus Torvalds 已提交
413
	struct bootmem_data *bdata;
414 415 416 417 418 419 420 421 422 423
#ifdef CONFIG_MEMORY_HOTPLUG
	/*
	 * Must be held any time you expect node_start_pfn, node_present_pages
	 * or node_spanned_pages stay constant.  Holding this will also
	 * guarantee that any pfn_valid() stays that way.
	 *
	 * Nests above zone->lock and zone->size_seqlock.
	 */
	spinlock_t node_size_lock;
#endif
L
Linus Torvalds 已提交
424 425 426 427 428 429 430 431 432 433 434 435
	unsigned long node_start_pfn;
	unsigned long node_present_pages; /* total number of physical pages */
	unsigned long node_spanned_pages; /* total size of physical page
					     range, including holes */
	int node_id;
	wait_queue_head_t kswapd_wait;
	struct task_struct *kswapd;
	int kswapd_max_order;
} pg_data_t;

#define node_present_pages(nid)	(NODE_DATA(nid)->node_present_pages)
#define node_spanned_pages(nid)	(NODE_DATA(nid)->node_spanned_pages)
A
Andy Whitcroft 已提交
436
#ifdef CONFIG_FLAT_NODE_MEM_MAP
437
#define pgdat_page_nr(pgdat, pagenr)	((pgdat)->node_mem_map + (pagenr))
A
Andy Whitcroft 已提交
438 439 440
#else
#define pgdat_page_nr(pgdat, pagenr)	pfn_to_page((pgdat)->node_start_pfn + (pagenr))
#endif
441
#define nid_page_nr(nid, pagenr) 	pgdat_page_nr(NODE_DATA(nid),(pagenr))
L
Linus Torvalds 已提交
442

443 444
#include <linux/memory_hotplug.h>

L
Linus Torvalds 已提交
445 446 447 448 449 450 451
void __get_zone_counts(unsigned long *active, unsigned long *inactive,
			unsigned long *free, struct pglist_data *pgdat);
void get_zone_counts(unsigned long *active, unsigned long *inactive,
			unsigned long *free);
void build_all_zonelists(void);
void wakeup_kswapd(struct zone *zone, int order);
int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
R
Rohit Seth 已提交
452
		int classzone_idx, int alloc_flags);
L
Linus Torvalds 已提交
453

454 455 456
extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
				     unsigned long size);

L
Linus Torvalds 已提交
457 458 459 460 461 462 463 464 465 466 467 468 469 470 471
#ifdef CONFIG_HAVE_MEMORY_PRESENT
void memory_present(int nid, unsigned long start, unsigned long end);
#else
static inline void memory_present(int nid, unsigned long start, unsigned long end) {}
#endif

#ifdef CONFIG_NEED_NODE_MEMMAP_SIZE
unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
#endif

/*
 * zone_idx() returns 0 for the ZONE_DMA zone, 1 for the ZONE_NORMAL zone, etc.
 */
#define zone_idx(zone)		((zone) - (zone)->zone_pgdat->node_zones)

472 473 474 475 476
static inline int populated_zone(struct zone *zone)
{
	return (!!zone->present_pages);
}

477
static inline int is_highmem_idx(enum zone_type idx)
L
Linus Torvalds 已提交
478
{
479
#ifdef CONFIG_HIGHMEM
L
Linus Torvalds 已提交
480
	return (idx == ZONE_HIGHMEM);
481 482 483
#else
	return 0;
#endif
L
Linus Torvalds 已提交
484 485
}

486
static inline int is_normal_idx(enum zone_type idx)
L
Linus Torvalds 已提交
487 488 489
{
	return (idx == ZONE_NORMAL);
}
N
Nick Piggin 已提交
490

L
Linus Torvalds 已提交
491 492 493 494 495 496 497 498
/**
 * is_highmem - helper function to quickly check if a struct zone is a 
 *              highmem zone or not.  This is an attempt to keep references
 *              to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum.
 * @zone - pointer to struct zone variable
 */
static inline int is_highmem(struct zone *zone)
{
499
#ifdef CONFIG_HIGHMEM
L
Linus Torvalds 已提交
500
	return zone == zone->zone_pgdat->node_zones + ZONE_HIGHMEM;
501 502 503
#else
	return 0;
#endif
L
Linus Torvalds 已提交
504 505 506 507 508 509 510
}

static inline int is_normal(struct zone *zone)
{
	return zone == zone->zone_pgdat->node_zones + ZONE_NORMAL;
}

N
Nick Piggin 已提交
511 512
static inline int is_dma32(struct zone *zone)
{
513
#ifdef CONFIG_ZONE_DMA32
N
Nick Piggin 已提交
514
	return zone == zone->zone_pgdat->node_zones + ZONE_DMA32;
515 516 517
#else
	return 0;
#endif
N
Nick Piggin 已提交
518 519 520 521 522 523 524
}

static inline int is_dma(struct zone *zone)
{
	return zone == zone->zone_pgdat->node_zones + ZONE_DMA;
}

L
Linus Torvalds 已提交
525 526 527 528 529 530 531 532
/* These two functions are used to setup the per zone pages min values */
struct ctl_table;
struct file;
int min_free_kbytes_sysctl_handler(struct ctl_table *, int, struct file *, 
					void __user *, size_t *, loff_t *);
extern int sysctl_lowmem_reserve_ratio[MAX_NR_ZONES-1];
int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, struct file *,
					void __user *, size_t *, loff_t *);
533 534
int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *, int, struct file *,
					void __user *, size_t *, loff_t *);
535 536
int sysctl_min_unmapped_ratio_sysctl_handler(struct ctl_table *, int,
			struct file *, void __user *, size_t *, loff_t *);
537 538
int sysctl_min_slab_ratio_sysctl_handler(struct ctl_table *, int,
			struct file *, void __user *, size_t *, loff_t *);
L
Linus Torvalds 已提交
539 540 541

#include <linux/topology.h>
/* Returns the number of the current Node. */
542
#ifndef numa_node_id
I
Ingo Molnar 已提交
543
#define numa_node_id()		(cpu_to_node(raw_smp_processor_id()))
544
#endif
L
Linus Torvalds 已提交
545

546
#ifndef CONFIG_NEED_MULTIPLE_NODES
L
Linus Torvalds 已提交
547 548 549 550 551 552

extern struct pglist_data contig_page_data;
#define NODE_DATA(nid)		(&contig_page_data)
#define NODE_MEM_MAP(nid)	mem_map
#define MAX_NODES_SHIFT		1

553
#else /* CONFIG_NEED_MULTIPLE_NODES */
L
Linus Torvalds 已提交
554 555 556

#include <asm/mmzone.h>

557
#endif /* !CONFIG_NEED_MULTIPLE_NODES */
558

559 560 561
extern struct pglist_data *first_online_pgdat(void);
extern struct pglist_data *next_online_pgdat(struct pglist_data *pgdat);
extern struct zone *next_zone(struct zone *zone);
562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582

/**
 * for_each_pgdat - helper macro to iterate over all nodes
 * @pgdat - pointer to a pg_data_t variable
 */
#define for_each_online_pgdat(pgdat)			\
	for (pgdat = first_online_pgdat();		\
	     pgdat;					\
	     pgdat = next_online_pgdat(pgdat))
/**
 * for_each_zone - helper macro to iterate over all memory zones
 * @zone - pointer to struct zone variable
 *
 * The user only needs to declare the zone variable, for_each_zone
 * fills it in.
 */
#define for_each_zone(zone)			        \
	for (zone = (first_online_pgdat())->node_zones; \
	     zone;					\
	     zone = next_zone(zone))

A
Andy Whitcroft 已提交
583 584 585 586
#ifdef CONFIG_SPARSEMEM
#include <asm/sparsemem.h>
#endif

587
#if BITS_PER_LONG == 32
L
Linus Torvalds 已提交
588
/*
A
Andi Kleen 已提交
589 590
 * with 32 bit page->flags field, we reserve 9 bits for node/zone info.
 * there are 4 zones (3 bits) and this leaves 9-3=6 bits for nodes.
L
Linus Torvalds 已提交
591
 */
A
Andi Kleen 已提交
592
#define FLAGS_RESERVED		9
593

L
Linus Torvalds 已提交
594 595 596 597
#elif BITS_PER_LONG == 64
/*
 * with 64 bit flags field, there's plenty of room.
 */
598
#define FLAGS_RESERVED		32
L
Linus Torvalds 已提交
599

600
#else
L
Linus Torvalds 已提交
601

602
#error BITS_PER_LONG not defined
L
Linus Torvalds 已提交
603 604 605

#endif

606 607
#if !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) && \
	!defined(CONFIG_ARCH_POPULATES_NODE_MAP)
608 609 610
#define early_pfn_to_nid(nid)  (0UL)
#endif

611 612 613 614
#ifdef CONFIG_FLATMEM
#define pfn_to_nid(pfn)		(0)
#endif

A
Andy Whitcroft 已提交
615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641
#define pfn_to_section_nr(pfn) ((pfn) >> PFN_SECTION_SHIFT)
#define section_nr_to_pfn(sec) ((sec) << PFN_SECTION_SHIFT)

#ifdef CONFIG_SPARSEMEM

/*
 * SECTION_SHIFT    		#bits space required to store a section #
 *
 * PA_SECTION_SHIFT		physical address to/from section number
 * PFN_SECTION_SHIFT		pfn to/from section number
 */
#define SECTIONS_SHIFT		(MAX_PHYSMEM_BITS - SECTION_SIZE_BITS)

#define PA_SECTION_SHIFT	(SECTION_SIZE_BITS)
#define PFN_SECTION_SHIFT	(SECTION_SIZE_BITS - PAGE_SHIFT)

#define NR_MEM_SECTIONS		(1UL << SECTIONS_SHIFT)

#define PAGES_PER_SECTION       (1UL << PFN_SECTION_SHIFT)
#define PAGE_SECTION_MASK	(~(PAGES_PER_SECTION-1))

#if (MAX_ORDER - 1 + PAGE_SHIFT) > SECTION_SIZE_BITS
#error Allocator MAX_ORDER exceeds SECTION_SIZE
#endif

struct page;
struct mem_section {
A
Andy Whitcroft 已提交
642 643 644 645 646
	/*
	 * This is, logically, a pointer to an array of struct
	 * pages.  However, it is stored with some other magic.
	 * (see sparse.c::sparse_init_one_section())
	 *
647 648 649 650
	 * Additionally during early boot we encode node id of
	 * the location of the section here to guide allocation.
	 * (see sparse.c::memory_present())
	 *
A
Andy Whitcroft 已提交
651 652 653 654
	 * Making it a UL at least makes someone do a cast
	 * before using it wrong.
	 */
	unsigned long section_mem_map;
A
Andy Whitcroft 已提交
655 656
};

657 658 659 660 661
#ifdef CONFIG_SPARSEMEM_EXTREME
#define SECTIONS_PER_ROOT       (PAGE_SIZE / sizeof (struct mem_section))
#else
#define SECTIONS_PER_ROOT	1
#endif
B
Bob Picco 已提交
662

663 664 665
#define SECTION_NR_TO_ROOT(sec)	((sec) / SECTIONS_PER_ROOT)
#define NR_SECTION_ROOTS	(NR_MEM_SECTIONS / SECTIONS_PER_ROOT)
#define SECTION_ROOT_MASK	(SECTIONS_PER_ROOT - 1)
B
Bob Picco 已提交
666

667 668
#ifdef CONFIG_SPARSEMEM_EXTREME
extern struct mem_section *mem_section[NR_SECTION_ROOTS];
B
Bob Picco 已提交
669
#else
670 671
extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
#endif
A
Andy Whitcroft 已提交
672

A
Andy Whitcroft 已提交
673 674
static inline struct mem_section *__nr_to_section(unsigned long nr)
{
675 676 677
	if (!mem_section[SECTION_NR_TO_ROOT(nr)])
		return NULL;
	return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
A
Andy Whitcroft 已提交
678
}
679
extern int __section_nr(struct mem_section* ms);
A
Andy Whitcroft 已提交
680 681 682 683 684 685 686 687 688 689

/*
 * We use the lower bits of the mem_map pointer to store
 * a little bit of information.  There should be at least
 * 3 bits here due to 32-bit alignment.
 */
#define	SECTION_MARKED_PRESENT	(1UL<<0)
#define SECTION_HAS_MEM_MAP	(1UL<<1)
#define SECTION_MAP_LAST_BIT	(1UL<<2)
#define SECTION_MAP_MASK	(~(SECTION_MAP_LAST_BIT-1))
690
#define SECTION_NID_SHIFT	2
A
Andy Whitcroft 已提交
691 692 693 694 695 696 697 698 699 700

static inline struct page *__section_mem_map_addr(struct mem_section *section)
{
	unsigned long map = section->section_mem_map;
	map &= SECTION_MAP_MASK;
	return (struct page *)map;
}

static inline int valid_section(struct mem_section *section)
{
B
Bob Picco 已提交
701
	return (section && (section->section_mem_map & SECTION_MARKED_PRESENT));
A
Andy Whitcroft 已提交
702 703 704 705
}

static inline int section_has_mem_map(struct mem_section *section)
{
B
Bob Picco 已提交
706
	return (section && (section->section_mem_map & SECTION_HAS_MEM_MAP));
A
Andy Whitcroft 已提交
707 708 709 710 711 712 713
}

static inline int valid_section_nr(unsigned long nr)
{
	return valid_section(__nr_to_section(nr));
}

A
Andy Whitcroft 已提交
714 715
static inline struct mem_section *__pfn_to_section(unsigned long pfn)
{
A
Andy Whitcroft 已提交
716
	return __nr_to_section(pfn_to_section_nr(pfn));
A
Andy Whitcroft 已提交
717 718 719 720 721 722
}

static inline int pfn_valid(unsigned long pfn)
{
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;
A
Andy Whitcroft 已提交
723
	return valid_section(__nr_to_section(pfn_to_section_nr(pfn)));
A
Andy Whitcroft 已提交
724 725 726 727 728 729 730 731
}

/*
 * These are _only_ used during initialisation, therefore they
 * can use __initdata ...  They could have names to indicate
 * this restriction.
 */
#ifdef CONFIG_NUMA
732 733 734 735 736
#define pfn_to_nid(pfn)							\
({									\
	unsigned long __pfn_to_nid_pfn = (pfn);				\
	page_to_nid(pfn_to_page(__pfn_to_nid_pfn));			\
})
737 738
#else
#define pfn_to_nid(pfn)		(0)
A
Andy Whitcroft 已提交
739 740 741 742 743 744
#endif

#define early_pfn_valid(pfn)	pfn_valid(pfn)
void sparse_init(void);
#else
#define sparse_init()	do {} while (0)
745
#define sparse_index_init(_sec, _nid)  do {} while (0)
A
Andy Whitcroft 已提交
746 747
#endif /* CONFIG_SPARSEMEM */

748 749 750 751 752 753
#ifdef CONFIG_NODES_SPAN_OTHER_NODES
#define early_pfn_in_nid(pfn, nid)	(early_pfn_to_nid(pfn) == (nid))
#else
#define early_pfn_in_nid(pfn, nid)	(1)
#endif

A
Andy Whitcroft 已提交
754 755 756 757 758 759 760
#ifndef early_pfn_valid
#define early_pfn_valid(pfn)	(1)
#endif

void memory_present(int nid, unsigned long start, unsigned long end);
unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);

L
Linus Torvalds 已提交
761 762 763
#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _LINUX_MMZONE_H */