/*
 * Copyright (c) 2000, 2003 Silicon Graphics, Inc.  All rights reserved.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 Tony Luck <tony.luck@intel.com>
 * Copyright (c) 2002 NEC Corp.
 * Copyright (c) 2002 Kimio Suganuma <k-suganuma@da.jp.nec.com>
 * Copyright (c) 2004 Silicon Graphics, Inc
 *	Russ Anderson <rja@sgi.com>
 *	Jesse Barnes <jbarnes@sgi.com>
 *	Jack Steiner <steiner@sgi.com>
 */

/*
 * Platform initialization for Discontig Memory
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/bootmem.h>
#include <linux/acpi.h>
#include <linux/efi.h>
#include <linux/nodemask.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/meminit.h>
#include <asm/numa.h>
#include <asm/sections.h>

/*
 * Track per-node information needed to setup the boot memory allocator, the
 * per-node areas, and the real VM.
 */
struct early_node_data {
	struct ia64_node_data *node_data;
	unsigned long pernode_addr;
	unsigned long pernode_size;
	struct bootmem_data bootmem_data;
	unsigned long num_physpages;
	unsigned long num_dma_physpages;
	unsigned long min_pfn;
	unsigned long max_pfn;
};

static struct early_node_data mem_data[MAX_NUMNODES] __initdata;
static nodemask_t memory_less_mask __initdata;

static pg_data_t *pgdat_list[MAX_NUMNODES];

/*
 * To prevent cache aliasing effects, align per-node structures so that they
 * start at addresses that are strided by node number.
 */
#define MAX_NODE_ALIGN_OFFSET	(32 * 1024 * 1024)
#define NODEDATA_ALIGN(addr, node)						\
	((((addr) + 1024*1024-1) & ~(1024*1024-1)) + 				\
	     (((node)*PERCPU_PAGE_SIZE) & (MAX_NODE_ALIGN_OFFSET - 1)))
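
/*
 * Rough worked example of NODEDATA_ALIGN (assuming the usual 64KB
 * PERCPU_PAGE_SIZE on ia64): for start = 0x04000300 on node 2, the address
 * is first rounded up to the next 1MB boundary (0x04100000) and then offset
 * by 2 * 64KB = 0x20000, giving 0x04120000.  Each node's pernode area is
 * thus staggered by a node-dependent offset (wrapped at 32MB), so the
 * per-node structures of different nodes do not all land on the same
 * cache alias sets.
 */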

/**
 * build_node_maps - callback to setup bootmem structs for each node
 * @start: physical start of range
 * @len: length of range
 * @node: node where this range resides
 *
 * We allocate a struct bootmem_data for each piece of memory that we wish to
 * treat as a virtually contiguous block (i.e. each node). Each such block
 * must start on an %IA64_GRANULE_SIZE boundary, so we round the address down
 * if necessary.  Any non-existent pages will simply be part of the virtual
 * memmap.  We also update min_low_pfn and max_low_pfn here as we receive
 * memory ranges from the caller.
 */
static int __init build_node_maps(unsigned long start, unsigned long len,
				  int node)
{
	unsigned long cstart, epfn, end = start + len;
	struct bootmem_data *bdp = &mem_data[node].bootmem_data;

	epfn = GRANULEROUNDUP(end) >> PAGE_SHIFT;
	cstart = GRANULEROUNDDOWN(start);

	if (!bdp->node_low_pfn) {
		bdp->node_boot_start = cstart;
		bdp->node_low_pfn = epfn;
	} else {
		bdp->node_boot_start = min(cstart, bdp->node_boot_start);
		bdp->node_low_pfn = max(epfn, bdp->node_low_pfn);
	}

	min_low_pfn = min(min_low_pfn, bdp->node_boot_start>>PAGE_SHIFT);
	max_low_pfn = max(max_low_pfn, bdp->node_low_pfn);

	return 0;
}
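
/*
 * Illustration of the merging above: if a node's EFI entries cover
 * 0x04000000-0x04800000 and 0x08000000-0x08400000, that node's bootmem_data
 * ends up spanning from 0x04000000 up to the granule-rounded end of the
 * second range; the gap between the two ranges is not usable memory, it
 * simply shows up as a hole covered by the virtual memmap.
 */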

/**
 * early_nr_cpus_node - return number of cpus on a given node
 * @node: node to check
 *
 * Count the number of cpus on @node.  We can't use nr_cpus_node() yet because
 * acpi_boot_init() (which builds the node_to_cpu_mask array) hasn't been
 * called yet.  Note that node 0 will also count all non-existent cpus.
 */
static int __meminit early_nr_cpus_node(int node)
{
	int cpu, n = 0;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (node == node_cpuid[cpu].nid)
			n++;

	return n;
}

/**
 * compute_pernodesize - compute size of pernode data
 * @node: the node id.
 */
static unsigned long __meminit compute_pernodesize(int node)
{
	unsigned long pernodesize = 0, cpus;

	cpus = early_nr_cpus_node(node);
	pernodesize += PERCPU_PAGE_SIZE * cpus;
	pernodesize += node * L1_CACHE_BYTES;
	pernodesize += L1_CACHE_ALIGN(sizeof(pg_data_t));
	pernodesize += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));
	pernodesize = PAGE_ALIGN(pernodesize);
	return pernodesize;
}
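
/*
 * The sizes accumulated above mirror the layout that fill_pernode() carves
 * out below: the per-cpu pages first, then a small node-dependent cache-line
 * skew (node * L1_CACHE_BYTES), then the node's pg_data_t and ia64_node_data.
 * As a rough example, a node with 4 cpus and 64KB per-cpu pages needs a bit
 * over 256KB, which PAGE_ALIGN() then rounds up to a whole page.
 */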

/**
 * per_cpu_node_setup - setup per-cpu areas on each node
 * @cpu_data: per-cpu area on this node
 * @node: node to setup
 *
 * Copy the static per-cpu data into the region we just set aside and then
 * setup __per_cpu_offset for each CPU on this node.  Return a pointer to
 * the end of the area.
 */
static void *per_cpu_node_setup(void *cpu_data, int node)
{
#ifdef CONFIG_SMP
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		if (node == node_cpuid[cpu].nid) {
			memcpy(__va(cpu_data), __phys_per_cpu_start,
			       __per_cpu_end - __per_cpu_start);
			__per_cpu_offset[cpu] = (char*)__va(cpu_data) -
				__per_cpu_start;
			cpu_data += PERCPU_PAGE_SIZE;
		}
	}
#endif
	return cpu_data;
}
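
/*
 * With __per_cpu_offset[cpu] set up as above, a per-cpu variable for that
 * cpu is reached (roughly) as *(&per_cpu__var + __per_cpu_offset[cpu]),
 * i.e. it resolves into the PERCPU_PAGE_SIZE slot that was copied out of
 * __phys_per_cpu_start for that cpu on its home node.
 */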

/**
 * fill_pernode - initialize pernode data.
 * @node: the node id.
 * @pernode: physical address of pernode data
 * @pernodesize: size of the pernode data
 */
static void __init fill_pernode(int node, unsigned long pernode,
	unsigned long pernodesize)
{
	void *cpu_data;
	int cpus = early_nr_cpus_node(node);
	struct bootmem_data *bdp = &mem_data[node].bootmem_data;

	mem_data[node].pernode_addr = pernode;
	mem_data[node].pernode_size = pernodesize;
	memset(__va(pernode), 0, pernodesize);

	cpu_data = (void *)pernode;
	pernode += PERCPU_PAGE_SIZE * cpus;
	pernode += node * L1_CACHE_BYTES;

	pgdat_list[node] = __va(pernode);
	pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));

	mem_data[node].node_data = __va(pernode);
	pernode += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));

	pgdat_list[node]->bdata = bdp;
	pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));

	cpu_data = per_cpu_node_setup(cpu_data, node);

	return;
}

/**
 * find_pernode_space - allocate memory for memory map and per-node structures
 * @start: physical start of range
 * @len: length of range
 * @node: node where this range resides
 *
 * This routine reserves space for the per-cpu data struct, the list of
 * pg_data_ts and the per-node data struct.  Each node will have something like
 * the following in the first chunk of addr. space large enough to hold it.
 *
 *    ________________________
 *   |                        |
 *   |~~~~~~~~~~~~~~~~~~~~~~~~| <-- NODEDATA_ALIGN(start, node) for the first
 *   |    PERCPU_PAGE_SIZE *  |     start and length big enough
 *   |    cpus_on_this_node   | Node 0 will also have entries for all non-existent cpus.
 *   |------------------------|
 *   |   local pg_data_t *    |
 *   |------------------------|
 *   |  local ia64_node_data  |
 *   |------------------------|
 *   |          ???           |
 *   |________________________|
 *
 * Once this space has been set aside, the bootmem maps are initialized.  We
 * could probably move the allocation of the per-cpu and ia64_node_data space
 * outside of this function and use alloc_bootmem_node(), but doing it here
 * is straightforward and we get the alignments we want so...
 */
static int __init find_pernode_space(unsigned long start, unsigned long len,
				     int node)
{
	unsigned long epfn;
	unsigned long pernodesize = 0, pernode, pages, mapsize;
	struct bootmem_data *bdp = &mem_data[node].bootmem_data;

	epfn = (start + len) >> PAGE_SHIFT;

	pages = bdp->node_low_pfn - (bdp->node_boot_start >> PAGE_SHIFT);
	mapsize = bootmem_bootmap_pages(pages) << PAGE_SHIFT;

	/*
	 * Make sure this memory falls within this node's usable memory
	 * since we may have thrown some away in build_node_maps().
	 */
	if (start < bdp->node_boot_start || epfn > bdp->node_low_pfn)
		return 0;

	/* Don't setup this node's local space twice... */
	if (mem_data[node].pernode_addr)
		return 0;

	/*
	 * Calculate total size needed, incl. what's necessary
	 * for good alignment and alias prevention.
	 */
	pernodesize = compute_pernodesize(node);
	pernode = NODEDATA_ALIGN(start, node);

	/* Is this range big enough for what we want to store here? */
	if (start + len > (pernode + pernodesize + mapsize))
		fill_pernode(node, pernode, pernodesize);

	return 0;
}

/**
 * free_node_bootmem - free bootmem allocator memory for use
 * @start: physical start of range
 * @len: length of range
 * @node: node where this range resides
 *
 * Simply calls the bootmem allocator to free the specified range from
 * the given pg_data_t's bdata struct.  After this function has been called
 * for all the entries in the EFI memory map, the bootmem allocator will
 * be ready to service allocation requests.
 */
static int __init free_node_bootmem(unsigned long start, unsigned long len,
				    int node)
{
	free_bootmem_node(pgdat_list[node], start, len);

	return 0;
}

/**
 * reserve_pernode_space - reserve memory for per-node space
 *
 * Reserve the space used by the bootmem maps & per-node space in the boot
 * allocator so that when we actually create the real mem maps we don't
 * use their memory.
 */
static void __init reserve_pernode_space(void)
{
	unsigned long base, size, pages;
	struct bootmem_data *bdp;
	int node;

	for_each_online_node(node) {
		pg_data_t *pdp = pgdat_list[node];

		if (node_isset(node, memory_less_mask))
			continue;

		bdp = pdp->bdata;

		/* First the bootmem_map itself */
		pages = bdp->node_low_pfn - (bdp->node_boot_start>>PAGE_SHIFT);
		size = bootmem_bootmap_pages(pages) << PAGE_SHIFT;
		base = __pa(bdp->node_bootmem_map);
		reserve_bootmem_node(pdp, base, size);

		/* Now the per-node space */
		size = mem_data[node].pernode_size;
		base = __pa(mem_data[node].pernode_addr);
		reserve_bootmem_node(pdp, base, size);
	}
}

static void __meminit scatter_node_data(void)
{
	pg_data_t **dst;
	int node;

	/*
	 * for_each_online_node() can't be used here.
	 * node_online_map is not set for hot-added nodes at this time,
	 * because we are halfway through initialization of the new node's
	 * structures.  If for_each_online_node() is used, a new node's
	 * pg_data_ptrs will not be initialized. Instead of using it,
	 * pgdat_list[] is checked.
	 */
	for_each_node(node) {
		if (pgdat_list[node]) {
			dst = LOCAL_DATA_ADDR(pgdat_list[node])->pg_data_ptrs;
			memcpy(dst, pgdat_list, sizeof(pgdat_list));
		}
	}
}
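
/*
 * After scatter_node_data(), every node's local ia64_node_data holds a full
 * copy of pgdat_list, so a lookup such as NODE_DATA(nid) (which on ia64
 * reads local_node_data->pg_data_ptrs[nid]) never has to chase a pointer
 * into another node's memory.
 */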

/**
 * initialize_pernode_data - fixup per-cpu & per-node pointers
 *
 * Each node's per-node area has a copy of the global pg_data_t list, so
 * we copy that to each node here, as well as setting the per-cpu pointer
 * to the local node data structure.  The active_cpus field of the per-node
 * structure gets setup by the platform_cpu_init() function later.
 */
static void __init initialize_pernode_data(void)
{
	int cpu, node;

	scatter_node_data();

#ifdef CONFIG_SMP
	/* Set the node_data pointer for each per-cpu struct */
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		node = node_cpuid[cpu].nid;
		per_cpu(cpu_info, cpu).node_data = mem_data[node].node_data;
	}
#else
	{
		struct cpuinfo_ia64 *cpu0_cpu_info;
		cpu = 0;
		node = node_cpuid[cpu].nid;
		cpu0_cpu_info = (struct cpuinfo_ia64 *)(__phys_per_cpu_start +
			((char *)&per_cpu__cpu_info - __per_cpu_start));
		cpu0_cpu_info->node_data = mem_data[node].node_data;
	}
#endif /* CONFIG_SMP */
}

/**
 * memory_less_node_alloc - attempt to allocate memory on the best NUMA SLIT
 *	node, but fall back to any other node when __alloc_bootmem_node fails
 *	for the best node.
 * @nid: node id
 * @pernodesize: size of this node's pernode data
 */
static void __init *memory_less_node_alloc(int nid, unsigned long pernodesize)
{
	void *ptr = NULL;
	u8 best = 0xff;
	int bestnode = -1, node, anynode = 0;

	for_each_online_node(node) {
		if (node_isset(node, memory_less_mask))
			continue;
		else if (node_distance(nid, node) < best) {
			best = node_distance(nid, node);
			bestnode = node;
		}
		anynode = node;
	}

	if (bestnode == -1)
		bestnode = anynode;

	ptr = __alloc_bootmem_node(pgdat_list[bestnode], pernodesize,
		PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));

	return ptr;
}
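
/*
 * node_distance() is derived from the ACPI SLIT.  With the common default of
 * 10 for local and 20 for every remote node, the "< best" test above simply
 * settles on the first memory-bearing node walked, and the anynode fallback
 * only matters if the SLIT reports nothing closer than 0xff.
 */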

/**
 * memory_less_nodes - allocate and initialize CPU only nodes pernode
 *	information.
 */
static void __init memory_less_nodes(void)
{
	unsigned long pernodesize;
	void *pernode;
	int node;

	for_each_node_mask(node, memory_less_mask) {
		pernodesize = compute_pernodesize(node);
		pernode = memory_less_node_alloc(node, pernodesize);
		fill_pernode(node, __pa(pernode), pernodesize);
	}

	return;
}

#ifdef CONFIG_SPARSEMEM
/**
 * register_sparse_mem - notify SPARSEMEM that this memory range exists.
 * @start: physical start of range
 * @end: physical end of range
 * @arg: unused
 *
 * Simply calls SPARSEMEM to register memory section(s).
 */
static int __init register_sparse_mem(unsigned long start, unsigned long end,
	void *arg)
{
	int nid;

	start = __pa(start) >> PAGE_SHIFT;
	end = __pa(end) >> PAGE_SHIFT;
	nid = early_pfn_to_nid(start);
	memory_present(nid, start, end);

	return 0;
}

static void __init arch_sparse_init(void)
{
	efi_memmap_walk(register_sparse_mem, NULL);
	sparse_init();
}
#else
#define arch_sparse_init() do {} while (0)
#endif

/**
 * find_memory - walk the EFI memory map and setup the bootmem allocator
 *
 * Called early in boot to setup the bootmem allocator, and to
 * allocate the per-cpu and per-node structures.
 */
void __init find_memory(void)
{
	int node;

	reserve_memory();

	if (num_online_nodes() == 0) {
		printk(KERN_ERR "node info missing!\n");
		node_set_online(0);
	}

	nodes_or(memory_less_mask, memory_less_mask, node_online_map);
	min_low_pfn = -1;
	max_low_pfn = 0;

	/* These actually end up getting called by call_pernode_memory() */
	efi_memmap_walk(filter_rsvd_memory, build_node_maps);
	efi_memmap_walk(filter_rsvd_memory, find_pernode_space);

	for_each_online_node(node)
		if (mem_data[node].bootmem_data.node_low_pfn) {
			node_clear(node, memory_less_mask);
			mem_data[node].min_pfn = ~0UL;
		}
	/*
	 * Initialize the boot memory maps in reverse order since that's
	 * what the bootmem allocator expects
	 */
	for (node = MAX_NUMNODES - 1; node >= 0; node--) {
		unsigned long pernode, pernodesize, map;
		struct bootmem_data *bdp;

		if (!node_online(node))
			continue;
		else if (node_isset(node, memory_less_mask))
			continue;

		bdp = &mem_data[node].bootmem_data;
		pernode = mem_data[node].pernode_addr;
		pernodesize = mem_data[node].pernode_size;
		map = pernode + pernodesize;

		init_bootmem_node(pgdat_list[node],
				  map>>PAGE_SHIFT,
				  bdp->node_boot_start>>PAGE_SHIFT,
				  bdp->node_low_pfn);
	}

	efi_memmap_walk(filter_rsvd_memory, free_node_bootmem);

	reserve_pernode_space();
	memory_less_nodes();
	initialize_pernode_data();

	max_pfn = max_low_pfn;

	find_initrd();
}

#ifdef CONFIG_SMP
/**
 * per_cpu_init - setup per-cpu variables
 *
 * find_pernode_space() does most of this already, we just need to set
 * local_per_cpu_offset
 */
void __cpuinit *per_cpu_init(void)
{
	int cpu;
	static int first_time = 1;


	if (smp_processor_id() != 0)
		return __per_cpu_start + __per_cpu_offset[smp_processor_id()];

	if (first_time) {
		first_time = 0;
		for (cpu = 0; cpu < NR_CPUS; cpu++)
			per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
	}

	return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
}
#endif /* CONFIG_SMP */

#ifdef CONFIG_VIRTUAL_MEM_MAP
static inline int find_next_valid_pfn_for_pgdat(pg_data_t *pgdat, int i)
{
	unsigned long end_address, hole_next_pfn;
	unsigned long stop_address;

	end_address = (unsigned long) &vmem_map[pgdat->node_start_pfn + i];
	end_address = PAGE_ALIGN(end_address);

	stop_address = (unsigned long) &vmem_map[
		pgdat->node_start_pfn + pgdat->node_spanned_pages];

	do {
		pgd_t *pgd;
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		pgd = pgd_offset_k(end_address);
		if (pgd_none(*pgd)) {
			end_address += PGDIR_SIZE;
			continue;
		}

		pud = pud_offset(pgd, end_address);
		if (pud_none(*pud)) {
			end_address += PUD_SIZE;
			continue;
		}

		pmd = pmd_offset(pud, end_address);
		if (pmd_none(*pmd)) {
			end_address += PMD_SIZE;
			continue;
		}

		pte = pte_offset_kernel(pmd, end_address);
retry_pte:
		if (pte_none(*pte)) {
			end_address += PAGE_SIZE;
			pte++;
			if ((end_address < stop_address) &&
			    (end_address != ALIGN(end_address, 1UL << PMD_SHIFT)))
				goto retry_pte;
			continue;
		}
		/* Found next valid vmem_map page */
		break;
	} while (end_address < stop_address);

	end_address = min(end_address, stop_address);
	end_address = end_address - (unsigned long) vmem_map + sizeof(struct page) - 1;
	hole_next_pfn = end_address / sizeof(struct page);
	return hole_next_pfn - pgdat->node_start_pfn;
}
#else
static inline int find_next_valid_pfn_for_pgdat(pg_data_t *pgdat, int i)
{
	return i + 1;
}
#endif

/**
 * show_mem - give short summary of memory stats
 *
 * Shows a simple page count of reserved and used pages in the system.
 * For discontig machines, it does this on a per-pgdat basis.
 */
void show_mem(void)
{
	int i, total_reserved = 0;
	int total_shared = 0, total_cached = 0;
	unsigned long total_present = 0;
	pg_data_t *pgdat;

	printk("Mem-info:\n");
	show_free_areas();
	printk("Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
	for_each_online_pgdat(pgdat) {
		unsigned long present;
		unsigned long flags;
		int shared = 0, cached = 0, reserved = 0;

		printk("Node ID: %d\n", pgdat->node_id);
		pgdat_resize_lock(pgdat, &flags);
		present = pgdat->node_present_pages;
		for(i = 0; i < pgdat->node_spanned_pages; i++) {
			struct page *page;
			if (pfn_valid(pgdat->node_start_pfn + i))
				page = pfn_to_page(pgdat->node_start_pfn + i);
			else {
				i = find_next_valid_pfn_for_pgdat(pgdat, i) - 1;
				continue;
			}
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				shared += page_count(page)-1;
		}
		pgdat_resize_unlock(pgdat, &flags);
		total_present += present;
		total_reserved += reserved;
		total_cached += cached;
		total_shared += shared;
		printk("\t%ld pages of RAM\n", present);
		printk("\t%d reserved pages\n", reserved);
		printk("\t%d pages shared\n", shared);
		printk("\t%d pages swap cached\n", cached);
	}
	printk("%ld pages of RAM\n", total_present);
	printk("%d reserved pages\n", total_reserved);
	printk("%d pages shared\n", total_shared);
	printk("%d pages swap cached\n", total_cached);
	printk("Total of %ld pages in page table cache\n",
		pgtable_quicklist_total_size());
	printk("%d free buffer pages\n", nr_free_buffer_pages());
}

/**
 * call_pernode_memory - use SRAT to call callback functions with node info
 * @start: physical start of range
 * @len: length of range
 * @arg: function to call for each range
 *
 * efi_memmap_walk() knows nothing about layout of memory across nodes. Find
 * out to which node a block of memory belongs.  Ignore memory that we cannot
 * identify, and split blocks that run across multiple nodes.
 *
 * Take this opportunity to round the start address up and the end address
 * down to page boundaries.
 */
void call_pernode_memory(unsigned long start, unsigned long len, void *arg)
{
	unsigned long rs, re, end = start + len;
	void (*func)(unsigned long, unsigned long, int);
	int i;

	start = PAGE_ALIGN(start);
	end &= PAGE_MASK;
	if (start >= end)
		return;

	func = arg;

	if (!num_node_memblks) {
		/* No SRAT table, so assume one node (node 0) */
		if (start < end)
			(*func)(start, end - start, 0);
		return;
	}

	for (i = 0; i < num_node_memblks; i++) {
		rs = max(start, node_memblk[i].start_paddr);
		re = min(end, node_memblk[i].start_paddr +
			 node_memblk[i].size);

		if (rs < re)
			(*func)(rs, re - rs, node_memblk[i].nid);

		if (re == end)
			break;
	}
}
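
/*
 * Illustration: a single 2GB EFI range that straddles the boundary between
 * two nodes' memblks is delivered as two calls to @arg -- one per
 * intersecting node_memblk[] entry, each clipped to the overlap -- so the
 * callbacks only ever see memory that belongs to exactly one node.
 */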

/**
 * count_node_pages - callback to build per-node memory info structures
 * @start: physical start of range
 * @len: length of range
 * @node: node where this range resides
 *
 * Each node has its own number of physical pages, DMAable pages, start, and
 * end page frame number.  This routine will be called by call_pernode_memory()
 * for each piece of usable memory and will set up these values for each node.
 * Very similar to build_node_maps().
 */
static __init int count_node_pages(unsigned long start, unsigned long len, int node)
{
	unsigned long end = start + len;

	mem_data[node].num_physpages += len >> PAGE_SHIFT;
	if (start <= __pa(MAX_DMA_ADDRESS))
		mem_data[node].num_dma_physpages +=
			(min(end, __pa(MAX_DMA_ADDRESS)) - start) >>PAGE_SHIFT;
	start = GRANULEROUNDDOWN(start);
	start = ORDERROUNDDOWN(start);
	end = GRANULEROUNDUP(end);
	mem_data[node].max_pfn = max(mem_data[node].max_pfn,
				     end >> PAGE_SHIFT);
	mem_data[node].min_pfn = min(mem_data[node].min_pfn,
				     start >> PAGE_SHIFT);

	return 0;
}

/**
 * paging_init - setup page tables
 *
 * paging_init() sets up the page tables for each node of the system and frees
 * the bootmem allocator memory for general use.
 */
void __init paging_init(void)
{
	unsigned long max_dma;
	unsigned long zones_size[MAX_NR_ZONES];
	unsigned long zholes_size[MAX_NR_ZONES];
	unsigned long pfn_offset = 0;
	int node;

	max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;

	arch_sparse_init();

	efi_memmap_walk(filter_rsvd_memory, count_node_pages);

#ifdef CONFIG_VIRTUAL_MEM_MAP
	vmalloc_end -= PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
		sizeof(struct page));
	vmem_map = (struct page *) vmalloc_end;
	efi_memmap_walk(create_mem_map_page_table, NULL);
	printk("Virtual mem_map starts at 0x%p\n", vmem_map);
#endif

	for_each_online_node(node) {
		memset(zones_size, 0, sizeof(zones_size));
		memset(zholes_size, 0, sizeof(zholes_size));

		num_physpages += mem_data[node].num_physpages;

		if (mem_data[node].min_pfn >= max_dma) {
			/* All of this node's memory is above ZONE_DMA */
			zones_size[ZONE_NORMAL] = mem_data[node].max_pfn -
				mem_data[node].min_pfn;
			zholes_size[ZONE_NORMAL] = mem_data[node].max_pfn -
				mem_data[node].min_pfn -
				mem_data[node].num_physpages;
		} else if (mem_data[node].max_pfn < max_dma) {
			/* All of this node's memory is in ZONE_DMA */
			zones_size[ZONE_DMA] = mem_data[node].max_pfn -
				mem_data[node].min_pfn;
			zholes_size[ZONE_DMA] = mem_data[node].max_pfn -
				mem_data[node].min_pfn -
				mem_data[node].num_dma_physpages;
		} else {
			/* This node has memory in both zones */
			zones_size[ZONE_DMA] = max_dma -
				mem_data[node].min_pfn;
			zholes_size[ZONE_DMA] = zones_size[ZONE_DMA] -
				mem_data[node].num_dma_physpages;
			zones_size[ZONE_NORMAL] = mem_data[node].max_pfn -
				max_dma;
			zholes_size[ZONE_NORMAL] = zones_size[ZONE_NORMAL] -
				(mem_data[node].num_physpages -
				 mem_data[node].num_dma_physpages);
		}
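
		/*
		 * Example of the split above (illustrative numbers): a node
		 * spanning pfns 0x100000-0x180000 that lies entirely above
		 * max_dma with 0x60000 pages actually present ends up with
		 * zones_size[ZONE_NORMAL] = 0x80000 and
		 * zholes_size[ZONE_NORMAL] = 0x20000.
		 */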

		pfn_offset = mem_data[node].min_pfn;

#ifdef CONFIG_VIRTUAL_MEM_MAP
		NODE_DATA(node)->node_mem_map = vmem_map + pfn_offset;
#endif
		free_area_init_node(node, NODE_DATA(node), zones_size,
				    pfn_offset, zholes_size);
	}

	zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
}

pg_data_t *arch_alloc_nodedata(int nid)
{
	unsigned long size = compute_pernodesize(nid);

	return kzalloc(size, GFP_KERNEL);
}

void arch_free_nodedata(pg_data_t *pgdat)
{
	kfree(pgdat);
}

void arch_refresh_nodedata(int update_node, pg_data_t *update_pgdat)
{
	pgdat_list[update_node] = update_pgdat;
	scatter_node_data();
}