/*
 * pSeries NUMA support
 *
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/threads.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/module.h>
#include <linux/nodemask.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/lmb.h>
#include <linux/of.h>
#include <linux/pfn.h>
#include <asm/sparsemem.h>
#include <asm/prom.h>
#include <asm/system.h>
#include <asm/smp.h>

static int numa_enabled = 1;

static char *cmdline __initdata;

static int numa_debug;
#define dbg(args...) if (numa_debug) { printk(KERN_INFO args); }

int numa_cpu_lookup_table[NR_CPUS];
cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
struct pglist_data *node_data[MAX_NUMNODES];

EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(node_to_cpumask_map);
EXPORT_SYMBOL(node_data);

static int min_common_depth;
static int n_mem_addr_cells, n_mem_size_cells;

/*
 * Allocate node_to_cpumask_map based on number of available nodes
 * Requires node_possible_map to be valid.
 *
 * Note: node_to_cpumask() is not valid until after this is done.
 */
static void __init setup_node_to_cpumask_map(void)
{
	unsigned int node, num = 0;

	/* setup nr_node_ids if not done yet */
	if (nr_node_ids == MAX_NUMNODES) {
		for_each_node_mask(node, node_possible_map)
			num = node;
		nr_node_ids = num + 1;
	}

	/* allocate the map */
	for (node = 0; node < nr_node_ids; node++)
		alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);

	/* cpumask_of_node() will now work */
	dbg("Node to cpumask map for %d nodes\n", nr_node_ids);
}

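/*
 * Create an additional "fake" NUMA node whenever end_pfn crosses the next
 * boundary parsed from the numa=fake= command line option (see early_numa()
 * and cmdline above); the new node id is handed back through *nid.
 */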
static int __cpuinit fake_numa_create_new_node(unsigned long end_pfn,
						unsigned int *nid)
{
	unsigned long long mem;
	char *p = cmdline;
	static unsigned int fake_nid;
	static unsigned long long curr_boundary;

	/*
	 * Modify node id, iff we started creating NUMA nodes
	 * We want to continue from where we left off last time
	 */
	if (fake_nid)
		*nid = fake_nid;
	/*
	 * In case there are no more arguments to parse, the
	 * node_id should be the same as the last fake node id
	 * (we've handled this above).
	 */
	if (!p)
		return 0;

	mem = memparse(p, &p);
	if (!mem)
		return 0;

	if (mem < curr_boundary)
		return 0;

	curr_boundary = mem;

	if ((end_pfn << PAGE_SHIFT) > mem) {
		/*
		 * Skip commas and spaces
		 */
		while (*p == ',' || *p == ' ' || *p == '\t')
			p++;

		cmdline = p;
		fake_nid++;
		*nid = fake_nid;
		dbg("created new fake_node with id %d\n", fake_nid);
		return 1;
	}
	return 0;
}

/*
 * get_active_region_work_fn - A helper function for get_node_active_region
 *	Returns datax set to the start_pfn and end_pfn if they contain
 *	the initial value of datax->start_pfn between them
 * @start_pfn: start page(inclusive) of region to check
 * @end_pfn: end page(exclusive) of region to check
 * @datax: comes in with ->start_pfn set to value to search for and
 *	goes out with active range if it contains it
 * Returns 1 if search value is in range else 0
 */
static int __init get_active_region_work_fn(unsigned long start_pfn,
					unsigned long end_pfn, void *datax)
{
	struct node_active_region *data;
	data = (struct node_active_region *)datax;

	if (start_pfn <= data->start_pfn && end_pfn > data->start_pfn) {
		data->start_pfn = start_pfn;
		data->end_pfn = end_pfn;
		return 1;
	}
	return 0;

}

/*
 * get_node_active_region - Return active region containing start_pfn
 * Active range returned is empty if none found.
 * @start_pfn: The page to return the region for.
 * @node_ar: Returned set to the active region containing start_pfn
 */
static void __init get_node_active_region(unsigned long start_pfn,
		       struct node_active_region *node_ar)
{
	int nid = early_pfn_to_nid(start_pfn);

	node_ar->nid = nid;
	node_ar->start_pfn = start_pfn;
	node_ar->end_pfn = start_pfn;
	work_with_active_regions(nid, get_active_region_work_fn, node_ar);
}

static void __cpuinit map_cpu_to_node(int cpu, int node)
{
	numa_cpu_lookup_table[cpu] = node;

	dbg("adding cpu %d to node %d\n", cpu, node);

	if (!(cpumask_test_cpu(cpu, node_to_cpumask_map[node])))
		cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
}

#ifdef CONFIG_HOTPLUG_CPU
static void unmap_cpu_from_node(unsigned long cpu)
{
	int node = numa_cpu_lookup_table[cpu];

	dbg("removing cpu %lu from node %d\n", cpu, node);

	if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) {
		cpumask_clear_cpu(cpu, node_to_cpumask_map[node]);
	} else {
		printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n",
		       cpu, node);
	}
}
#endif /* CONFIG_HOTPLUG_CPU */

/* must hold reference to node during call */
static const int *of_get_associativity(struct device_node *dev)
{
	return of_get_property(dev, "ibm,associativity", NULL);
}

/*
 * Returns the property linux,drconf-usable-memory if
 * it exists (the property exists only in kexec/kdump kernels,
 * added by kexec-tools)
 */
static const u32 *of_get_usable_memory(struct device_node *memory)
{
	const u32 *prop;
	u32 len;
	prop = of_get_property(memory, "linux,drconf-usable-memory", &len);
	if (!prop || len < sizeof(unsigned int))
		return 0;
	return prop;
}

/* Returns nid in the range [0..MAX_NUMNODES-1], or -1 if no useful numa
 * info is found.
 */
static int of_node_to_nid_single(struct device_node *device)
{
	int nid = -1;
	const unsigned int *tmp;

	if (min_common_depth == -1)
		goto out;

	tmp = of_get_associativity(device);
	if (!tmp)
		goto out;

	if (tmp[0] >= min_common_depth)
		nid = tmp[min_common_depth];

	/* POWER4 LPAR uses 0xffff as invalid node */
	if (nid == 0xffff || nid >= MAX_NUMNODES)
		nid = -1;
out:
	return nid;
}

/* Walk the device tree upwards, looking for an associativity id */
int of_node_to_nid(struct device_node *device)
{
	struct device_node *tmp;
	int nid = -1;

	of_node_get(device);
	while (device) {
		nid = of_node_to_nid_single(device);
		if (nid != -1)
			break;

	        tmp = device;
		device = of_get_parent(tmp);
		of_node_put(tmp);
	}
	of_node_put(device);

	return nid;
}
EXPORT_SYMBOL_GPL(of_node_to_nid);

/*
 * In theory, the "ibm,associativity" property may contain multiple
 * associativity lists because a resource may be multiply connected
 * into the machine.  This resource then has different associativity
 * characteristics relative to its multiple connections.  We ignore
 * this for now.  We also assume that all cpu and memory sets have
 * their distances represented at a common level.  This won't be
 * true for hierarchical NUMA.
 *
 * In any case the ibm,associativity-reference-points should give
 * the correct depth for a normal NUMA system.
 *
 * - Dave Hansen <haveblue@us.ibm.com>
 */
static int __init find_min_common_depth(void)
{
	int depth, index;
	const unsigned int *ref_points;
	struct device_node *rtas_root;
	unsigned int len;
	struct device_node *chosen;
	const char *vec5;

	rtas_root = of_find_node_by_path("/rtas");

	if (!rtas_root)
		return -1;

	/*
	 * this property is 2 32-bit integers, each representing a level of
	 * depth in the associativity nodes.  The first is for an SMP
	 * configuration (should be all 0's) and the second is for a normal
	 * NUMA configuration.
	 */
	index = 1;
	ref_points = of_get_property(rtas_root,
			"ibm,associativity-reference-points", &len);

	/*
	 * For form 1 affinity information we want the first field
	 */
#define VEC5_AFFINITY_BYTE	5
#define VEC5_AFFINITY		0x80
	chosen = of_find_node_by_path("/chosen");
	if (chosen) {
		vec5 = of_get_property(chosen, "ibm,architecture-vec-5", NULL);
		if (vec5 && (vec5[VEC5_AFFINITY_BYTE] & VEC5_AFFINITY)) {
			dbg("Using form 1 affinity\n");
			index = 0;
		}
	}

	if ((len >= 2 * sizeof(unsigned int)) && ref_points) {
		depth = ref_points[index];
	} else {
		dbg("NUMA: ibm,associativity-reference-points not found.\n");
		depth = -1;
	}
	of_node_put(rtas_root);

	return depth;
}

static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells)
{
	struct device_node *memory = NULL;

	memory = of_find_node_by_type(memory, "memory");
	if (!memory)
		panic("numa.c: No memory nodes found!");

	*n_addr_cells = of_n_addr_cells(memory);
	*n_size_cells = of_n_size_cells(memory);
	of_node_put(memory);
}

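/*
 * Device tree cells are 32 bits wide; concatenate n of them (most
 * significant cell first) into a single value and advance *buf past them.
 */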
static unsigned long __devinit read_n_cells(int n, const unsigned int **buf)
{
	unsigned long result = 0;

	while (n--) {
		result = (result << 32) | **buf;
		(*buf)++;
	}
	return result;
}

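/*
 * Parsed form of one lmb list entry of the ibm,dynamic-memory property,
 * filled in by read_drconf_cell() below.
 */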
struct of_drconf_cell {
	u64	base_addr;
	u32	drc_index;
	u32	reserved;
	u32	aa_index;
	u32	flags;
};

#define DRCONF_MEM_ASSIGNED	0x00000008
#define DRCONF_MEM_AI_INVALID	0x00000040
#define DRCONF_MEM_RESERVED	0x00000080

/*
 * Read the next lmb list entry from the ibm,dynamic-memory property
 * and return the information in the provided of_drconf_cell structure.
 */
static void read_drconf_cell(struct of_drconf_cell *drmem, const u32 **cellp)
{
	const u32 *cp;

	drmem->base_addr = read_n_cells(n_mem_addr_cells, cellp);

	cp = *cellp;
	drmem->drc_index = cp[0];
	drmem->reserved = cp[1];
	drmem->aa_index = cp[2];
	drmem->flags = cp[3];

	*cellp = cp + 4;
}

/*
 * Retrieve and validate the ibm,dynamic-memory property of the device tree.
 *
 * The layout of the ibm,dynamic-memory property is a cell containing the
 * number N of lmb list entries, followed by the N entries themselves.  Each
 * lmb list entry contains information as laid out in the of_drconf_cell
 * struct above.
 */
static int of_get_drconf_memory(struct device_node *memory, const u32 **dm)
{
	const u32 *prop;
	u32 len, entries;

	prop = of_get_property(memory, "ibm,dynamic-memory", &len);
	if (!prop || len < sizeof(unsigned int))
		return 0;

	entries = *prop++;

	/* Now that we know the number of entries, revalidate the size
	 * of the property read in to ensure we have everything
	 */
	if (len < (entries * (n_mem_addr_cells + 4) + 1) * sizeof(unsigned int))
		return 0;

	*dm = prop;
	return entries;
}

/*
 * Retrieve and validate the ibm,lmb-size property for drconf memory
 * from the device tree.
 */
static u64 of_get_lmb_size(struct device_node *memory)
{
	const u32 *prop;
	u32 len;

	prop = of_get_property(memory, "ibm,lmb-size", &len);
	if (!prop || len < sizeof(unsigned int))
		return 0;

	return read_n_cells(n_mem_size_cells, &prop);
}

struct assoc_arrays {
	u32	n_arrays;
	u32	array_sz;
	const u32 *arrays;
};

/*
 * Retrieve and validate the list of associativity arrays for drconf
 * memory from the ibm,associativity-lookup-arrays property of the
 * device tree.
 *
 * The layout of the ibm,associativity-lookup-arrays property is a number N
 * indicating the number of associativity arrays, followed by a number M
 * indicating the size of each associativity array, followed by a list
 * of N associativity arrays.
 */
static int of_get_assoc_arrays(struct device_node *memory,
			       struct assoc_arrays *aa)
{
	const u32 *prop;
	u32 len;

	prop = of_get_property(memory, "ibm,associativity-lookup-arrays", &len);
	if (!prop || len < 2 * sizeof(unsigned int))
		return -1;

	aa->n_arrays = *prop++;
	aa->array_sz = *prop++;

	/* Now that we know the number of arrays and size of each array,
	 * revalidate the size of the property read in.
	 */
	if (len < (aa->n_arrays * aa->array_sz + 2) * sizeof(unsigned int))
		return -1;

	aa->arrays = prop;
	return 0;
}

/*
 * This is like of_node_to_nid_single() for memory represented in the
 * ibm,dynamic-reconfiguration-memory node.
 */
static int of_drconf_to_nid_single(struct of_drconf_cell *drmem,
				   struct assoc_arrays *aa)
{
	int default_nid = 0;
	int nid = default_nid;
	int index;

	if (min_common_depth > 0 && min_common_depth <= aa->array_sz &&
	    !(drmem->flags & DRCONF_MEM_AI_INVALID) &&
	    drmem->aa_index < aa->n_arrays) {
		index = drmem->aa_index * aa->array_sz + min_common_depth - 1;
		nid = aa->arrays[index];

		if (nid == 0xffff || nid >= MAX_NUMNODES)
			nid = default_nid;
	}

	return nid;
}

/*
 * Figure out to which domain a cpu belongs and stick it there.
 * Return the id of the domain used.
 */
static int __cpuinit numa_setup_cpu(unsigned long lcpu)
{
	int nid = 0;
	struct device_node *cpu = of_get_cpu_node(lcpu, NULL);

	if (!cpu) {
		WARN_ON(1);
		goto out;
	}

	nid = of_node_to_nid_single(cpu);

	if (nid < 0 || !node_online(nid))
		nid = first_online_node;
out:
	map_cpu_to_node(lcpu, nid);

	of_node_put(cpu);

	return nid;
}

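/*
 * CPU hotplug notifier: map a CPU onto its NUMA node as it is brought up
 * and remove the mapping again when it goes offline or its bringup is
 * cancelled.
 */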
static int __cpuinit cpu_numa_callback(struct notifier_block *nfb,
			     unsigned long action,
			     void *hcpu)
{
	unsigned long lcpu = (unsigned long)hcpu;
	int ret = NOTIFY_DONE;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		numa_setup_cpu(lcpu);
		ret = NOTIFY_OK;
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		unmap_cpu_from_node(lcpu);
		ret = NOTIFY_OK;
		break;
#endif
	}
	return ret;
}

/*
 * Check and possibly modify a memory region to enforce the memory limit.
 *
 * Returns the size the region should have to enforce the memory limit.
 * This will either be the original value of size, a truncated value,
 * or zero. If the returned value of size is 0 the region should be
 * discarded as it lies wholly above the memory limit.
 */
static unsigned long __init numa_enforce_memory_limit(unsigned long start,
						      unsigned long size)
{
	/*
	 * We use lmb_end_of_DRAM() in here instead of memory_limit because
	 * we've already adjusted it for the limit and it takes care of
	 * having memory holes below the limit.  Also, in the case of
	 * iommu_is_off, memory_limit is not set but is implicitly enforced.
	 */

	if (start + size <= lmb_end_of_DRAM())
		return size;

	if (start >= lmb_end_of_DRAM())
		return 0;

	return lmb_end_of_DRAM() - start;
}

/*
 * Reads the counter for a given entry in
 * linux,drconf-usable-memory property
 */
static inline int __init read_usm_ranges(const u32 **usm)
{
	/*
	 * For each lmb in ibm,dynamic-memory, a corresponding
	 * entry in the linux,drconf-usable-memory property contains
	 * a counter followed by that many (base, size) pairs.
	 * Read the counter from linux,drconf-usable-memory.
	 */
	return read_n_cells(n_mem_size_cells, usm);
}

/*
 * Extract NUMA information from the ibm,dynamic-reconfiguration-memory
 * node.  This assumes n_mem_{addr,size}_cells have been set.
 */
static void __init parse_drconf_memory(struct device_node *memory)
{
	const u32 *dm, *usm;
	unsigned int n, rc, ranges, is_kexec_kdump = 0;
	unsigned long lmb_size, base, size, sz;
	int nid;
	struct assoc_arrays aa;

	n = of_get_drconf_memory(memory, &dm);
	if (!n)
		return;

	lmb_size = of_get_lmb_size(memory);
	if (!lmb_size)
		return;

	rc = of_get_assoc_arrays(memory, &aa);
	if (rc)
		return;

	/* check if this is a kexec/kdump kernel */
	usm = of_get_usable_memory(memory);
	if (usm != NULL)
		is_kexec_kdump = 1;

	for (; n != 0; --n) {
		struct of_drconf_cell drmem;

		read_drconf_cell(&drmem, &dm);

		/* skip this block if the reserved bit is set in flags (0x80)
		   or if the block is not assigned to this partition (0x8) */
		if ((drmem.flags & DRCONF_MEM_RESERVED)
		    || !(drmem.flags & DRCONF_MEM_ASSIGNED))
			continue;

		base = drmem.base_addr;
		size = lmb_size;
		ranges = 1;

		if (is_kexec_kdump) {
			ranges = read_usm_ranges(&usm);
			if (!ranges) /* there are no (base, size) pairs */
				continue;
		}
		do {
			if (is_kexec_kdump) {
				base = read_n_cells(n_mem_addr_cells, &usm);
				size = read_n_cells(n_mem_size_cells, &usm);
			}
			nid = of_drconf_to_nid_single(&drmem, &aa);
			fake_numa_create_new_node(
				((base + size) >> PAGE_SHIFT),
					   &nid);
			node_set_online(nid);
			sz = numa_enforce_memory_limit(base, size);
			if (sz)
				add_active_range(nid, base >> PAGE_SHIFT,
						 (base >> PAGE_SHIFT)
						 + (sz >> PAGE_SHIFT));
		} while (--ranges);
	}
}

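/*
 * Walk the cpu and memory nodes of the device tree, online the NUMA nodes
 * they refer to and record their memory ranges.  Returns 0 on success, or
 * a negative value when NUMA is disabled or no associativity depth exists.
 */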
static int __init parse_numa_properties(void)
{
	struct device_node *cpu = NULL;
	struct device_node *memory = NULL;
	int default_nid = 0;
	unsigned long i;

	if (numa_enabled == 0) {
		printk(KERN_WARNING "NUMA disabled by user\n");
		return -1;
	}

	min_common_depth = find_min_common_depth();

	if (min_common_depth < 0)
		return min_common_depth;

	dbg("NUMA associativity depth for CPU/Memory: %d\n", min_common_depth);

	/*
	 * Even though we connect cpus to numa domains later in SMP
	 * init, we need to know the node ids now. This is because
	 * each node to be onlined must have NODE_DATA etc backing it.
	 */
	for_each_present_cpu(i) {
		int nid;

		cpu = of_get_cpu_node(i, NULL);
		BUG_ON(!cpu);
		nid = of_node_to_nid_single(cpu);
		of_node_put(cpu);

		/*
		 * Don't fall back to default_nid yet -- we will plug
		 * cpus into nodes once the memory scan has discovered
		 * the topology.
		 */
		if (nid < 0)
			continue;
		node_set_online(nid);
	}

	get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells);
	memory = NULL;
	while ((memory = of_find_node_by_type(memory, "memory")) != NULL) {
		unsigned long start;
		unsigned long size;
		int nid;
		int ranges;
		const unsigned int *memcell_buf;
		unsigned int len;

		memcell_buf = of_get_property(memory,
			"linux,usable-memory", &len);
		if (!memcell_buf || len <= 0)
			memcell_buf = of_get_property(memory, "reg", &len);
		if (!memcell_buf || len <= 0)
			continue;

		/* ranges in cell */
		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
new_range:
		/* these are order-sensitive, and modify the buffer pointer */
		start = read_n_cells(n_mem_addr_cells, &memcell_buf);
		size = read_n_cells(n_mem_size_cells, &memcell_buf);

		/*
		 * Assumption: either all memory nodes or none will
		 * have associativity properties.  If none, then
		 * everything goes to default_nid.
		 */
		nid = of_node_to_nid_single(memory);
		if (nid < 0)
			nid = default_nid;

		fake_numa_create_new_node(((start + size) >> PAGE_SHIFT), &nid);
		node_set_online(nid);

		if (!(size = numa_enforce_memory_limit(start, size))) {
			if (--ranges)
				goto new_range;
			else
				continue;
		}

		add_active_range(nid, start >> PAGE_SHIFT,
				(start >> PAGE_SHIFT) + (size >> PAGE_SHIFT));

		if (--ranges)
			goto new_range;
	}

	/*
	 * Now do the same thing for each LMB listed in the ibm,dynamic-memory
	 * property in the ibm,dynamic-reconfiguration-memory node.
	 */
	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory)
		parse_drconf_memory(memory);

	return 0;
}

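/*
 * Fallback when no usable NUMA information is available: place all of
 * memory on a single node (node 0, or fake nodes if numa=fake= was given).
 */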
static void __init setup_nonnuma(void)
{
	unsigned long top_of_ram = lmb_end_of_DRAM();
	unsigned long total_ram = lmb_phys_mem_size();
	unsigned long start_pfn, end_pfn;
	unsigned int i, nid = 0;

	printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
	       top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (top_of_ram - total_ram) >> 20);

	for (i = 0; i < lmb.memory.cnt; ++i) {
		start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT;
		end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i);

		fake_numa_create_new_node(end_pfn, &nid);
		add_active_range(nid, start_pfn, end_pfn);
		node_set_online(nid);
	}
}

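/*
 * Print the ranges of CPUs that belong to each online node, e.g.
 * "Node 0 CPUs: 0-3".
 */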
void __init dump_numa_cpu_topology(void)
{
	unsigned int node;
	unsigned int cpu, count;

	if (min_common_depth == -1 || !numa_enabled)
		return;

	for_each_online_node(node) {
		printk(KERN_DEBUG "Node %d CPUs:", node);

		count = 0;
		/*
		 * If we used a CPU iterator here we would miss printing
		 * the holes in the cpumap.
		 */
		for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
			if (cpumask_test_cpu(cpu,
					node_to_cpumask_map[node])) {
				if (count == 0)
					printk(" %u", cpu);
				++count;
			} else {
				if (count > 1)
					printk("-%u", cpu - 1);
				count = 0;
			}
		}

		if (count > 1)
			printk("-%u", nr_cpu_ids - 1);
		printk("\n");
	}
}

static void __init dump_numa_memory_topology(void)
{
	unsigned int node;
	unsigned int count;

	if (min_common_depth == -1 || !numa_enabled)
		return;

	for_each_online_node(node) {
		unsigned long i;

		printk(KERN_DEBUG "Node %d Memory:", node);

		count = 0;

		for (i = 0; i < lmb_end_of_DRAM();
		     i += (1 << SECTION_SIZE_BITS)) {
			if (early_pfn_to_nid(i >> PAGE_SHIFT) == node) {
				if (count == 0)
					printk(" 0x%lx", i);
				++count;
			} else {
				if (count > 0)
					printk("-0x%lx", i);
				count = 0;
			}
		}

		if (count > 0)
			printk("-0x%lx", i);
		printk("\n");
	}
}

/*
 * Allocate some memory, satisfying the lmb or bootmem allocator where
 * required. nid is the preferred node and end is the physical address of
 * the highest address in the node.
 *
 * Returns the virtual address of the memory.
 */
static void __init *careful_zallocation(int nid, unsigned long size,
				       unsigned long align,
				       unsigned long end_pfn)
{
	void *ret;
	int new_nid;
	unsigned long ret_paddr;

	ret_paddr = __lmb_alloc_base(size, align, end_pfn << PAGE_SHIFT);

	/* retry over all memory */
	if (!ret_paddr)
		ret_paddr = __lmb_alloc_base(size, align, lmb_end_of_DRAM());

	if (!ret_paddr)
		panic("numa.c: cannot allocate %lu bytes for node %d",
		      size, nid);

	ret = __va(ret_paddr);

	/*
	 * We initialize the nodes in numeric order: 0, 1, 2...
	 * and hand over control from the LMB allocator to the
	 * bootmem allocator.  If this function is called for
	 * node 5, then we know that all nodes <5 are using the
	 * bootmem allocator instead of the LMB allocator.
	 *
	 * So, check the nid from which this allocation came
	 * and double check to see if we need to use bootmem
	 * instead of the LMB.  We don't free the LMB memory
	 * since it would be useless.
	 */
	new_nid = early_pfn_to_nid(ret_paddr >> PAGE_SHIFT);
	if (new_nid < nid) {
		ret = __alloc_bootmem_node(NODE_DATA(new_nid),
				size, align, 0);

		dbg("alloc_bootmem %p %lx\n", ret, size);
	}

	memset(ret, 0, size);
	return ret;
}

static struct notifier_block __cpuinitdata ppc64_numa_nb = {
	.notifier_call = cpu_numa_callback,
	.priority = 1 /* Must run before sched domains notifier. */
};

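/*
 * Mark every lmb.reserved range that overlaps this node as reserved in the
 * node's bootmem allocator, splitting ranges across the node's active
 * regions where necessary.
 */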
static void mark_reserved_regions_for_nid(int nid)
{
	struct pglist_data *node = NODE_DATA(nid);
	int i;

	for (i = 0; i < lmb.reserved.cnt; i++) {
		unsigned long physbase = lmb.reserved.region[i].base;
		unsigned long size = lmb.reserved.region[i].size;
		unsigned long start_pfn = physbase >> PAGE_SHIFT;
		unsigned long end_pfn = PFN_UP(physbase + size);
		struct node_active_region node_ar;
		unsigned long node_end_pfn = node->node_start_pfn +
					     node->node_spanned_pages;

		/*
		 * Check to make sure that this lmb.reserved area is
		 * within the bounds of the node that we care about.
		 * Checking the nid of the start and end points is not
		 * sufficient because the reserved area could span the
		 * entire node.
		 */
		if (end_pfn <= node->node_start_pfn ||
		    start_pfn >= node_end_pfn)
			continue;

		get_node_active_region(start_pfn, &node_ar);
		while (start_pfn < end_pfn &&
			node_ar.start_pfn < node_ar.end_pfn) {
			unsigned long reserve_size = size;
			/*
			 * if reserved region extends past active region
			 * then trim size to active region
			 */
			if (end_pfn > node_ar.end_pfn)
				reserve_size = (node_ar.end_pfn << PAGE_SHIFT)
					- physbase;
			/*
			 * Only worry about *this* node, others may not
			 * yet have valid NODE_DATA().
			 */
			if (node_ar.nid == nid) {
				dbg("reserve_bootmem %lx %lx nid=%d\n",
					physbase, reserve_size, node_ar.nid);
				reserve_bootmem_node(NODE_DATA(node_ar.nid),
						physbase, reserve_size,
						BOOTMEM_DEFAULT);
			}
			/*
			 * if reserved region is contained in the active region
			 * then done.
			 */
			if (end_pfn <= node_ar.end_pfn)
				break;

			/*
			 * reserved region extends past the active region
			 *   get next active region that contains this
			 *   reserved region
			 */
			start_pfn = node_ar.end_pfn;
			physbase = start_pfn << PAGE_SHIFT;
			size = size - reserve_size;
			get_node_active_region(start_pfn, &node_ar);
		}
	}
}


void __init do_init_bootmem(void)
{
	int nid;

	min_low_pfn = 0;
	max_low_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
	max_pfn = max_low_pfn;

	if (parse_numa_properties())
		setup_nonnuma();
	else
		dump_numa_memory_topology();

	for_each_online_node(nid) {
		unsigned long start_pfn, end_pfn;
		void *bootmem_vaddr;
		unsigned long bootmap_pages;

		get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);

		/*
		 * Allocate the node structure node local if possible
		 *
		 * Be careful moving this around, as it relies on all
		 * previous nodes' bootmem to be initialized and have
		 * all reserved areas marked.
		 */
		NODE_DATA(nid) = careful_zallocation(nid,
					sizeof(struct pglist_data),
					SMP_CACHE_BYTES, end_pfn);

		dbg("node %d\n", nid);
		dbg("NODE_DATA() = %p\n", NODE_DATA(nid));

		NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
		NODE_DATA(nid)->node_start_pfn = start_pfn;
		NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;

		if (NODE_DATA(nid)->node_spanned_pages == 0)
			continue;

		dbg("start_paddr = %lx\n", start_pfn << PAGE_SHIFT);
		dbg("end_paddr = %lx\n", end_pfn << PAGE_SHIFT);

		bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
		bootmem_vaddr = careful_zallocation(nid,
					bootmap_pages << PAGE_SHIFT,
					PAGE_SIZE, end_pfn);

		dbg("bootmap_vaddr = %p\n", bootmem_vaddr);

		init_bootmem_node(NODE_DATA(nid),
				  __pa(bootmem_vaddr) >> PAGE_SHIFT,
				  start_pfn, end_pfn);

		free_bootmem_with_active_regions(nid, end_pfn);
		/*
		 * Be very careful about moving this around.  Future
		 * calls to careful_zallocation() depend on this getting
		 * done correctly.
		 */
		mark_reserved_regions_for_nid(nid);
		sparse_memory_present_with_active_regions(nid);
	}

	init_bootmem_done = 1;

	/*
	 * Now bootmem is initialised we can create the node to cpumask
	 * lookup tables and setup the cpu callback to populate them.
	 */
	setup_node_to_cpumask_map();

	register_cpu_notifier(&ppc64_numa_nb);
	cpu_numa_callback(&ppc64_numa_nb, CPU_UP_PREPARE,
			  (void *)(unsigned long)boot_cpuid);
}

void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = lmb_end_of_DRAM() >> PAGE_SHIFT;
	free_area_init_nodes(max_zone_pfns);
}

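/*
 * Parse the early "numa=" command line option: "off" disables NUMA,
 * "debug" enables dbg() output and "fake=" supplies the node boundaries
 * used by fake_numa_create_new_node().
 */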
static int __init early_numa(char *p)
{
	if (!p)
		return 0;

	if (strstr(p, "off"))
		numa_enabled = 0;

	if (strstr(p, "debug"))
		numa_debug = 1;

	p = strstr(p, "fake=");
	if (p)
		cmdline = p + strlen("fake=");

	return 0;
}
early_param("numa", early_numa);

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Find the node associated with a hot added memory section for
 * memory represented in the device tree by the property
 * ibm,dynamic-reconfiguration-memory/ibm,dynamic-memory.
 */
static int hot_add_drconf_scn_to_nid(struct device_node *memory,
				     unsigned long scn_addr)
{
	const u32 *dm;
	unsigned int drconf_cell_cnt, rc;
	unsigned long lmb_size;
	struct assoc_arrays aa;
	int nid = -1;

	drconf_cell_cnt = of_get_drconf_memory(memory, &dm);
	if (!drconf_cell_cnt)
		return -1;

	lmb_size = of_get_lmb_size(memory);
	if (!lmb_size)
		return -1;

	rc = of_get_assoc_arrays(memory, &aa);
	if (rc)
		return -1;

	for (; drconf_cell_cnt != 0; --drconf_cell_cnt) {
		struct of_drconf_cell drmem;

		read_drconf_cell(&drmem, &dm);

		/* skip this block if it is reserved or not assigned to
		 * this partition */
		if ((drmem.flags & DRCONF_MEM_RESERVED)
		    || !(drmem.flags & DRCONF_MEM_ASSIGNED))
			continue;

		if ((scn_addr < drmem.base_addr)
		    || (scn_addr >= (drmem.base_addr + lmb_size)))
			continue;

		nid = of_drconf_to_nid_single(&drmem, &aa);
		break;
	}

	return nid;
}

/*
 * Find the node associated with a hot added memory section for memory
 * represented in the device tree as a node (i.e. memory@XXXX) for
 * each lmb.
 */
int hot_add_node_scn_to_nid(unsigned long scn_addr)
{
	struct device_node *memory = NULL;
	int nid = -1;

	while ((memory = of_find_node_by_type(memory, "memory")) != NULL) {
		unsigned long start, size;
		int ranges;
		const unsigned int *memcell_buf;
		unsigned int len;

		memcell_buf = of_get_property(memory, "reg", &len);
		if (!memcell_buf || len <= 0)
			continue;

		/* ranges in cell */
		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);

		while (ranges--) {
			start = read_n_cells(n_mem_addr_cells, &memcell_buf);
			size = read_n_cells(n_mem_size_cells, &memcell_buf);

			if ((scn_addr < start) || (scn_addr >= (start + size)))
				continue;

			nid = of_node_to_nid_single(memory);
			break;
		}

		of_node_put(memory);
		if (nid >= 0)
			break;
	}

	return nid;
}

/*
 * Find the node associated with a hot added memory section.  Section
 * corresponds to a SPARSEMEM section, not an LMB.  It is assumed that
 * sections are fully contained within a single LMB.
 */
int hot_add_scn_to_nid(unsigned long scn_addr)
{
	struct device_node *memory = NULL;
	int nid, found = 0;

	if (!numa_enabled || (min_common_depth < 0))
		return first_online_node;

	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory) {
		nid = hot_add_drconf_scn_to_nid(memory, scn_addr);
		of_node_put(memory);
	} else {
		nid = hot_add_node_scn_to_nid(scn_addr);
	}

	if (nid < 0 || !node_online(nid))
		nid = first_online_node;

	if (NODE_DATA(nid)->node_spanned_pages)
		return nid;

	for_each_online_node(nid) {
		if (NODE_DATA(nid)->node_spanned_pages) {
			found = 1;
			break;
		}
	}

	BUG_ON(!found);
	return nid;
}

#endif /* CONFIG_MEMORY_HOTPLUG */