/*
 * pSeries NUMA support
 *
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#define pr_fmt(fmt) "numa: " fmt

#include <linux/threads.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/memblock.h>
#include <linux/of.h>
#include <linux/pfn.h>
#include <linux/cpuset.h>
#include <linux/node.h>
#include <linux/stop_machine.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <asm/cputhreads.h>
#include <asm/sparsemem.h>
#include <asm/prom.h>
#include <asm/smp.h>
#include <asm/topology.h>
#include <asm/firmware.h>
#include <asm/paca.h>
#include <asm/hvcall.h>
#include <asm/setup.h>
#include <asm/vdso.h>

static int numa_enabled = 1;

static char *cmdline __initdata;

static int numa_debug;
#define dbg(args...) if (numa_debug) { printk(KERN_INFO args); }

int numa_cpu_lookup_table[NR_CPUS];
cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
struct pglist_data *node_data[MAX_NUMNODES];

EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(node_to_cpumask_map);
EXPORT_SYMBOL(node_data);

static int min_common_depth;
static int n_mem_addr_cells, n_mem_size_cells;
static int form1_affinity;

#define MAX_DISTANCE_REF_POINTS 4
static int distance_ref_points_depth;
static const __be32 *distance_ref_points;
static int distance_lookup_table[MAX_NUMNODES][MAX_DISTANCE_REF_POINTS];

/*
 * Allocate node_to_cpumask_map based on number of available nodes
 * Requires node_possible_map to be valid.
 *
 * Note: cpumask_of_node() is not valid until after this is done.
 */
static void __init setup_node_to_cpumask_map(void)
{
	unsigned int node;

	/* setup nr_node_ids if not done yet */
	if (nr_node_ids == MAX_NUMNODES)
		setup_nr_node_ids();

	/* allocate the map */
	for_each_node(node)
		alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);

	/* cpumask_of_node() will now work */
	dbg("Node to cpumask map for %d nodes\n", nr_node_ids);
}

static int __init fake_numa_create_new_node(unsigned long end_pfn,
						unsigned int *nid)
{
	unsigned long long mem;
	char *p = cmdline;
	static unsigned int fake_nid;
	static unsigned long long curr_boundary;

	/*
	 * Modify node id, iff we started creating NUMA nodes
	 * We want to continue from where we left off the last time
	 */
	if (fake_nid)
		*nid = fake_nid;
	/*
	 * In case there are no more arguments to parse, the
	 * node_id should be the same as the last fake node id
	 * (we've handled this above).
	 */
	if (!p)
		return 0;

	mem = memparse(p, &p);
	if (!mem)
		return 0;

	if (mem < curr_boundary)
		return 0;

	curr_boundary = mem;

	if ((end_pfn << PAGE_SHIFT) > mem) {
		/*
		 * Skip commas and spaces
		 */
		while (*p == ',' || *p == ' ' || *p == '\t')
			p++;

		cmdline = p;
		fake_nid++;
		*nid = fake_nid;
		dbg("created new fake_node with id %d\n", fake_nid);
		return 1;
	}
	return 0;
}
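
/*
 * Illustrative command line (hypothetical, not from this file): booting
 * with "numa=fake=2G,4G" leaves cmdline pointing at "2G,4G".  As the
 * memory scan walks upward, the first range whose end crosses 2G bumps
 * fake_nid to 1 and the first range crossing 4G bumps it to 2, carving
 * the memory map into three fake NUMA nodes.
 */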

static void reset_numa_cpu_lookup_table(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu)
		numa_cpu_lookup_table[cpu] = -1;
}

static void update_numa_cpu_lookup_table(unsigned int cpu, int node)
{
	numa_cpu_lookup_table[cpu] = node;
}

static void map_cpu_to_node(int cpu, int node)
{
	update_numa_cpu_lookup_table(cpu, node);

	dbg("adding cpu %d to node %d\n", cpu, node);

	if (!(cpumask_test_cpu(cpu, node_to_cpumask_map[node])))
		cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
}

#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PPC_SPLPAR)
static void unmap_cpu_from_node(unsigned long cpu)
{
	int node = numa_cpu_lookup_table[cpu];

	dbg("removing cpu %lu from node %d\n", cpu, node);

	if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) {
		cpumask_clear_cpu(cpu, node_to_cpumask_map[node]);
	} else {
		printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n",
		       cpu, node);
	}
}
#endif /* CONFIG_HOTPLUG_CPU || CONFIG_PPC_SPLPAR */

/* must hold reference to node during call */
static const __be32 *of_get_associativity(struct device_node *dev)
{
	return of_get_property(dev, "ibm,associativity", NULL);
}

/*
 * Returns the property linux,drconf-usable-memory if
 * it exists (the property exists only in kexec/kdump kernels,
 * added by kexec-tools)
 */
static const __be32 *of_get_usable_memory(void)
{
	struct device_node *memory;
	const __be32 *prop;
	u32 len;

	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (!memory)
		return NULL;

	prop = of_get_property(memory, "linux,drconf-usable-memory", &len);
	of_node_put(memory);

	if (!prop || len < sizeof(unsigned int))
		return NULL;
	return prop;
}

int __node_distance(int a, int b)
{
	int i;
	int distance = LOCAL_DISTANCE;

	if (!form1_affinity)
		return ((a == b) ? LOCAL_DISTANCE : REMOTE_DISTANCE);

	for (i = 0; i < distance_ref_points_depth; i++) {
		if (distance_lookup_table[a][i] == distance_lookup_table[b][i])
			break;

		/* Double the distance for each NUMA level */
		distance *= 2;
	}

	return distance;
}
EXPORT_SYMBOL(__node_distance);
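
/*
 * Worked example (hypothetical table contents): with
 * distance_ref_points_depth = 2, nodes whose rows are {7, 3} and {8, 3}
 * differ at level 0 (distance doubles to 20) but match at level 1, so
 * __node_distance() returns 20.  Rows {7, 3} and {8, 5} match at no
 * level and the distance doubles twice to 40; a node compared with
 * itself matches immediately and gets LOCAL_DISTANCE (10).
 */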

static void initialize_distance_lookup_table(int nid,
		const __be32 *associativity)
{
	int i;

	if (!form1_affinity)
		return;

	for (i = 0; i < distance_ref_points_depth; i++) {
		const __be32 *entry;

		entry = &associativity[be32_to_cpu(distance_ref_points[i]) - 1];
		distance_lookup_table[nid][i] = of_read_number(entry, 1);
	}
}

/* Returns nid in the range [0..MAX_NUMNODES-1], or -1 if no useful numa
 * info is found.
 */
static int associativity_to_nid(const __be32 *associativity)
{
	int nid = -1;

	if (min_common_depth == -1)
		goto out;

	if (of_read_number(associativity, 1) >= min_common_depth)
		nid = of_read_number(&associativity[min_common_depth], 1);

	/* POWER4 LPAR uses 0xffff as invalid node */
	if (nid == 0xffff || nid >= MAX_NUMNODES)
		nid = -1;

	if (nid > 0 &&
		of_read_number(associativity, 1) >= distance_ref_points_depth) {
		/*
		 * Skip the length field and send start of associativity array
		 */
		initialize_distance_lookup_table(nid, associativity + 1);
	}

out:
	return nid;
}

/* Returns the nid associated with the given device tree node,
 * or -1 if not found.
 */
static int of_node_to_nid_single(struct device_node *device)
{
	int nid = -1;
	const __be32 *tmp;

	tmp = of_get_associativity(device);
	if (tmp)
		nid = associativity_to_nid(tmp);
	return nid;
}

/* Walk the device tree upwards, looking for an associativity id */
int of_node_to_nid(struct device_node *device)
{
	int nid = -1;

	of_node_get(device);
	while (device) {
		nid = of_node_to_nid_single(device);
		if (nid != -1)
			break;

		device = of_get_next_parent(device);
	}
	of_node_put(device);

	return nid;
}
EXPORT_SYMBOL(of_node_to_nid);

static int __init find_min_common_depth(void)
{
	int depth;
	struct device_node *root;

	if (firmware_has_feature(FW_FEATURE_OPAL))
		root = of_find_node_by_path("/ibm,opal");
	else
		root = of_find_node_by_path("/rtas");
	if (!root)
		root = of_find_node_by_path("/");

	/*
	 * This property is a set of 32-bit integers, each representing
	 * an index into the ibm,associativity nodes.
	 *
	 * With form 0 affinity the first integer is for an SMP configuration
	 * (should be all 0's) and the second is for a normal NUMA
	 * configuration. We have only one level of NUMA.
	 *
	 * With form 1 affinity the first integer is the most significant
	 * NUMA boundary and the following are progressively less significant
	 * boundaries. There can be more than one level of NUMA.
	 */
	distance_ref_points = of_get_property(root,
					"ibm,associativity-reference-points",
					&distance_ref_points_depth);

	if (!distance_ref_points) {
		dbg("NUMA: ibm,associativity-reference-points not found.\n");
		goto err;
	}

	distance_ref_points_depth /= sizeof(int);

	if (firmware_has_feature(FW_FEATURE_OPAL) ||
	    firmware_has_feature(FW_FEATURE_TYPE1_AFFINITY)) {
		dbg("Using form 1 affinity\n");
		form1_affinity = 1;
	}

	if (form1_affinity) {
		depth = of_read_number(distance_ref_points, 1);
	} else {
		if (distance_ref_points_depth < 2) {
			printk(KERN_WARNING "NUMA: "
				"short ibm,associativity-reference-points\n");
			goto err;
		}

		depth = of_read_number(&distance_ref_points[1], 1);
	}

	/*
	 * Warn and cap if the hardware supports more than
	 * MAX_DISTANCE_REF_POINTS domains.
	 */
	if (distance_ref_points_depth > MAX_DISTANCE_REF_POINTS) {
		printk(KERN_WARNING "NUMA: distance array capped at "
			"%d entries\n", MAX_DISTANCE_REF_POINTS);
		distance_ref_points_depth = MAX_DISTANCE_REF_POINTS;
	}

	of_node_put(root);
	return depth;

err:
	of_node_put(root);
	return -1;
}
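
/*
 * Illustrative property (hypothetical cells): a form 1 machine might
 * expose ibm,associativity-reference-points = <0x4 0x2>.  The first cell
 * (0x4) is the most significant boundary; it becomes the returned depth
 * (min_common_depth) and level 0 of the distance lookup table.  The
 * second cell (0x2) is a less significant boundary used only for
 * distance calculations.
 */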

static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells)
{
	struct device_node *memory = NULL;

	memory = of_find_node_by_type(memory, "memory");
	if (!memory)
		panic("numa.c: No memory nodes found!");

	*n_addr_cells = of_n_addr_cells(memory);
	*n_size_cells = of_n_size_cells(memory);
	of_node_put(memory);
}

static unsigned long read_n_cells(int n, const __be32 **buf)
{
	unsigned long result = 0;

	while (n--) {
		result = (result << 32) | of_read_number(*buf, 1);
		(*buf)++;
	}
	return result;
}
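
/*
 * Worked example (hypothetical cells): with n = 2 and *buf pointing at
 * the big-endian cells { 0x00000001, 0x80000000 }, the first iteration
 * yields 0x1 and the second shifts it up and ORs in the low word, so
 * the function returns 0x180000000 and leaves *buf past both cells.
 */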

/*
 * Read the next memblock list entry from the ibm,dynamic-memory property
 * and return the information in the provided of_drconf_cell structure.
 */
static void read_drconf_cell(struct of_drconf_cell *drmem, const __be32 **cellp)
{
	const __be32 *cp;

	drmem->base_addr = read_n_cells(n_mem_addr_cells, cellp);

	cp = *cellp;
	drmem->drc_index = of_read_number(cp, 1);
	drmem->reserved = of_read_number(&cp[1], 1);
	drmem->aa_index = of_read_number(&cp[2], 1);
	drmem->flags = of_read_number(&cp[3], 1);

	*cellp = cp + 4;
}
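
/*
 * Layout sketch (assuming n_mem_addr_cells == 2): each list entry spans
 * six cells -- two for base_addr, then one each for drc_index, reserved,
 * aa_index and flags -- so a single call advances *cellp by six cells.
 */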

/*
 * Retrieve and validate the ibm,dynamic-memory property of the device tree.
 *
 * The layout of the ibm,dynamic-memory property is a count N of memblock
 * list entries followed by the N entries themselves.  Each memblock list
 * entry contains information as laid out in the of_drconf_cell struct above.
 */
static int of_get_drconf_memory(struct device_node *memory, const __be32 **dm)
{
	const __be32 *prop;
	u32 len, entries;

	prop = of_get_property(memory, "ibm,dynamic-memory", &len);
	if (!prop || len < sizeof(unsigned int))
		return 0;

	entries = of_read_number(prop++, 1);

	/* Now that we know the number of entries, revalidate the size
	 * of the property read in to ensure we have everything
	 */
	if (len < (entries * (n_mem_addr_cells + 4) + 1) * sizeof(unsigned int))
		return 0;

	*dm = prop;
	return entries;
}
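
/*
 * Size-check arithmetic (hypothetical numbers): with n_mem_addr_cells == 2
 * and entries == 3, the property must hold at least 3 * 6 + 1 = 19 cells
 * (76 bytes): the leading count cell plus six cells per entry.  Anything
 * shorter is treated as a truncated property and rejected.
 */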

/*
 * Retrieve and validate the ibm,lmb-size property for drconf memory
 * from the device tree.
 */
static u64 of_get_lmb_size(struct device_node *memory)
{
	const __be32 *prop;
	u32 len;

	prop = of_get_property(memory, "ibm,lmb-size", &len);
	if (!prop || len < sizeof(unsigned int))
		return 0;

	return read_n_cells(n_mem_size_cells, &prop);
}

struct assoc_arrays {
	u32	n_arrays;
	u32	array_sz;
	const __be32 *arrays;
};

/*
 * Retrieve and validate the list of associativity arrays for drconf
 * memory from the ibm,associativity-lookup-arrays property of the
 * device tree.
 *
 * The layout of the ibm,associativity-lookup-arrays property is a number N
 * indicating the number of associativity arrays, followed by a number M
 * indicating the size of each associativity array, followed by a list
 * of N associativity arrays.
 */
static int of_get_assoc_arrays(struct assoc_arrays *aa)
{
	struct device_node *memory;
	const __be32 *prop;
	u32 len;

	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (!memory)
		return -1;

	prop = of_get_property(memory, "ibm,associativity-lookup-arrays", &len);
	if (!prop || len < 2 * sizeof(unsigned int)) {
		of_node_put(memory);
		return -1;
	}

	aa->n_arrays = of_read_number(prop++, 1);
	aa->array_sz = of_read_number(prop++, 1);

	of_node_put(memory);

	/* Now that we know the number of arrays and size of each array,
	 * revalidate the size of the property read in.
	 */
	if (len < (aa->n_arrays * aa->array_sz + 2) * sizeof(unsigned int))
		return -1;

	aa->arrays = prop;
	return 0;
}
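
/*
 * Illustrative property (hypothetical cells): <2 3 0 4 7 0 5 8> decodes
 * as n_arrays = 2 and array_sz = 3, followed by the two three-cell
 * associativity arrays {0, 4, 7} and {0, 5, 8}.  An LMB's aa_index then
 * selects one of those arrays.
 */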

/*
 * This is like of_node_to_nid_single() for memory represented in the
 * ibm,dynamic-reconfiguration-memory node.
 */
static int of_drconf_to_nid_single(struct of_drconf_cell *drmem,
				   struct assoc_arrays *aa)
{
	int default_nid = 0;
	int nid = default_nid;
	int index;

	if (min_common_depth > 0 && min_common_depth <= aa->array_sz &&
	    !(drmem->flags & DRCONF_MEM_AI_INVALID) &&
	    drmem->aa_index < aa->n_arrays) {
		index = drmem->aa_index * aa->array_sz + min_common_depth - 1;
		nid = of_read_number(&aa->arrays[index], 1);

		if (nid == 0xffff || nid >= MAX_NUMNODES)
			nid = default_nid;

		if (nid > 0) {
			index = drmem->aa_index * aa->array_sz;
			initialize_distance_lookup_table(nid,
							&aa->arrays[index]);
		}
	}

	return nid;
}
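
/*
 * Index arithmetic (hypothetical values): with array_sz = 3,
 * min_common_depth = 2 and aa_index = 1, the nid is read from cell
 * 1 * 3 + 2 - 1 = 4 of the flattened lookup arrays -- the second cell
 * of the second array in the property sketched above.
 */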

/*
 * Figure out to which domain a cpu belongs and stick it there.
 * Return the id of the domain used.
 */
static int numa_setup_cpu(unsigned long lcpu)
{
	int nid = -1;
	struct device_node *cpu;

	/*
	 * If a valid cpu-to-node mapping is already available, use it
	 * directly instead of querying the firmware, since it represents
	 * the most recent mapping notified to us by the platform (eg: VPHN).
	 */
	if ((nid = numa_cpu_lookup_table[lcpu]) >= 0) {
		map_cpu_to_node(lcpu, nid);
		return nid;
	}

	cpu = of_get_cpu_node(lcpu, NULL);

	if (!cpu) {
		WARN_ON(1);
		if (cpu_present(lcpu))
			goto out_present;
		else
			goto out;
	}

	nid = of_node_to_nid_single(cpu);

out_present:
	if (nid < 0 || !node_online(nid))
		nid = first_online_node;

	map_cpu_to_node(lcpu, nid);
	of_node_put(cpu);
out:
	return nid;
}

static void verify_cpu_node_mapping(int cpu, int node)
{
	int base, sibling, i;

	/* Verify that all the threads in the core belong to the same node */
	base = cpu_first_thread_sibling(cpu);

	for (i = 0; i < threads_per_core; i++) {
		sibling = base + i;

		if (sibling == cpu || cpu_is_offline(sibling))
			continue;

		if (cpu_to_node(sibling) != node) {
			WARN(1, "CPU thread siblings %d and %d don't belong"
				" to the same node!\n", cpu, sibling);
			break;
		}
	}
}

/* Must run before sched domains notifier. */
static int ppc_numa_cpu_prepare(unsigned int cpu)
{
	int nid;

	nid = numa_setup_cpu(cpu);
	verify_cpu_node_mapping(cpu, nid);
	return 0;
}

static int ppc_numa_cpu_dead(unsigned int cpu)
{
#ifdef CONFIG_HOTPLUG_CPU
	unmap_cpu_from_node(cpu);
#endif
	return 0;
}

/*
 * Check and possibly modify a memory region to enforce the memory limit.
 *
 * Returns the size the region should have to enforce the memory limit.
 * This will either be the original value of size, a truncated value,
 * or zero. If the returned value of size is 0 the region should be
 * discarded as it lies wholly above the memory limit.
 */
static unsigned long __init numa_enforce_memory_limit(unsigned long start,
						      unsigned long size)
{
	/*
	 * We use memblock_end_of_DRAM() in here instead of memory_limit because
	 * we've already adjusted it for the limit and it takes care of
	 * having memory holes below the limit.  Also, in the case of
	 * iommu_is_off, memory_limit is not set but is implicitly enforced.
	 */

	if (start + size <= memblock_end_of_DRAM())
		return size;

	if (start >= memblock_end_of_DRAM())
		return 0;

	return memblock_end_of_DRAM() - start;
}
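
/*
 * Worked example (hypothetical limit): if memblock_end_of_DRAM() is 4G,
 * a region [3G, 3G + 2G) straddles the limit and is truncated to 1G, a
 * region starting at 5G returns 0 and is discarded, and any region that
 * ends at or below 4G is returned unchanged.
 */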

/*
 * Reads the counter for a given entry in
 * linux,drconf-usable-memory property
 */
static inline int __init read_usm_ranges(const __be32 **usm)
{
	/*
	 * For each lmb in ibm,dynamic-memory a corresponding
	 * entry in linux,drconf-usable-memory property contains
	 * a counter followed by that many (base, size) duples.
	 * Read the counter from linux,drconf-usable-memory.
	 */
	return read_n_cells(n_mem_size_cells, usm);
}
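
/*
 * Illustrative entry (hypothetical cells): a counter of 2 followed by
 * two (base, size) duples means only those two sub-ranges of the LMB
 * are usable in the kexec/kdump kernel; a counter of 0 marks the whole
 * LMB unusable, and parse_drconf_memory() skips it.
 */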

/*
 * Extract NUMA information from the ibm,dynamic-reconfiguration-memory
 * node.  This assumes n_mem_{addr,size}_cells have been set.
 */
static void __init parse_drconf_memory(struct device_node *memory)
{
	const __be32 *uninitialized_var(dm), *usm;
	unsigned int n, rc, ranges, is_kexec_kdump = 0;
	unsigned long lmb_size, base, size, sz;
	int nid;
	struct assoc_arrays aa = { .arrays = NULL };

	n = of_get_drconf_memory(memory, &dm);
	if (!n)
		return;

	lmb_size = of_get_lmb_size(memory);
	if (!lmb_size)
		return;

	rc = of_get_assoc_arrays(&aa);
	if (rc)
		return;

	/* check if this is a kexec/kdump kernel */
	usm = of_get_usable_memory();
	if (usm != NULL)
		is_kexec_kdump = 1;

	for (; n != 0; --n) {
		struct of_drconf_cell drmem;

		read_drconf_cell(&drmem, &dm);

		/* skip this block if the reserved bit is set in flags (0x80)
		   or if the block is not assigned to this partition (0x8) */
		if ((drmem.flags & DRCONF_MEM_RESERVED)
		    || !(drmem.flags & DRCONF_MEM_ASSIGNED))
			continue;

		base = drmem.base_addr;
		size = lmb_size;
		ranges = 1;

		if (is_kexec_kdump) {
			ranges = read_usm_ranges(&usm);
			if (!ranges) /* there are no (base, size) duples */
				continue;
		}
		do {
			if (is_kexec_kdump) {
				base = read_n_cells(n_mem_addr_cells, &usm);
				size = read_n_cells(n_mem_size_cells, &usm);
			}
			nid = of_drconf_to_nid_single(&drmem, &aa);
			fake_numa_create_new_node(
				((base + size) >> PAGE_SHIFT),
					   &nid);
			node_set_online(nid);
			sz = numa_enforce_memory_limit(base, size);
			if (sz)
				memblock_set_node(base, sz,
						  &memblock.memory, nid);
		} while (--ranges);
	}
}

static int __init parse_numa_properties(void)
{
	struct device_node *memory;
	int default_nid = 0;
	unsigned long i;

	if (numa_enabled == 0) {
		printk(KERN_WARNING "NUMA disabled by user\n");
		return -1;
	}

	min_common_depth = find_min_common_depth();

	if (min_common_depth < 0)
		return min_common_depth;

	dbg("NUMA associativity depth for CPU/Memory: %d\n", min_common_depth);

	/*
	 * Even though we connect cpus to numa domains later in SMP
	 * init, we need to know the node ids now. This is because
	 * each node to be onlined must have NODE_DATA etc backing it.
	 */
	for_each_present_cpu(i) {
		struct device_node *cpu;
		int nid;

		cpu = of_get_cpu_node(i, NULL);
		BUG_ON(!cpu);
		nid = of_node_to_nid_single(cpu);
		of_node_put(cpu);

		/*
		 * Don't fall back to default_nid yet -- we will plug
		 * cpus into nodes once the memory scan has discovered
		 * the topology.
		 */
		if (nid < 0)
			continue;
		node_set_online(nid);
	}

	get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells);

	for_each_node_by_type(memory, "memory") {
		unsigned long start;
		unsigned long size;
		int nid;
		int ranges;
		const __be32 *memcell_buf;
		unsigned int len;

		memcell_buf = of_get_property(memory,
			"linux,usable-memory", &len);
		if (!memcell_buf || len <= 0)
			memcell_buf = of_get_property(memory, "reg", &len);
		if (!memcell_buf || len <= 0)
			continue;

		/* ranges in cell */
		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
new_range:
		/* these are order-sensitive, and modify the buffer pointer */
		start = read_n_cells(n_mem_addr_cells, &memcell_buf);
		size = read_n_cells(n_mem_size_cells, &memcell_buf);

		/*
		 * Assumption: either all memory nodes or none will
		 * have associativity properties.  If none, then
		 * everything goes to default_nid.
		 */
		nid = of_node_to_nid_single(memory);
		if (nid < 0)
			nid = default_nid;

		fake_numa_create_new_node(((start + size) >> PAGE_SHIFT), &nid);
		node_set_online(nid);

		size = numa_enforce_memory_limit(start, size);
		if (size)
			memblock_set_node(start, size, &memblock.memory, nid);

		if (--ranges)
			goto new_range;
	}

	/*
	 * Now do the same thing for each MEMBLOCK listed in the
	 * ibm,dynamic-memory property in the
	 * ibm,dynamic-reconfiguration-memory node.
	 */
	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory)
		parse_drconf_memory(memory);

	return 0;
}
static void __init setup_nonnuma(void)
{
Y
Yinghai Lu 已提交
827 828
	unsigned long top_of_ram = memblock_end_of_DRAM();
	unsigned long total_ram = memblock_phys_mem_size();
829
	unsigned long start_pfn, end_pfn;
830 831
	unsigned int nid = 0;
	struct memblock_region *reg;
L
Linus Torvalds 已提交
832

833
	printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
L
Linus Torvalds 已提交
834
	       top_of_ram, total_ram);
835
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
L
Linus Torvalds 已提交
836 837
	       (top_of_ram - total_ram) >> 20);

838
	for_each_memblock(memory, reg) {
839 840
		start_pfn = memblock_region_memory_base_pfn(reg);
		end_pfn = memblock_region_memory_end_pfn(reg);
841 842

		fake_numa_create_new_node(end_pfn, &nid);
T
Tejun Heo 已提交
843
		memblock_set_node(PFN_PHYS(start_pfn),
844 845
				  PFN_PHYS(end_pfn - start_pfn),
				  &memblock.memory, nid);
846
		node_set_online(nid);
847
	}
L
Linus Torvalds 已提交
848 849
}

850 851 852 853 854 855 856 857 858
void __init dump_numa_cpu_topology(void)
{
	unsigned int node;
	unsigned int cpu, count;

	if (min_common_depth == -1 || !numa_enabled)
		return;

	for_each_online_node(node) {
859
		pr_info("Node %d CPUs:", node);
860 861 862 863 864 865

		count = 0;
		/*
		 * If we used a CPU iterator here we would miss printing
		 * the holes in the cpumap.
		 */
866 867 868
		for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
			if (cpumask_test_cpu(cpu,
					node_to_cpumask_map[node])) {
869
				if (count == 0)
870
					pr_cont(" %u", cpu);
871 872 873
				++count;
			} else {
				if (count > 1)
874
					pr_cont("-%u", cpu - 1);
875 876 877 878 879
				count = 0;
			}
		}

		if (count > 1)
880 881
			pr_cont("-%u", nr_cpu_ids - 1);
		pr_cont("\n");
882 883 884
	}
}
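
/*
 * Example output (hypothetical 16-way box, two 8-thread nodes):
 *
 *   numa: Node 0 CPUs: 0-7
 *   numa: Node 1 CPUs: 8-15
 *
 * Runs of present CPUs are compressed to "first-last"; a hole in the
 * cpumask resets the count, so disjoint ranges print separately.
 */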

/* Initialize NODE_DATA for a node on the local memory */
static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
{
	u64 spanned_pages = end_pfn - start_pfn;
	const size_t nd_size = roundup(sizeof(pg_data_t), SMP_CACHE_BYTES);
	u64 nd_pa;
	void *nd;
	int tnid;

	nd_pa = memblock_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
	nd = __va(nd_pa);

	/* report and initialize */
	pr_info("  NODE_DATA [mem %#010Lx-%#010Lx]\n",
		nd_pa, nd_pa + nd_size - 1);
	tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
	if (tnid != nid)
		pr_info("    NODE_DATA(%d) on node %d\n", nid, tnid);

	node_data[nid] = nd;
	memset(NODE_DATA(nid), 0, sizeof(pg_data_t));
	NODE_DATA(nid)->node_id = nid;
	NODE_DATA(nid)->node_start_pfn = start_pfn;
	NODE_DATA(nid)->node_spanned_pages = spanned_pages;
}

void __init initmem_init(void)
{
	int nid, cpu;

	max_low_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	max_pfn = max_low_pfn;

	if (parse_numa_properties())
		setup_nonnuma();

	memblock_dump_all();

	/*
	 * Reduce the possible NUMA nodes to the online NUMA nodes,
	 * since we do not support node hotplug. This ensures that we
	 * lower the maximum NUMA node ID to what is actually present.
	 */
	nodes_and(node_possible_map, node_possible_map, node_online_map);

	for_each_online_node(nid) {
		unsigned long start_pfn, end_pfn;

		get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
		setup_node_data(nid, start_pfn, end_pfn);
		sparse_memory_present_with_active_regions(nid);
	}

	sparse_init();

	setup_node_to_cpumask_map();

	reset_numa_cpu_lookup_table();

	/*
	 * We need the numa_cpu_lookup_table to be accurate for all CPUs,
	 * even before we online them, so that we can use cpu_to_{node,mem}
	 * early in boot, cf. smp_prepare_cpus().
	 * _nocalls() + manual invocation is used because cpuhp is not yet
	 * initialized for the boot CPU.
	 */
	cpuhp_setup_state_nocalls(CPUHP_POWER_NUMA_PREPARE, "powerpc/numa:prepare",
				  ppc_numa_cpu_prepare, ppc_numa_cpu_dead);
	for_each_present_cpu(cpu)
		numa_setup_cpu(cpu);
}

static int __init early_numa(char *p)
{
	if (!p)
		return 0;

	if (strstr(p, "off"))
		numa_enabled = 0;

	if (strstr(p, "debug"))
		numa_debug = 1;

	p = strstr(p, "fake=");
	if (p)
		cmdline = p + strlen("fake=");

	return 0;
}
early_param("numa", early_numa);

static bool topology_updates_enabled = true;

static int __init early_topology_updates(char *p)
{
	if (!p)
		return 0;

	if (!strcmp(p, "off")) {
		pr_info("Disabling topology updates\n");
		topology_updates_enabled = false;
	}

	return 0;
}
early_param("topology_updates", early_topology_updates);

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Find the node associated with a hot added memory section for
 * memory represented in the device tree by the property
 * ibm,dynamic-reconfiguration-memory/ibm,dynamic-memory.
 */
static int hot_add_drconf_scn_to_nid(struct device_node *memory,
				     unsigned long scn_addr)
{
	const __be32 *dm;
	unsigned int drconf_cell_cnt, rc;
	unsigned long lmb_size;
	struct assoc_arrays aa;
	int nid = -1;

	drconf_cell_cnt = of_get_drconf_memory(memory, &dm);
	if (!drconf_cell_cnt)
		return -1;

	lmb_size = of_get_lmb_size(memory);
	if (!lmb_size)
		return -1;

	rc = of_get_assoc_arrays(&aa);
	if (rc)
		return -1;

	for (; drconf_cell_cnt != 0; --drconf_cell_cnt) {
		struct of_drconf_cell drmem;

		read_drconf_cell(&drmem, &dm);

		/* skip this block if it is reserved or not assigned to
		 * this partition */
		if ((drmem.flags & DRCONF_MEM_RESERVED)
		    || !(drmem.flags & DRCONF_MEM_ASSIGNED))
			continue;

		if ((scn_addr < drmem.base_addr)
		    || (scn_addr >= (drmem.base_addr + lmb_size)))
			continue;

		nid = of_drconf_to_nid_single(&drmem, &aa);
		break;
	}

	return nid;
}

/*
 * Find the node associated with a hot added memory section for memory
 * represented in the device tree as a node (i.e. memory@XXXX) for
 * each memblock.
 */
static int hot_add_node_scn_to_nid(unsigned long scn_addr)
{
	struct device_node *memory;
	int nid = -1;

	for_each_node_by_type(memory, "memory") {
		unsigned long start, size;
		int ranges;
		const __be32 *memcell_buf;
		unsigned int len;

		memcell_buf = of_get_property(memory, "reg", &len);
		if (!memcell_buf || len <= 0)
			continue;

		/* ranges in cell */
		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);

		while (ranges--) {
			start = read_n_cells(n_mem_addr_cells, &memcell_buf);
			size = read_n_cells(n_mem_size_cells, &memcell_buf);

			if ((scn_addr < start) || (scn_addr >= (start + size)))
				continue;

			nid = of_node_to_nid_single(memory);
			break;
		}

		if (nid >= 0)
			break;
	}

	of_node_put(memory);

	return nid;
}

/*
 * Find the node associated with a hot added memory section.  Section
 * corresponds to a SPARSEMEM section, not a MEMBLOCK.  It is assumed that
 * sections are fully contained within a single MEMBLOCK.
 */
int hot_add_scn_to_nid(unsigned long scn_addr)
{
	struct device_node *memory = NULL;
	int nid;

	if (!numa_enabled || (min_common_depth < 0))
		return first_online_node;

	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory) {
		nid = hot_add_drconf_scn_to_nid(memory, scn_addr);
		of_node_put(memory);
	} else {
		nid = hot_add_node_scn_to_nid(scn_addr);
	}

	if (nid < 0 || !node_possible(nid))
		nid = first_online_node;

	return nid;
}

static u64 hot_add_drconf_memory_max(void)
{
	struct device_node *memory = NULL;
	struct device_node *dn = NULL;
	unsigned int drconf_cell_cnt = 0;
	u64 lmb_size = 0;
	const __be32 *dm = NULL;
	const __be64 *lrdr = NULL;
	struct of_drconf_cell drmem;

	dn = of_find_node_by_path("/rtas");
	if (dn) {
		lrdr = of_get_property(dn, "ibm,lrdr-capacity", NULL);
		of_node_put(dn);
		if (lrdr)
			return be64_to_cpup(lrdr);
	}

	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory) {
		drconf_cell_cnt = of_get_drconf_memory(memory, &dm);
		lmb_size = of_get_lmb_size(memory);

		/* Advance to the last entry, each entry has six 32-bit cells */
		dm += (drconf_cell_cnt - 1) * 6;
		read_drconf_cell(&drmem, &dm);
		of_node_put(memory);
		return drmem.base_addr + lmb_size;
	}
	return 0;
}

/*
 * memory_hotplug_max - return max address of memory that may be added
 *
 * This is currently only used on systems that support drconfig memory
 * hotplug.
 */
u64 memory_hotplug_max(void)
{
	return max(hot_add_drconf_memory_max(), memblock_end_of_DRAM());
}
#endif /* CONFIG_MEMORY_HOTPLUG */

/* Virtual Processor Home Node (VPHN) support */
#ifdef CONFIG_PPC_SPLPAR

#include "vphn.h"

struct topology_update_data {
	struct topology_update_data *next;
	unsigned int cpu;
	int old_nid;
	int new_nid;
};

#define TOPOLOGY_DEF_TIMER_SECS	60

static u8 vphn_cpu_change_counts[NR_CPUS][MAX_DISTANCE_REF_POINTS];
static cpumask_t cpu_associativity_changes_mask;
static int vphn_enabled;
static int prrn_enabled;
static void reset_topology_timer(void);
static int topology_timer_secs = 1;
static int topology_inited;
static int topology_update_needed;

/*
 * Change polling interval for associativity changes.
 */
int timed_topology_update(int nsecs)
{
	if (vphn_enabled) {
		if (nsecs > 0)
			topology_timer_secs = nsecs;
		else
			topology_timer_secs = TOPOLOGY_DEF_TIMER_SECS;

		reset_topology_timer();
	}

	return 0;
}

/*
 * Take a snapshot of the hypervisor's current associativity change
 * counters for each cpu.
 */
static void setup_cpu_associativity_change_counters(void)
{
	int cpu;

	/* The VPHN feature supports a maximum of 8 reference points */
	BUILD_BUG_ON(MAX_DISTANCE_REF_POINTS > 8);

	for_each_possible_cpu(cpu) {
		int i;
		u8 *counts = vphn_cpu_change_counts[cpu];
		volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts;

		for (i = 0; i < distance_ref_points_depth; i++)
			counts[i] = hypervisor_counts[i];
	}
}

/*
 * The hypervisor maintains a set of 8 associativity change counters in
 * the VPA of each cpu that correspond to the associativity levels in the
 * ibm,associativity-reference-points property. When an associativity
 * level changes, the corresponding counter is incremented.
 *
 * Set a bit in cpu_associativity_changes_mask for each cpu whose home
 * node associativity levels have changed.
 *
 * Returns the number of cpus with unhandled associativity changes.
 */
static int update_cpu_associativity_changes_mask(void)
{
	int cpu;
	cpumask_t *changes = &cpu_associativity_changes_mask;

	for_each_possible_cpu(cpu) {
		int i, changed = 0;
		u8 *counts = vphn_cpu_change_counts[cpu];
		volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts;

		for (i = 0; i < distance_ref_points_depth; i++) {
			if (hypervisor_counts[i] != counts[i]) {
				counts[i] = hypervisor_counts[i];
				changed = 1;
			}
		}
		if (changed) {
			cpumask_or(changes, changes, cpu_sibling_mask(cpu));
			cpu = cpu_last_thread_sibling(cpu);
		}
	}

	return cpumask_weight(changes);
}
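
/*
 * Illustrative sequence (hypothetical counter values): if a cpu's cached
 * counts are {1, 4} and the hypervisor's VPA copy now reads {1, 5}, the
 * level-1 counter changed, so the cpu's whole sibling mask is flagged in
 * cpu_associativity_changes_mask and the cache is resynced to {1, 5}.
 */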

/*
 * Retrieve the new associativity information for a virtual processor's
 * home node.
 */
static long hcall_vphn(unsigned long cpu, __be32 *associativity)
{
	long rc;
	long retbuf[PLPAR_HCALL9_BUFSIZE] = {0};
	u64 flags = 1;
	int hwcpu = get_hard_smp_processor_id(cpu);

	rc = plpar_hcall9(H_HOME_NODE_ASSOCIATIVITY, retbuf, flags, hwcpu);
	vphn_unpack_associativity(retbuf, associativity);

	return rc;
}

static long vphn_get_associativity(unsigned long cpu,
					__be32 *associativity)
{
	long rc;

	rc = hcall_vphn(cpu, associativity);

	switch (rc) {
	case H_FUNCTION:
		printk(KERN_INFO
			"VPHN is not supported. Disabling polling...\n");
		stop_topology_update();
		break;
	case H_HARDWARE:
		printk(KERN_ERR
			"hcall_vphn() experienced a hardware fault "
			"preventing VPHN. Disabling polling...\n");
		stop_topology_update();
		break;
	case H_SUCCESS:
		dbg("VPHN hcall succeeded. Reset polling...\n");
		timed_topology_update(0);
		break;
	}

	return rc;
}

/*
 * Update the CPU maps and sysfs entries for a single CPU when its NUMA
 * characteristics change. This function doesn't perform any locking and is
 * only safe to call from stop_machine().
 */
static int update_cpu_topology(void *data)
{
	struct topology_update_data *update;
	unsigned long cpu;

	if (!data)
		return -EINVAL;

	cpu = smp_processor_id();

	for (update = data; update; update = update->next) {
		int new_nid = update->new_nid;
		if (cpu != update->cpu)
			continue;

		unmap_cpu_from_node(cpu);
		map_cpu_to_node(cpu, new_nid);
		set_cpu_numa_node(cpu, new_nid);
		set_cpu_numa_mem(cpu, local_memory_node(new_nid));
		vdso_getcpu_init();
	}

	return 0;
}

static int update_lookup_table(void *data)
{
	struct topology_update_data *update;

	if (!data)
		return -EINVAL;

	/*
	 * Upon topology update, the numa-cpu lookup table needs to be updated
	 * for all threads in the core, including offline CPUs, to ensure that
	 * future hotplug operations respect the cpu-to-node associativity
	 * properly.
	 */
	for (update = data; update; update = update->next) {
		int nid, base, j;

		nid = update->new_nid;
		base = cpu_first_thread_sibling(update->cpu);

		for (j = 0; j < threads_per_core; j++) {
			update_numa_cpu_lookup_table(base + j, nid);
		}
	}

	return 0;
}

/*
 * Update the node maps and sysfs entries for each cpu whose home node
 * has changed. Returns 1 when the topology has changed, and 0 otherwise.
 *
 * cpus_locked says whether we already hold cpu_hotplug_lock.
 */
int numa_update_cpu_topology(bool cpus_locked)
{
	unsigned int cpu, sibling, changed = 0;
	struct topology_update_data *updates, *ud;
	__be32 associativity[VPHN_ASSOC_BUFSIZE] = {0};
	cpumask_t updated_cpus;
	struct device *dev;
	int weight, new_nid, i = 0;

	if (!prrn_enabled && !vphn_enabled) {
		if (!topology_inited)
			topology_update_needed = 1;
		return 0;
	}

	weight = cpumask_weight(&cpu_associativity_changes_mask);
	if (!weight)
		return 0;

	updates = kzalloc(weight * (sizeof(*updates)), GFP_KERNEL);
	if (!updates)
		return 0;

	cpumask_clear(&updated_cpus);

	for_each_cpu(cpu, &cpu_associativity_changes_mask) {
		/*
		 * If siblings aren't flagged for changes, updates list
		 * will be too short. Skip on this update and set for next
		 * update.
		 */
		if (!cpumask_subset(cpu_sibling_mask(cpu),
					&cpu_associativity_changes_mask)) {
			pr_info("Sibling bits not set for associativity "
					"change, cpu%d\n", cpu);
			cpumask_or(&cpu_associativity_changes_mask,
					&cpu_associativity_changes_mask,
					cpu_sibling_mask(cpu));
			cpu = cpu_last_thread_sibling(cpu);
			continue;
		}

		/* Use associativity from first thread for all siblings */
		vphn_get_associativity(cpu, associativity);
		new_nid = associativity_to_nid(associativity);
		if (new_nid < 0 || !node_online(new_nid))
			new_nid = first_online_node;

		if (new_nid == numa_cpu_lookup_table[cpu]) {
			cpumask_andnot(&cpu_associativity_changes_mask,
					&cpu_associativity_changes_mask,
					cpu_sibling_mask(cpu));
			dbg("Assoc chg gives same node %d for cpu%d\n",
					new_nid, cpu);
			cpu = cpu_last_thread_sibling(cpu);
			continue;
		}

		for_each_cpu(sibling, cpu_sibling_mask(cpu)) {
			ud = &updates[i++];
			ud->next = &updates[i];
			ud->cpu = sibling;
			ud->new_nid = new_nid;
			ud->old_nid = numa_cpu_lookup_table[sibling];
			cpumask_set_cpu(sibling, &updated_cpus);
		}
		cpu = cpu_last_thread_sibling(cpu);
	}

	/*
	 * Prevent processing of 'updates' from overflowing array
	 * where last entry filled in a 'next' pointer.
	 */
	if (i)
		updates[i-1].next = NULL;

	pr_debug("Topology update for the following CPUs:\n");
	if (cpumask_weight(&updated_cpus)) {
		for (ud = &updates[0]; ud; ud = ud->next) {
			pr_debug("cpu %d moving from node %d "
					  "to %d\n", ud->cpu,
					  ud->old_nid, ud->new_nid);
		}
	}

	/*
	 * In cases where we have nothing to update (because the updates list
	 * is too short or because the new topology is same as the old one),
	 * skip invoking update_cpu_topology() via stop-machine(). This is
	 * necessary (and not just a fast-path optimization) since stop-machine
	 * can end up electing a random CPU to run update_cpu_topology(), and
	 * thus trick us into setting up incorrect cpu-node mappings (since
	 * 'updates' is kzalloc()'ed).
	 *
	 * And for the similar reason, we will skip all the following updating.
	 */
	if (!cpumask_weight(&updated_cpus))
		goto out;

	if (cpus_locked)
		stop_machine_cpuslocked(update_cpu_topology, &updates[0],
					&updated_cpus);
	else
		stop_machine(update_cpu_topology, &updates[0], &updated_cpus);

	/*
	 * Update the numa-cpu lookup table with the new mappings, even for
	 * offline CPUs. It is best to perform this update from the stop-
	 * machine context.
	 */
	if (cpus_locked)
		stop_machine_cpuslocked(update_lookup_table, &updates[0],
					cpumask_of(raw_smp_processor_id()));
	else
		stop_machine(update_lookup_table, &updates[0],
			     cpumask_of(raw_smp_processor_id()));

	for (ud = &updates[0]; ud; ud = ud->next) {
		unregister_cpu_under_node(ud->cpu, ud->old_nid);
		register_cpu_under_node(ud->cpu, ud->new_nid);

		dev = get_cpu_device(ud->cpu);
		if (dev)
			kobject_uevent(&dev->kobj, KOBJ_CHANGE);
		cpumask_clear_cpu(ud->cpu, &cpu_associativity_changes_mask);
		changed = 1;
	}

out:
	kfree(updates);
	topology_update_needed = 0;
	return changed;
}

int arch_update_cpu_topology(void)
{
	return numa_update_cpu_topology(true);
}

static void topology_work_fn(struct work_struct *work)
{
	rebuild_sched_domains();
}
static DECLARE_WORK(topology_work, topology_work_fn);

static void topology_schedule_update(void)
{
	schedule_work(&topology_work);
}

static void topology_timer_fn(struct timer_list *unused)
{
	if (prrn_enabled && cpumask_weight(&cpu_associativity_changes_mask))
		topology_schedule_update();
	else if (vphn_enabled) {
		if (update_cpu_associativity_changes_mask() > 0)
			topology_schedule_update();
		reset_topology_timer();
	}
}
static struct timer_list topology_timer;

static void reset_topology_timer(void)
{
	mod_timer(&topology_timer, jiffies + topology_timer_secs * HZ);
}

#ifdef CONFIG_SMP

static void stage_topology_update(int core_id)
{
	cpumask_or(&cpu_associativity_changes_mask,
		&cpu_associativity_changes_mask, cpu_sibling_mask(core_id));
	reset_topology_timer();
}

static int dt_update_callback(struct notifier_block *nb,
				unsigned long action, void *data)
{
	struct of_reconfig_data *update = data;
	int rc = NOTIFY_DONE;

	switch (action) {
	case OF_RECONFIG_UPDATE_PROPERTY:
		if (!of_prop_cmp(update->dn->type, "cpu") &&
		    !of_prop_cmp(update->prop->name, "ibm,associativity")) {
			u32 core_id;
			of_property_read_u32(update->dn, "reg", &core_id);
			stage_topology_update(core_id);
			rc = NOTIFY_OK;
		}
		break;
	}

	return rc;
}

static struct notifier_block dt_update_nb = {
	.notifier_call = dt_update_callback,
};

#endif

/*
 * Start polling for associativity changes.
 */
int start_topology_update(void)
{
	int rc = 0;

	if (firmware_has_feature(FW_FEATURE_PRRN)) {
		if (!prrn_enabled) {
			prrn_enabled = 1;
#ifdef CONFIG_SMP
			rc = of_reconfig_notifier_register(&dt_update_nb);
#endif
		}
	}
	if (firmware_has_feature(FW_FEATURE_VPHN) &&
		   lppaca_shared_proc(get_lppaca())) {
		if (!vphn_enabled) {
			vphn_enabled = 1;
			setup_cpu_associativity_change_counters();
			timer_setup(&topology_timer, topology_timer_fn,
				    TIMER_DEFERRABLE);
			reset_topology_timer();
		}
	}

	return rc;
}

/*
 * Disable polling for VPHN associativity changes.
 */
int stop_topology_update(void)
{
	int rc = 0;

	if (prrn_enabled) {
		prrn_enabled = 0;
#ifdef CONFIG_SMP
		rc = of_reconfig_notifier_unregister(&dt_update_nb);
#endif
	}
	if (vphn_enabled) {
		vphn_enabled = 0;
		rc = del_timer_sync(&topology_timer);
	}

	return rc;
}

int prrn_is_enabled(void)
{
	return prrn_enabled;
}

static int topology_read(struct seq_file *file, void *v)
{
	if (vphn_enabled || prrn_enabled)
		seq_puts(file, "on\n");
	else
		seq_puts(file, "off\n");

	return 0;
}

static int topology_open(struct inode *inode, struct file *file)
{
	return single_open(file, topology_read, NULL);
}

static ssize_t topology_write(struct file *file, const char __user *buf,
			      size_t count, loff_t *off)
{
	char kbuf[4]; /* "on" or "off" plus null. */
	int read_len;

	read_len = count < 3 ? count : 3;
	if (copy_from_user(kbuf, buf, read_len))
		return -EINVAL;

	kbuf[read_len] = '\0';

	if (!strncmp(kbuf, "on", 2))
		start_topology_update();
	else if (!strncmp(kbuf, "off", 3))
		stop_topology_update();
	else
		return -EINVAL;

	return count;
}

static const struct file_operations topology_ops = {
	.read = seq_read,
	.write = topology_write,
	.open = topology_open,
	.release = single_release
};
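
/*
 * Usage sketch (assuming procfs is mounted at /proc): reading
 * /proc/powerpc/topology_updates reports "on" or "off", and writing
 * either string toggles polling, e.g.:
 *
 *	echo off > /proc/powerpc/topology_updates
 */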

static int topology_update_init(void)
{
	/* Do not poll for changes if disabled at boot */
	if (topology_updates_enabled)
		start_topology_update();

	if (vphn_enabled)
		topology_schedule_update();

	if (!proc_create("powerpc/topology_updates", 0644, NULL, &topology_ops))
		return -ENOMEM;

	topology_inited = 1;
	if (topology_update_needed)
		bitmap_fill(cpumask_bits(&cpu_associativity_changes_mask),
					nr_cpumask_bits);

	return 0;
}
device_initcall(topology_update_init);
#endif /* CONFIG_PPC_SPLPAR */