/*
 * pseries Memory Hotplug infrastructure.
 *
 * Copyright (C) 2008 Badari Pulavarty, IBM Corporation
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt)	"pseries-hotplug-mem: " fmt

#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/slab.h>

#include <asm/firmware.h>
#include <asm/machdep.h>
#include <asm/prom.h>
#include <asm/sparsemem.h>
#include <asm/fadump.h>
#include <asm/drmem.h>
#include "pseries.h"

static bool rtas_hp_event;

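/*
 * Determine the memory block size used for hotplug: prefer the
 * ibm,lmb-size property of /ibm,dynamic-reconfiguration-memory and
 * fall back to probing the first memory@ nodes on pseries.
 */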
unsigned long pseries_memory_block_size(void)
{
	struct device_node *np;
	unsigned int memblock_size = MIN_MEMORY_BLOCK_SIZE;
	struct resource r;

	np = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (np) {
		const __be64 *size;

		size = of_get_property(np, "ibm,lmb-size", NULL);
		if (size)
			memblock_size = be64_to_cpup(size);
		of_node_put(np);
	} else if (machine_is(pseries)) {
		/* This fallback really only applies to pseries */
		unsigned int memzero_size = 0;

		np = of_find_node_by_path("/memory@0");
		if (np) {
			if (!of_address_to_resource(np, 0, &r))
				memzero_size = resource_size(&r);
			of_node_put(np);
		}

		if (memzero_size) {
			/* We now know the size of memory@0, use this to find
			 * the first memoryblock and get its size.
			 */
			char buf[64];

			sprintf(buf, "/memory@%x", memzero_size);
			np = of_find_node_by_path(buf);
			if (np) {
				if (!of_address_to_resource(np, 0, &r))
					memblock_size = resource_size(&r);
				of_node_put(np);
			}
		}
	}
	return memblock_size;
}

static void dlpar_free_property(struct property *prop)
{
	kfree(prop->name);
	kfree(prop->value);
	kfree(prop);
}

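/*
 * Duplicate @prop with its value buffer grown to @prop_size bytes so the
 * caller can append new entries before updating the device tree.
 */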
static struct property *dlpar_clone_property(struct property *prop,
					     u32 prop_size)
{
	struct property *new_prop;

	new_prop = kzalloc(sizeof(*new_prop), GFP_KERNEL);
	if (!new_prop)
		return NULL;

	new_prop->name = kstrdup(prop->name, GFP_KERNEL);
	new_prop->value = kzalloc(prop_size, GFP_KERNEL);
	if (!new_prop->name || !new_prop->value) {
		dlpar_free_property(new_prop);
		return NULL;
	}

	memcpy(new_prop->value, prop->value, prop->length);
	new_prop->length = prop_size;

	of_property_set_flag(new_prop, OF_DYNAMIC);
	return new_prop;
}

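/*
 * Find the index of the associativity array matching @lmb_assoc in the
 * ibm,associativity-lookup-arrays property. If no match exists, append
 * the new associativity to the property. Returns true and sets *aa_index
 * on success.
 */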
static bool find_aa_index(struct device_node *dr_node,
			 struct property *ala_prop,
			 const u32 *lmb_assoc, u32 *aa_index)
{
	u32 *assoc_arrays, new_prop_size;
	struct property *new_prop;
	int aa_arrays, aa_array_entries, aa_array_sz;
	int i, index;

	/*
	 * The ibm,associativity-lookup-arrays property is defined to be
	 * a 32-bit value specifying the number of associativity arrays
	 * followed by a 32-bit value specifying the number of entries per
	 * array, followed by the associativity arrays.
	 */
	assoc_arrays = ala_prop->value;

	aa_arrays = be32_to_cpu(assoc_arrays[0]);
	aa_array_entries = be32_to_cpu(assoc_arrays[1]);
	aa_array_sz = aa_array_entries * sizeof(u32);

	for (i = 0; i < aa_arrays; i++) {
		index = (i * aa_array_entries) + 2;

		if (memcmp(&assoc_arrays[index], &lmb_assoc[1], aa_array_sz))
			continue;

		*aa_index = i;
		return true;
	}

	new_prop_size = ala_prop->length + aa_array_sz;
	new_prop = dlpar_clone_property(ala_prop, new_prop_size);
	if (!new_prop)
		return false;

	assoc_arrays = new_prop->value;

	/* increment the number of entries in the lookup array */
	assoc_arrays[0] = cpu_to_be32(aa_arrays + 1);

	/* copy the new associativity into the lookup array */
	index = aa_arrays * aa_array_entries + 2;
	memcpy(&assoc_arrays[index], &lmb_assoc[1], aa_array_sz);

	of_update_property(dr_node, new_prop);

	/*
	 * The associativity lookup array index for this lmb is
	 * number of entries - 1 since we added its associativity
	 * to the end of the lookup array.
	 */
	*aa_index = be32_to_cpu(assoc_arrays[0]) - 1;
	return true;
}

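/*
 * Read the LMB's ibm,associativity via configure-connector and store the
 * matching lookup-array index in lmb->aa_index.
 */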
static int update_lmb_associativity_index(struct drmem_lmb *lmb)
{
	struct device_node *parent, *lmb_node, *dr_node;
	struct property *ala_prop;
	const u32 *lmb_assoc;
	u32 aa_index;
	bool found;

	parent = of_find_node_by_path("/");
	if (!parent)
		return -ENODEV;

	lmb_node = dlpar_configure_connector(cpu_to_be32(lmb->drc_index),
					     parent);
	of_node_put(parent);
	if (!lmb_node)
		return -EINVAL;

	lmb_assoc = of_get_property(lmb_node, "ibm,associativity", NULL);
	if (!lmb_assoc) {
		dlpar_free_cc_nodes(lmb_node);
		return -ENODEV;
	}

	dr_node = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (!dr_node) {
		dlpar_free_cc_nodes(lmb_node);
		return -ENODEV;
	}

	ala_prop = of_find_property(dr_node, "ibm,associativity-lookup-arrays",
				    NULL);
	if (!ala_prop) {
		of_node_put(dr_node);
		dlpar_free_cc_nodes(lmb_node);
		return -ENODEV;
	}

	found = find_aa_index(dr_node, ala_prop, lmb_assoc, &aa_index);

	of_node_put(dr_node);
	dlpar_free_cc_nodes(lmb_node);

	if (!found) {
		pr_err("Could not find LMB associativity\n");
		return -1;
	}

	lmb->aa_index = aa_index;
	return 0;
}

static struct memory_block *lmb_to_memblock(struct drmem_lmb *lmb)
{
	unsigned long section_nr;
	struct mem_section *mem_sect;
	struct memory_block *mem_block;

	section_nr = pfn_to_section_nr(PFN_DOWN(lmb->base_addr));
	mem_sect = __nr_to_section(section_nr);

	mem_block = find_memory_block(mem_sect);
	return mem_block;
}

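/*
 * Translate a starting drc index and LMB count into a [start, end] pair
 * of drmem_lmb pointers, checking that the range fits within drmem_info.
 */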
static int get_lmb_range(u32 drc_index, int n_lmbs,
			 struct drmem_lmb **start_lmb,
			 struct drmem_lmb **end_lmb)
{
	struct drmem_lmb *lmb, *start, *end;
	struct drmem_lmb *last_lmb;

	start = NULL;
	for_each_drmem_lmb(lmb) {
		if (lmb->drc_index == drc_index) {
			start = lmb;
			break;
		}
	}

	if (!start)
		return -EINVAL;

	end = &start[n_lmbs - 1];

	last_lmb = &drmem_info->lmbs[drmem_info->n_lmbs - 1];
	if (end > last_lmb)
		return -EINVAL;

	*start_lmb = start;
	*end_lmb = end;
	return 0;
}

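/* Online or offline the memory block backing @lmb through the device core. */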
static int dlpar_change_lmb_state(struct drmem_lmb *lmb, bool online)
{
	struct memory_block *mem_block;
	int rc;

	mem_block = lmb_to_memblock(lmb);
	if (!mem_block)
		return -EINVAL;

	if (online && mem_block->dev.offline)
		rc = device_online(&mem_block->dev);
	else if (!online && !mem_block->dev.offline)
		rc = device_offline(&mem_block->dev);
	else
		rc = 0;

	put_device(&mem_block->dev);

	return rc;
}

static int dlpar_online_lmb(struct drmem_lmb *lmb)
{
	return dlpar_change_lmb_state(lmb, true);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
static int dlpar_offline_lmb(struct drmem_lmb *lmb)
{
	return dlpar_change_lmb_state(lmb, false);
}

static int pseries_remove_memblock(unsigned long base, unsigned int memblock_size)
{
	unsigned long block_sz, start_pfn;
	int sections_per_block;
	int i, nid;

	start_pfn = base >> PAGE_SHIFT;

	lock_device_hotplug();

	if (!pfn_valid(start_pfn))
		goto out;

	block_sz = pseries_memory_block_size();
	sections_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;
	nid = memory_add_physaddr_to_nid(base);

	for (i = 0; i < sections_per_block; i++) {
		__remove_memory(nid, base, MIN_MEMORY_BLOCK_SIZE);
		base += MIN_MEMORY_BLOCK_SIZE;
	}

out:
	/* Update memory regions for memory remove */
	memblock_remove(base, memblock_size);
	unlock_device_hotplug();
	return 0;
}

static int pseries_remove_mem_node(struct device_node *np)
{
	const __be32 *regs;
	unsigned long base;
	unsigned int lmb_size;
	int ret = -EINVAL;

	/*
	 * Check to see if we are actually removing memory
	 */
	if (!of_node_is_type(np, "memory"))
		return 0;

	/*
	 * Find the base address and size of the memblock
	 */
	regs = of_get_property(np, "reg", NULL);
	if (!regs)
		return ret;

	base = be64_to_cpu(*(unsigned long *)regs);
	lmb_size = be32_to_cpu(regs[3]);

	pseries_remove_memblock(base, lmb_size);
	return 0;
}

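/*
 * An LMB is removable only if it is assigned, not part of the fadump
 * reserved area, and all of its present memory sections are removable.
 */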
static bool lmb_is_removable(struct drmem_lmb *lmb)
{
	int i, scns_per_block;
	int rc = 1;
	unsigned long pfn, block_sz;
	u64 phys_addr;

	if (!(lmb->flags & DRCONF_MEM_ASSIGNED))
		return false;

	block_sz = memory_block_size_bytes();
	scns_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;
	phys_addr = lmb->base_addr;

#ifdef CONFIG_FA_DUMP
	/*
	 * Don't hot-remove memory that falls in fadump boot memory area
	 * and memory that is reserved for capturing old kernel memory.
	 */
	if (is_fadump_memory_area(phys_addr, block_sz))
		return false;
#endif

	for (i = 0; i < scns_per_block; i++) {
		pfn = PFN_DOWN(phys_addr);
		if (!pfn_present(pfn))
			continue;

		rc &= is_mem_section_removable(pfn, PAGES_PER_SECTION);
		phys_addr += MIN_MEMORY_BLOCK_SIZE;
	}

	return rc ? true : false;
}

static int dlpar_add_lmb(struct drmem_lmb *);

static int dlpar_remove_lmb(struct drmem_lmb *lmb)
{
	unsigned long block_sz;
	int rc;

	if (!lmb_is_removable(lmb))
		return -EINVAL;

	rc = dlpar_offline_lmb(lmb);
	if (rc)
		return rc;

	block_sz = pseries_memory_block_size();

	__remove_memory(lmb->nid, lmb->base_addr, block_sz);

	/* Update memory regions for memory remove */
	memblock_remove(lmb->base_addr, block_sz);

	invalidate_lmb_associativity_index(lmb);
	lmb_clear_nid(lmb);
	lmb->flags &= ~DRCONF_MEM_ASSIGNED;

	return 0;
}

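/*
 * Hot-remove any @lmbs_to_remove removable LMBs. If the full count cannot
 * be removed, the LMBs removed so far are added back and -EINVAL is returned.
 */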
static int dlpar_memory_remove_by_count(u32 lmbs_to_remove)
{
	struct drmem_lmb *lmb;
	int lmbs_removed = 0;
	int lmbs_available = 0;
	int rc;

	pr_info("Attempting to hot-remove %d LMB(s)\n", lmbs_to_remove);

	if (lmbs_to_remove == 0)
		return -EINVAL;

	/* Validate that there are enough LMBs to satisfy the request */
	for_each_drmem_lmb(lmb) {
		if (lmb_is_removable(lmb))
			lmbs_available++;

		if (lmbs_available == lmbs_to_remove)
			break;
	}

	if (lmbs_available < lmbs_to_remove) {
		pr_info("Not enough LMBs available (%d of %d) to satisfy request\n",
			lmbs_available, lmbs_to_remove);
		return -EINVAL;
	}

	for_each_drmem_lmb(lmb) {
		rc = dlpar_remove_lmb(lmb);
		if (rc)
			continue;

		/* Mark this lmb so we can add it later if all of the
		 * requested LMBs cannot be removed.
		 */
		drmem_mark_lmb_reserved(lmb);

		lmbs_removed++;
		if (lmbs_removed == lmbs_to_remove)
			break;
	}

	if (lmbs_removed != lmbs_to_remove) {
		pr_err("Memory hot-remove failed, adding LMB's back\n");

		for_each_drmem_lmb(lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			rc = dlpar_add_lmb(lmb);
			if (rc)
				pr_err("Failed to add LMB back, drc index %x\n",
				       lmb->drc_index);

			drmem_remove_lmb_reservation(lmb);
		}

		rc = -EINVAL;
	} else {
		for_each_drmem_lmb(lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			dlpar_release_drc(lmb->drc_index);
			pr_info("Memory at %llx was hot-removed\n",
				lmb->base_addr);

			drmem_remove_lmb_reservation(lmb);
		}
		rc = 0;
	}

	return rc;
}

static int dlpar_memory_remove_by_index(u32 drc_index)
{
	struct drmem_lmb *lmb;
	int lmb_found;
	int rc;

	pr_info("Attempting to hot-remove LMB, drc index %x\n", drc_index);

	lmb_found = 0;
	for_each_drmem_lmb(lmb) {
		if (lmb->drc_index == drc_index) {
			lmb_found = 1;
			rc = dlpar_remove_lmb(lmb);
			if (!rc)
				dlpar_release_drc(lmb->drc_index);

			break;
		}
	}

	if (!lmb_found)
		rc = -EINVAL;

	if (rc)
		pr_info("Failed to hot-remove memory at %llx\n",
			lmb->base_addr);
	else
		pr_info("Memory at %llx was hot-removed\n", lmb->base_addr);

	return rc;
}

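/* Remove and immediately re-add the LMB with the given drc index. */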
static int dlpar_memory_readd_by_index(u32 drc_index)
{
	struct drmem_lmb *lmb;
	int lmb_found;
	int rc;

	pr_info("Attempting to update LMB, drc index %x\n", drc_index);

	lmb_found = 0;
	for_each_drmem_lmb(lmb) {
		if (lmb->drc_index == drc_index) {
			lmb_found = 1;
			rc = dlpar_remove_lmb(lmb);
			if (!rc) {
				rc = dlpar_add_lmb(lmb);
				if (rc)
					dlpar_release_drc(lmb->drc_index);
			}
			break;
		}
	}

	if (!lmb_found)
		rc = -EINVAL;

	if (rc)
		pr_info("Failed to update memory at %llx\n",
			lmb->base_addr);
	else
		pr_info("Memory at %llx was updated\n", lmb->base_addr);

	return rc;
}

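/*
 * Hot-remove a contiguous range of @lmbs_to_remove LMBs starting at
 * @drc_index, adding back any removed LMBs if the whole range cannot be
 * removed.
 */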
static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index)
{
	struct drmem_lmb *lmb, *start_lmb, *end_lmb;
	int lmbs_available = 0;
	int rc;

	pr_info("Attempting to hot-remove %u LMB(s) at %x\n",
		lmbs_to_remove, drc_index);

	if (lmbs_to_remove == 0)
		return -EINVAL;

	rc = get_lmb_range(drc_index, lmbs_to_remove, &start_lmb, &end_lmb);
	if (rc)
		return -EINVAL;

	/* Validate that there are enough LMBs to satisfy the request */
	for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
		if (lmb->flags & DRCONF_MEM_RESERVED)
			break;

		lmbs_available++;
	}

	if (lmbs_available < lmbs_to_remove)
		return -EINVAL;

	for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
		if (!(lmb->flags & DRCONF_MEM_ASSIGNED))
			continue;

		rc = dlpar_remove_lmb(lmb);
		if (rc)
			break;

		drmem_mark_lmb_reserved(lmb);
	}

	if (rc) {
		pr_err("Memory indexed-count-remove failed, adding any removed LMBs\n");

		for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			rc = dlpar_add_lmb(lmb);
			if (rc)
				pr_err("Failed to add LMB, drc index %x\n",
				       lmb->drc_index);

			drmem_remove_lmb_reservation(lmb);
		}
		rc = -EINVAL;
	} else {
		for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			dlpar_release_drc(lmb->drc_index);
			pr_info("Memory at %llx (drc index %x) was hot-removed\n",
				lmb->base_addr, lmb->drc_index);

			drmem_remove_lmb_reservation(lmb);
		}
	}

	return rc;
}

#else
static inline int pseries_remove_memblock(unsigned long base,
					  unsigned int memblock_size)
{
	return -EOPNOTSUPP;
}
static inline int pseries_remove_mem_node(struct device_node *np)
{
	return 0;
}
static inline int dlpar_memory_remove(struct pseries_hp_errorlog *hp_elog)
{
	return -EOPNOTSUPP;
}
static int dlpar_remove_lmb(struct drmem_lmb *lmb)
{
	return -EOPNOTSUPP;
}
static int dlpar_memory_remove_by_count(u32 lmbs_to_remove)
{
	return -EOPNOTSUPP;
}
static int dlpar_memory_remove_by_index(u32 drc_index)
{
	return -EOPNOTSUPP;
}
static int dlpar_memory_readd_by_index(u32 drc_index)
{
	return -EOPNOTSUPP;
}

static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index)
{
	return -EOPNOTSUPP;
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

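/*
 * Add and online a single LMB: update its associativity index, add the
 * memory to the kernel and mark the LMB DRCONF_MEM_ASSIGNED, undoing the
 * work on failure.
 */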
static int dlpar_add_lmb(struct drmem_lmb *lmb)
{
	unsigned long block_sz;
	int rc;

	if (lmb->flags & DRCONF_MEM_ASSIGNED)
		return -EINVAL;

	rc = update_lmb_associativity_index(lmb);
	if (rc) {
		dlpar_release_drc(lmb->drc_index);
		return rc;
	}

	lmb_set_nid(lmb);
	block_sz = memory_block_size_bytes();

	/* Add the memory */
	rc = __add_memory(lmb->nid, lmb->base_addr, block_sz);
	if (rc) {
		invalidate_lmb_associativity_index(lmb);
		return rc;
	}

	rc = dlpar_online_lmb(lmb);
	if (rc) {
		__remove_memory(lmb->nid, lmb->base_addr, block_sz);
		invalidate_lmb_associativity_index(lmb);
		lmb_clear_nid(lmb);
	} else {
		lmb->flags |= DRCONF_MEM_ASSIGNED;
	}

	return rc;
}

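/*
 * Hot-add @lmbs_to_add currently unassigned LMBs. If the full count cannot
 * be added, every LMB added so far is removed again and -EINVAL is returned.
 */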
static int dlpar_memory_add_by_count(u32 lmbs_to_add)
{
	struct drmem_lmb *lmb;
	int lmbs_available = 0;
	int lmbs_added = 0;
	int rc;

	pr_info("Attempting to hot-add %d LMB(s)\n", lmbs_to_add);

	if (lmbs_to_add == 0)
		return -EINVAL;

	/* Validate that there are enough LMBs to satisfy the request */
	for_each_drmem_lmb(lmb) {
		if (!(lmb->flags & DRCONF_MEM_ASSIGNED))
			lmbs_available++;

		if (lmbs_available == lmbs_to_add)
			break;
	}

	if (lmbs_available < lmbs_to_add)
		return -EINVAL;

	for_each_drmem_lmb(lmb) {
		if (lmb->flags & DRCONF_MEM_ASSIGNED)
			continue;

		rc = dlpar_acquire_drc(lmb->drc_index);
		if (rc)
			continue;

		rc = dlpar_add_lmb(lmb);
		if (rc) {
			dlpar_release_drc(lmb->drc_index);
			continue;
		}

		/* Mark this lmb so we can remove it later if all of the
		 * requested LMBs cannot be added.
		 */
		drmem_mark_lmb_reserved(lmb);

		lmbs_added++;
		if (lmbs_added == lmbs_to_add)
			break;
	}

	if (lmbs_added != lmbs_to_add) {
		pr_err("Memory hot-add failed, removing any added LMBs\n");

		for_each_drmem_lmb(lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			rc = dlpar_remove_lmb(lmb);
			if (rc)
				pr_err("Failed to remove LMB, drc index %x\n",
				       lmb->drc_index);
			else
				dlpar_release_drc(lmb->drc_index);

			drmem_remove_lmb_reservation(lmb);
		}
		rc = -EINVAL;
	} else {
		for_each_drmem_lmb(lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			pr_info("Memory at %llx (drc index %x) was hot-added\n",
				lmb->base_addr, lmb->drc_index);
			drmem_remove_lmb_reservation(lmb);
		}
		rc = 0;
	}

	return rc;
}

static int dlpar_memory_add_by_index(u32 drc_index)
{
	struct drmem_lmb *lmb;
	int rc, lmb_found;

	pr_info("Attempting to hot-add LMB, drc index %x\n", drc_index);

	lmb_found = 0;
	for_each_drmem_lmb(lmb) {
		if (lmb->drc_index == drc_index) {
			lmb_found = 1;
			rc = dlpar_acquire_drc(lmb->drc_index);
			if (!rc) {
				rc = dlpar_add_lmb(lmb);
				if (rc)
					dlpar_release_drc(lmb->drc_index);
			}

			break;
		}
	}

	if (!lmb_found)
		rc = -EINVAL;

	if (rc)
		pr_info("Failed to hot-add memory, drc index %x\n", drc_index);
	else
		pr_info("Memory at %llx (drc index %x) was hot-added\n",
			lmb->base_addr, drc_index);

	return rc;
}

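/*
 * Hot-add a contiguous range of @lmbs_to_add LMBs starting at @drc_index,
 * rolling back any additions if the whole range cannot be added.
 */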
static int dlpar_memory_add_by_ic(u32 lmbs_to_add, u32 drc_index)
{
	struct drmem_lmb *lmb, *start_lmb, *end_lmb;
	int lmbs_available = 0;
	int rc;

	pr_info("Attempting to hot-add %u LMB(s) at index %x\n",
		lmbs_to_add, drc_index);

	if (lmbs_to_add == 0)
		return -EINVAL;

	rc = get_lmb_range(drc_index, lmbs_to_add, &start_lmb, &end_lmb);
	if (rc)
		return -EINVAL;

	/* Validate that the LMBs in this range are not reserved */
	for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
		if (lmb->flags & DRCONF_MEM_RESERVED)
			break;

		lmbs_available++;
	}

	if (lmbs_available < lmbs_to_add)
		return -EINVAL;

	for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
		if (lmb->flags & DRCONF_MEM_ASSIGNED)
			continue;

		rc = dlpar_acquire_drc(lmb->drc_index);
		if (rc)
			break;

		rc = dlpar_add_lmb(lmb);
		if (rc) {
			dlpar_release_drc(lmb->drc_index);
			break;
		}

		drmem_mark_lmb_reserved(lmb);
	}

	if (rc) {
		pr_err("Memory indexed-count-add failed, removing any added LMBs\n");

		for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			rc = dlpar_remove_lmb(lmb);
			if (rc)
				pr_err("Failed to remove LMB, drc index %x\n",
				       lmb->drc_index);
			else
				dlpar_release_drc(lmb->drc_index);

			drmem_remove_lmb_reservation(lmb);
		}
		rc = -EINVAL;
	} else {
		for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			pr_info("Memory at %llx (drc index %x) was hot-added\n",
				lmb->base_addr, lmb->drc_index);
			drmem_remove_lmb_reservation(lmb);
		}
	}

	return rc;
}

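/*
 * Entry point for RTAS memory hotplug events: dispatch add, remove and
 * readd requests by count, drc index or indexed-count, then rewrite the
 * dynamic memory device tree property to match.
 */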
int dlpar_memory(struct pseries_hp_errorlog *hp_elog)
{
	u32 count, drc_index;
	int rc;

	lock_device_hotplug();

	switch (hp_elog->action) {
	case PSERIES_HP_ELOG_ACTION_ADD:
		if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_COUNT) {
			count = hp_elog->_drc_u.drc_count;
			rc = dlpar_memory_add_by_count(count);
		} else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX) {
			drc_index = hp_elog->_drc_u.drc_index;
			rc = dlpar_memory_add_by_index(drc_index);
		} else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_IC) {
			count = hp_elog->_drc_u.ic.count;
			drc_index = hp_elog->_drc_u.ic.index;
			rc = dlpar_memory_add_by_ic(count, drc_index);
		} else {
			rc = -EINVAL;
		}

		break;
	case PSERIES_HP_ELOG_ACTION_REMOVE:
		if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_COUNT) {
			count = hp_elog->_drc_u.drc_count;
			rc = dlpar_memory_remove_by_count(count);
		} else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX) {
			drc_index = hp_elog->_drc_u.drc_index;
			rc = dlpar_memory_remove_by_index(drc_index);
		} else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_IC) {
			count = hp_elog->_drc_u.ic.count;
			drc_index = hp_elog->_drc_u.ic.index;
			rc = dlpar_memory_remove_by_ic(count, drc_index);
		} else {
			rc = -EINVAL;
		}

		break;
	case PSERIES_HP_ELOG_ACTION_READD:
		drc_index = hp_elog->_drc_u.drc_index;
		rc = dlpar_memory_readd_by_index(drc_index);
		break;
	default:
		pr_err("Invalid action (%d) specified\n", hp_elog->action);
		rc = -EINVAL;
		break;
	}

	if (!rc) {
		rtas_hp_event = true;
		rc = drmem_update_dt();
		rtas_hp_event = false;
	}

	unlock_device_hotplug();
	return rc;
}

static int pseries_add_mem_node(struct device_node *np)
{
	const __be32 *regs;
	unsigned long base;
	unsigned int lmb_size;
	int ret = -EINVAL;

	/*
	 * Check to see if we are actually adding memory
	 */
	if (!of_node_is_type(np, "memory"))
		return 0;

	/*
	 * Find the base and size of the memblock
	 */
	regs = of_get_property(np, "reg", NULL);
	if (!regs)
		return ret;

	base = be64_to_cpu(*(unsigned long *)regs);
	lmb_size = be32_to_cpu(regs[3]);

	/*
	 * Update memory region to represent the memory add
	 */
	ret = memblock_add(base, lmb_size);
	return (ret < 0) ? -EINVAL : 0;
}

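/*
 * Handle an ibm,dynamic-memory property update that did not come from an
 * RTAS hotplug event by adding or removing the affected memblock region.
 */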
static int pseries_update_drconf_memory(struct of_reconfig_data *pr)
{
	struct of_drconf_cell_v1 *new_drmem, *old_drmem;
	unsigned long memblock_size;
	u32 entries;
	__be32 *p;
	int i, rc = -EINVAL;

	if (rtas_hp_event)
		return 0;

	memblock_size = pseries_memory_block_size();
	if (!memblock_size)
		return -EINVAL;

	p = (__be32 *) pr->old_prop->value;
	if (!p)
		return -EINVAL;

	/* The first int of the property is the number of lmb's described
	 * by the property. This is followed by an array of of_drconf_cell
	 * entries. Get the number of entries and skip to the array of
	 * of_drconf_cell's.
	 */
	entries = be32_to_cpu(*p++);
	old_drmem = (struct of_drconf_cell_v1 *)p;

	p = (__be32 *)pr->prop->value;
	p++;
	new_drmem = (struct of_drconf_cell_v1 *)p;

	for (i = 0; i < entries; i++) {
		if ((be32_to_cpu(old_drmem[i].flags) & DRCONF_MEM_ASSIGNED) &&
		    (!(be32_to_cpu(new_drmem[i].flags) & DRCONF_MEM_ASSIGNED))) {
			rc = pseries_remove_memblock(
				be64_to_cpu(old_drmem[i].base_addr),
						     memblock_size);
			break;
		} else if ((!(be32_to_cpu(old_drmem[i].flags) &
			    DRCONF_MEM_ASSIGNED)) &&
			    (be32_to_cpu(new_drmem[i].flags) &
			    DRCONF_MEM_ASSIGNED)) {
			rc = memblock_add(be64_to_cpu(old_drmem[i].base_addr),
					  memblock_size);
			rc = (rc < 0) ? -EINVAL : 0;
			break;
		}
	}
	return rc;
}

static int pseries_memory_notifier(struct notifier_block *nb,
				   unsigned long action, void *data)
{
	struct of_reconfig_data *rd = data;
	int err = 0;

	switch (action) {
	case OF_RECONFIG_ATTACH_NODE:
		err = pseries_add_mem_node(rd->dn);
		break;
	case OF_RECONFIG_DETACH_NODE:
		err = pseries_remove_mem_node(rd->dn);
		break;
	case OF_RECONFIG_UPDATE_PROPERTY:
		if (!strcmp(rd->prop->name, "ibm,dynamic-memory"))
			err = pseries_update_drconf_memory(rd);
		break;
	}
	return notifier_from_errno(err);
}

static struct notifier_block pseries_mem_nb = {
	.notifier_call = pseries_memory_notifier,
};

static int __init pseries_memory_hotplug_init(void)
{
	if (firmware_has_feature(FW_FEATURE_LPAR))
		of_reconfig_notifier_register(&pseries_mem_nb);

	return 0;
}
machine_device_initcall(pseries, pseries_memory_hotplug_init);