// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * pseries Memory Hotplug infrastructure.
 *
 * Copyright (C) 2008 Badari Pulavarty, IBM Corporation
 */

#define pr_fmt(fmt)	"pseries-hotplug-mem: " fmt

#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/slab.h>

#include <asm/firmware.h>
#include <asm/machdep.h>
#include <asm/prom.h>
#include <asm/sparsemem.h>
#include <asm/fadump.h>
#include <asm/drmem.h>
#include "pseries.h"

unsigned long pseries_memory_block_size(void)
{
	struct device_node *np;
	u64 memblock_size = MIN_MEMORY_BLOCK_SIZE;
	struct resource r;

	np = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (np) {
		const __be64 *size;

		size = of_get_property(np, "ibm,lmb-size", NULL);
		if (size)
			memblock_size = be64_to_cpup(size);
		of_node_put(np);
	} else if (machine_is(pseries)) {
		/* This fallback really only applies to pseries */
		unsigned int memzero_size = 0;

		np = of_find_node_by_path("/memory@0");
		if (np) {
			if (!of_address_to_resource(np, 0, &r))
				memzero_size = resource_size(&r);
			of_node_put(np);
		}

		if (memzero_size) {
			/* We now know the size of memory@0, use this to find
			 * the first memoryblock and get its size.
			 */
			char buf[64];

			sprintf(buf, "/memory@%x", memzero_size);
			np = of_find_node_by_path(buf);
			if (np) {
				if (!of_address_to_resource(np, 0, &r))
					memblock_size = resource_size(&r);
				of_node_put(np);
			}
		}
	}
	return memblock_size;
}
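
/*
 * Illustrative sketch (hypothetical values, not taken from the original
 * source): on a partition whose device tree contains
 *
 *	ibm,dynamic-reconfiguration-memory {
 *		ibm,lmb-size = <0x0 0x10000000>;	(256 MB per LMB)
 *	};
 *
 * the lookup above finds "ibm,lmb-size" and pseries_memory_block_size()
 * returns 0x10000000; the /memory@0 probing is only a fallback.
 */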

static void dlpar_free_property(struct property *prop)
{
	kfree(prop->name);
	kfree(prop->value);
	kfree(prop);
}

static struct property *dlpar_clone_property(struct property *prop,
					     u32 prop_size)
{
	struct property *new_prop;

	new_prop = kzalloc(sizeof(*new_prop), GFP_KERNEL);
	if (!new_prop)
		return NULL;

	new_prop->name = kstrdup(prop->name, GFP_KERNEL);
	new_prop->value = kzalloc(prop_size, GFP_KERNEL);
	if (!new_prop->name || !new_prop->value) {
		dlpar_free_property(new_prop);
		return NULL;
	}

	memcpy(new_prop->value, prop->value, prop->length);
	new_prop->length = prop_size;

	of_property_set_flag(new_prop, OF_DYNAMIC);
	return new_prop;
}

static bool find_aa_index(struct device_node *dr_node,
			 struct property *ala_prop,
			 const u32 *lmb_assoc, u32 *aa_index)
{
	u32 *assoc_arrays, new_prop_size;
	struct property *new_prop;
	int aa_arrays, aa_array_entries, aa_array_sz;
	int i, index;

	/*
	 * The ibm,associativity-lookup-arrays property is defined to be
	 * a 32-bit value specifying the number of associativity arrays
	 * followed by a 32-bit value specifying the number of entries per
	 * array, followed by the associativity arrays.
	 */
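	/*
	 * Illustrative layout (hypothetical values, not from the original
	 * source): with two arrays of four entries each, the property cells
	 * would be
	 *
	 *	cells[0] = 2		number of associativity arrays
	 *	cells[1] = 4		entries per array
	 *	cells[2..5]		associativity array 0
	 *	cells[6..9]		associativity array 1
	 *
	 * so array i begins at cell index (i * aa_array_entries) + 2, the
	 * offset computed in the loop below.
	 */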
	assoc_arrays = ala_prop->value;

	aa_arrays = be32_to_cpu(assoc_arrays[0]);
	aa_array_entries = be32_to_cpu(assoc_arrays[1]);
	aa_array_sz = aa_array_entries * sizeof(u32);

	for (i = 0; i < aa_arrays; i++) {
		index = (i * aa_array_entries) + 2;

		if (memcmp(&assoc_arrays[index], &lmb_assoc[1], aa_array_sz))
			continue;

		*aa_index = i;
		return true;
	}

	new_prop_size = ala_prop->length + aa_array_sz;
	new_prop = dlpar_clone_property(ala_prop, new_prop_size);
	if (!new_prop)
		return false;

	assoc_arrays = new_prop->value;

	/* increment the number of entries in the lookup array */
	assoc_arrays[0] = cpu_to_be32(aa_arrays + 1);

	/* copy the new associativity into the lookup array */
	index = aa_arrays * aa_array_entries + 2;
	memcpy(&assoc_arrays[index], &lmb_assoc[1], aa_array_sz);

	of_update_property(dr_node, new_prop);

	/*
	 * The associativity lookup array index for this lmb is
	 * number of entries - 1 since we added its associativity
	 * to the end of the lookup array.
	 */
	*aa_index = be32_to_cpu(assoc_arrays[0]) - 1;
	return true;
}

static int update_lmb_associativity_index(struct drmem_lmb *lmb)
{
	struct device_node *parent, *lmb_node, *dr_node;
	struct property *ala_prop;
	const u32 *lmb_assoc;
	u32 aa_index;
	bool found;

	parent = of_find_node_by_path("/");
	if (!parent)
		return -ENODEV;

	lmb_node = dlpar_configure_connector(cpu_to_be32(lmb->drc_index),
					     parent);
	of_node_put(parent);
	if (!lmb_node)
		return -EINVAL;

	lmb_assoc = of_get_property(lmb_node, "ibm,associativity", NULL);
	if (!lmb_assoc) {
		dlpar_free_cc_nodes(lmb_node);
		return -ENODEV;
	}

	dr_node = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (!dr_node) {
		dlpar_free_cc_nodes(lmb_node);
		return -ENODEV;
	}

	ala_prop = of_find_property(dr_node, "ibm,associativity-lookup-arrays",
				    NULL);
	if (!ala_prop) {
		of_node_put(dr_node);
		dlpar_free_cc_nodes(lmb_node);
		return -ENODEV;
	}

	found = find_aa_index(dr_node, ala_prop, lmb_assoc, &aa_index);

	of_node_put(dr_node);
	dlpar_free_cc_nodes(lmb_node);

	if (!found) {
		pr_err("Could not find LMB associativity\n");
		return -1;
	}

	lmb->aa_index = aa_index;
	return 0;
}

static struct memory_block *lmb_to_memblock(struct drmem_lmb *lmb)
{
	unsigned long section_nr;
	struct mem_section *mem_sect;
	struct memory_block *mem_block;

	section_nr = pfn_to_section_nr(PFN_DOWN(lmb->base_addr));
	mem_sect = __nr_to_section(section_nr);

	mem_block = find_memory_block(mem_sect);
	return mem_block;
}

static int get_lmb_range(u32 drc_index, int n_lmbs,
			 struct drmem_lmb **start_lmb,
			 struct drmem_lmb **end_lmb)
{
	struct drmem_lmb *lmb, *start, *end;
	struct drmem_lmb *limit;

	start = NULL;
	for_each_drmem_lmb(lmb) {
		if (lmb->drc_index == drc_index) {
			start = lmb;
			break;
		}
	}

	if (!start)
		return -EINVAL;

	end = &start[n_lmbs];

	limit = &drmem_info->lmbs[drmem_info->n_lmbs];
	if (end > limit)
		return -EINVAL;

	*start_lmb = start;
	*end_lmb = end;
	return 0;
}
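
/*
 * Worked example for get_lmb_range() (hypothetical indices, not from the
 * original source): if drc_index matches drmem_info->lmbs[10] and
 * n_lmbs == 4, the helper returns start_lmb = &lmbs[10] and
 * end_lmb = &lmbs[14], the half-open range that the indexed-count
 * handlers below walk with for_each_drmem_lmb_in_range().
 */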

static int dlpar_change_lmb_state(struct drmem_lmb *lmb, bool online)
{
	struct memory_block *mem_block;
	int rc;

	mem_block = lmb_to_memblock(lmb);
	if (!mem_block)
		return -EINVAL;

	if (online && mem_block->dev.offline)
		rc = device_online(&mem_block->dev);
	else if (!online && !mem_block->dev.offline)
		rc = device_offline(&mem_block->dev);
	else
		rc = 0;

	put_device(&mem_block->dev);

	return rc;
}

static int dlpar_online_lmb(struct drmem_lmb *lmb)
{
	return dlpar_change_lmb_state(lmb, true);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
static int dlpar_offline_lmb(struct drmem_lmb *lmb)
{
	return dlpar_change_lmb_state(lmb, false);
}

static int pseries_remove_memblock(unsigned long base, unsigned int memblock_size)
{
	unsigned long block_sz, start_pfn;
	int sections_per_block;
	int i, nid;

	start_pfn = base >> PAGE_SHIFT;

	lock_device_hotplug();

	if (!pfn_valid(start_pfn))
		goto out;

	block_sz = pseries_memory_block_size();
	sections_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;
	nid = memory_add_physaddr_to_nid(base);

	for (i = 0; i < sections_per_block; i++) {
		__remove_memory(nid, base, MIN_MEMORY_BLOCK_SIZE);
		base += MIN_MEMORY_BLOCK_SIZE;
	}

out:
	/* Update memory regions for memory remove */
	memblock_remove(base, memblock_size);
	unlock_device_hotplug();
	return 0;
}

static int pseries_remove_mem_node(struct device_node *np)
{
	const __be32 *regs;
	unsigned long base;
	unsigned int lmb_size;
	int ret = -EINVAL;

	/*
	 * Check to see if we are actually removing memory
	 */
	if (!of_node_is_type(np, "memory"))
		return 0;

	/*
	 * Find the base address and size of the memblock
	 */
	regs = of_get_property(np, "reg", NULL);
	if (!regs)
		return ret;

	base = be64_to_cpu(*(unsigned long *)regs);
	lmb_size = be32_to_cpu(regs[3]);
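	/*
	 * Illustrative "reg" layout assumed by the two lines above
	 * (hypothetical values, not from the original source), with two
	 * address cells and two size cells:
	 *
	 *	reg = <0x0 0x20000000  0x0 0x10000000>;
	 *	        (64-bit base)   (64-bit size)
	 *
	 * so regs[3] is the low word of the 64-bit LMB size.
	 */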

	pseries_remove_memblock(base, lmb_size);
	return 0;
}

static bool lmb_is_removable(struct drmem_lmb *lmb)
{
	if (!(lmb->flags & DRCONF_MEM_ASSIGNED))
		return false;

#ifdef CONFIG_FA_DUMP
	/*
	 * Don't hot-remove memory that falls in fadump boot memory area
	 * and memory that is reserved for capturing old kernel memory.
	 */
	if (is_fadump_memory_area(lmb->base_addr, memory_block_size_bytes()))
		return false;
#endif
	/* device_offline() will determine if we can actually remove this lmb */
	return true;
}

static int dlpar_add_lmb(struct drmem_lmb *);

static int dlpar_remove_lmb(struct drmem_lmb *lmb)
{
	struct memory_block *mem_block;
	unsigned long block_sz;
	int rc;

	if (!lmb_is_removable(lmb))
		return -EINVAL;

	mem_block = lmb_to_memblock(lmb);
	if (mem_block == NULL)
		return -EINVAL;

	rc = dlpar_offline_lmb(lmb);
	if (rc) {
		put_device(&mem_block->dev);
		return rc;
	}

	block_sz = pseries_memory_block_size();

	__remove_memory(mem_block->nid, lmb->base_addr, block_sz);
	put_device(&mem_block->dev);

	/* Update memory regions for memory remove */
	memblock_remove(lmb->base_addr, block_sz);

	invalidate_lmb_associativity_index(lmb);
	lmb->flags &= ~DRCONF_MEM_ASSIGNED;

	return 0;
}

static int dlpar_memory_remove_by_count(u32 lmbs_to_remove)
{
	struct drmem_lmb *lmb;
	int lmbs_removed = 0;
	int lmbs_available = 0;
	int rc;

	pr_info("Attempting to hot-remove %d LMB(s)\n", lmbs_to_remove);

	if (lmbs_to_remove == 0)
		return -EINVAL;

	/* Validate that there are enough LMBs to satisfy the request */
	for_each_drmem_lmb(lmb) {
		if (lmb_is_removable(lmb))
			lmbs_available++;

		if (lmbs_available == lmbs_to_remove)
			break;
	}

	if (lmbs_available < lmbs_to_remove) {
		pr_info("Not enough LMBs available (%d of %d) to satisfy request\n",
			lmbs_available, lmbs_to_remove);
		return -EINVAL;
	}

	for_each_drmem_lmb(lmb) {
		rc = dlpar_remove_lmb(lmb);
		if (rc)
			continue;

		/* Mark this lmb so we can add it later if all of the
		 * requested LMBs cannot be removed.
		 */
		drmem_mark_lmb_reserved(lmb);

		lmbs_removed++;
		if (lmbs_removed == lmbs_to_remove)
			break;
	}

	if (lmbs_removed != lmbs_to_remove) {
		pr_err("Memory hot-remove failed, adding LMB's back\n");

		for_each_drmem_lmb(lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			rc = dlpar_add_lmb(lmb);
			if (rc)
				pr_err("Failed to add LMB back, drc index %x\n",
				       lmb->drc_index);

			drmem_remove_lmb_reservation(lmb);
		}

		rc = -EINVAL;
	} else {
		for_each_drmem_lmb(lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			dlpar_release_drc(lmb->drc_index);
			pr_info("Memory at %llx was hot-removed\n",
				lmb->base_addr);

			drmem_remove_lmb_reservation(lmb);
		}
		rc = 0;
	}

	return rc;
}

static int dlpar_memory_remove_by_index(u32 drc_index)
{
	struct drmem_lmb *lmb;
	int lmb_found;
	int rc;

	pr_info("Attempting to hot-remove LMB, drc index %x\n", drc_index);

	lmb_found = 0;
	for_each_drmem_lmb(lmb) {
		if (lmb->drc_index == drc_index) {
			lmb_found = 1;
			rc = dlpar_remove_lmb(lmb);
			if (!rc)
				dlpar_release_drc(lmb->drc_index);

			break;
		}
	}

	if (!lmb_found)
		rc = -EINVAL;

	if (rc)
		pr_info("Failed to hot-remove memory at %llx\n",
			lmb->base_addr);
	else
		pr_info("Memory at %llx was hot-removed\n", lmb->base_addr);

	return rc;
}

static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index)
{
	struct drmem_lmb *lmb, *start_lmb, *end_lmb;
	int lmbs_available = 0;
	int rc;

	pr_info("Attempting to hot-remove %u LMB(s) at %x\n",
		lmbs_to_remove, drc_index);

	if (lmbs_to_remove == 0)
		return -EINVAL;

	rc = get_lmb_range(drc_index, lmbs_to_remove, &start_lmb, &end_lmb);
	if (rc)
		return -EINVAL;

	/* Validate that there are enough LMBs to satisfy the request */
	for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
		if (lmb->flags & DRCONF_MEM_RESERVED)
			break;

		lmbs_available++;
	}

	if (lmbs_available < lmbs_to_remove)
		return -EINVAL;

	for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
		if (!(lmb->flags & DRCONF_MEM_ASSIGNED))
			continue;

		rc = dlpar_remove_lmb(lmb);
		if (rc)
			break;

		drmem_mark_lmb_reserved(lmb);
	}

	if (rc) {
		pr_err("Memory indexed-count-remove failed, adding any removed LMBs\n");

		for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			rc = dlpar_add_lmb(lmb);
			if (rc)
				pr_err("Failed to add LMB, drc index %x\n",
				       lmb->drc_index);

			drmem_remove_lmb_reservation(lmb);
		}
		rc = -EINVAL;
	} else {
		for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			dlpar_release_drc(lmb->drc_index);
			pr_info("Memory at %llx (drc index %x) was hot-removed\n",
				lmb->base_addr, lmb->drc_index);

			drmem_remove_lmb_reservation(lmb);
		}
	}

	return rc;
}

#else
static inline int pseries_remove_memblock(unsigned long base,
					  unsigned int memblock_size)
{
	return -EOPNOTSUPP;
}
static inline int pseries_remove_mem_node(struct device_node *np)
{
	return 0;
}
static inline int dlpar_memory_remove(struct pseries_hp_errorlog *hp_elog)
{
	return -EOPNOTSUPP;
}
static int dlpar_remove_lmb(struct drmem_lmb *lmb)
{
	return -EOPNOTSUPP;
}
static int dlpar_memory_remove_by_count(u32 lmbs_to_remove)
{
	return -EOPNOTSUPP;
}
static int dlpar_memory_remove_by_index(u32 drc_index)
{
	return -EOPNOTSUPP;
}

static int dlpar_memory_remove_by_ic(u32 lmbs_to_remove, u32 drc_index)
{
	return -EOPNOTSUPP;
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

static int dlpar_add_lmb(struct drmem_lmb *lmb)
{
	unsigned long block_sz;
	int nid, rc;

	if (lmb->flags & DRCONF_MEM_ASSIGNED)
		return -EINVAL;

	rc = update_lmb_associativity_index(lmb);
	if (rc) {
		dlpar_release_drc(lmb->drc_index);
		return rc;
	}

	block_sz = memory_block_size_bytes();

	/* Find the node id for this address. */
	nid = memory_add_physaddr_to_nid(lmb->base_addr);

	/* Add the memory */
	rc = __add_memory(nid, lmb->base_addr, block_sz);
	if (rc) {
		invalidate_lmb_associativity_index(lmb);
		return rc;
	}

	rc = dlpar_online_lmb(lmb);
	if (rc) {
		__remove_memory(nid, lmb->base_addr, block_sz);
		invalidate_lmb_associativity_index(lmb);
	} else {
		lmb->flags |= DRCONF_MEM_ASSIGNED;
	}

	return rc;
}

static int dlpar_memory_add_by_count(u32 lmbs_to_add)
{
	struct drmem_lmb *lmb;
	int lmbs_available = 0;
	int lmbs_added = 0;
	int rc;

	pr_info("Attempting to hot-add %d LMB(s)\n", lmbs_to_add);

	if (lmbs_to_add == 0)
		return -EINVAL;

	/* Validate that there are enough LMBs to satisfy the request */
	for_each_drmem_lmb(lmb) {
		if (!(lmb->flags & DRCONF_MEM_ASSIGNED))
			lmbs_available++;

		if (lmbs_available == lmbs_to_add)
			break;
	}

	if (lmbs_available < lmbs_to_add)
		return -EINVAL;

	for_each_drmem_lmb(lmb) {
		if (lmb->flags & DRCONF_MEM_ASSIGNED)
			continue;

		rc = dlpar_acquire_drc(lmb->drc_index);
		if (rc)
			continue;

		rc = dlpar_add_lmb(lmb);
		if (rc) {
			dlpar_release_drc(lmb->drc_index);
			continue;
		}

		/* Mark this lmb so we can remove it later if all of the
		 * requested LMBs cannot be added.
		 */
		drmem_mark_lmb_reserved(lmb);

		lmbs_added++;
		if (lmbs_added == lmbs_to_add)
			break;
	}

	if (lmbs_added != lmbs_to_add) {
		pr_err("Memory hot-add failed, removing any added LMBs\n");

		for_each_drmem_lmb(lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			rc = dlpar_remove_lmb(lmb);
			if (rc)
				pr_err("Failed to remove LMB, drc index %x\n",
				       lmb->drc_index);
			else
				dlpar_release_drc(lmb->drc_index);

			drmem_remove_lmb_reservation(lmb);
		}
		rc = -EINVAL;
	} else {
		for_each_drmem_lmb(lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			pr_info("Memory at %llx (drc index %x) was hot-added\n",
				lmb->base_addr, lmb->drc_index);
			drmem_remove_lmb_reservation(lmb);
		}
		rc = 0;
	}

	return rc;
}

static int dlpar_memory_add_by_index(u32 drc_index)
{
	struct drmem_lmb *lmb;
	int rc, lmb_found;

	pr_info("Attempting to hot-add LMB, drc index %x\n", drc_index);

	lmb_found = 0;
	for_each_drmem_lmb(lmb) {
		if (lmb->drc_index == drc_index) {
			lmb_found = 1;
			rc = dlpar_acquire_drc(lmb->drc_index);
			if (!rc) {
				rc = dlpar_add_lmb(lmb);
				if (rc)
					dlpar_release_drc(lmb->drc_index);
			}

			break;
		}
	}

	if (!lmb_found)
		rc = -EINVAL;

	if (rc)
		pr_info("Failed to hot-add memory, drc index %x\n", drc_index);
	else
		pr_info("Memory at %llx (drc index %x) was hot-added\n",
			lmb->base_addr, drc_index);

	return rc;
}

static int dlpar_memory_add_by_ic(u32 lmbs_to_add, u32 drc_index)
{
	struct drmem_lmb *lmb, *start_lmb, *end_lmb;
	int lmbs_available = 0;
	int rc;

	pr_info("Attempting to hot-add %u LMB(s) at index %x\n",
		lmbs_to_add, drc_index);

	if (lmbs_to_add == 0)
		return -EINVAL;

	rc = get_lmb_range(drc_index, lmbs_to_add, &start_lmb, &end_lmb);
	if (rc)
		return -EINVAL;

	/* Validate that the LMBs in this range are not reserved */
	for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
		if (lmb->flags & DRCONF_MEM_RESERVED)
			break;

		lmbs_available++;
	}

	if (lmbs_available < lmbs_to_add)
		return -EINVAL;

	for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
		if (lmb->flags & DRCONF_MEM_ASSIGNED)
			continue;

		rc = dlpar_acquire_drc(lmb->drc_index);
		if (rc)
			break;

		rc = dlpar_add_lmb(lmb);
		if (rc) {
			dlpar_release_drc(lmb->drc_index);
			break;
		}

		drmem_mark_lmb_reserved(lmb);
	}

	if (rc) {
		pr_err("Memory indexed-count-add failed, removing any added LMBs\n");

		for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			rc = dlpar_remove_lmb(lmb);
			if (rc)
				pr_err("Failed to remove LMB, drc index %x\n",
				       lmb->drc_index);
			else
				dlpar_release_drc(lmb->drc_index);

			drmem_remove_lmb_reservation(lmb);
		}
		rc = -EINVAL;
	} else {
		for_each_drmem_lmb_in_range(lmb, start_lmb, end_lmb) {
			if (!drmem_lmb_reserved(lmb))
				continue;

			pr_info("Memory at %llx (drc index %x) was hot-added\n",
				lmb->base_addr, lmb->drc_index);
			drmem_remove_lmb_reservation(lmb);
		}
	}

	return rc;
}

int dlpar_memory(struct pseries_hp_errorlog *hp_elog)
{
	u32 count, drc_index;
	int rc;

	lock_device_hotplug();

	switch (hp_elog->action) {
	case PSERIES_HP_ELOG_ACTION_ADD:
		switch (hp_elog->id_type) {
		case PSERIES_HP_ELOG_ID_DRC_COUNT:
			count = hp_elog->_drc_u.drc_count;
			rc = dlpar_memory_add_by_count(count);
			break;
		case PSERIES_HP_ELOG_ID_DRC_INDEX:
			drc_index = hp_elog->_drc_u.drc_index;
			rc = dlpar_memory_add_by_index(drc_index);
			break;
		case PSERIES_HP_ELOG_ID_DRC_IC:
			count = hp_elog->_drc_u.ic.count;
			drc_index = hp_elog->_drc_u.ic.index;
			rc = dlpar_memory_add_by_ic(count, drc_index);
			break;
		default:
			rc = -EINVAL;
			break;
		}

		break;
	case PSERIES_HP_ELOG_ACTION_REMOVE:
		switch (hp_elog->id_type) {
		case PSERIES_HP_ELOG_ID_DRC_COUNT:
			count = hp_elog->_drc_u.drc_count;
			rc = dlpar_memory_remove_by_count(count);
			break;
		case PSERIES_HP_ELOG_ID_DRC_INDEX:
			drc_index = hp_elog->_drc_u.drc_index;
			rc = dlpar_memory_remove_by_index(drc_index);
			break;
		case PSERIES_HP_ELOG_ID_DRC_IC:
			count = hp_elog->_drc_u.ic.count;
			drc_index = hp_elog->_drc_u.ic.index;
			rc = dlpar_memory_remove_by_ic(count, drc_index);
			break;
		default:
			rc = -EINVAL;
			break;
		}

		break;
	default:
		pr_err("Invalid action (%d) specified\n", hp_elog->action);
		rc = -EINVAL;
		break;
	}

	if (!rc)
		rc = drmem_update_dt();

	unlock_device_hotplug();
	return rc;
}
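
/*
 * Minimal usage sketch for dlpar_memory() (hypothetical values, not from
 * the original source). In practice the pseries_hp_errorlog is decoded
 * from an RTAS hotplug event before this function runs; hot-adding two
 * LMBs by count would look roughly like:
 *
 *	struct pseries_hp_errorlog hp_elog = {
 *		.resource	  = PSERIES_HP_ELOG_RESOURCE_MEM,
 *		.action		  = PSERIES_HP_ELOG_ACTION_ADD,
 *		.id_type	  = PSERIES_HP_ELOG_ID_DRC_COUNT,
 *		._drc_u.drc_count = 2,
 *	};
 *	rc = dlpar_memory(&hp_elog);
 */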

static int pseries_add_mem_node(struct device_node *np)
{
	const __be32 *regs;
	unsigned long base;
	unsigned int lmb_size;
	int ret = -EINVAL;

	/*
	 * Check to see if we are actually adding memory
	 */
	if (!of_node_is_type(np, "memory"))
		return 0;

	/*
	 * Find the base and size of the memblock
	 */
	regs = of_get_property(np, "reg", NULL);
	if (!regs)
		return ret;

	base = be64_to_cpu(*(unsigned long *)regs);
	lmb_size = be32_to_cpu(regs[3]);

	/*
	 * Update memory region to represent the memory add
	 */
	ret = memblock_add(base, lmb_size);
	return (ret < 0) ? -EINVAL : 0;
}

static int pseries_memory_notifier(struct notifier_block *nb,
				   unsigned long action, void *data)
{
	struct of_reconfig_data *rd = data;
	int err = 0;

	switch (action) {
	case OF_RECONFIG_ATTACH_NODE:
		err = pseries_add_mem_node(rd->dn);
		break;
	case OF_RECONFIG_DETACH_NODE:
		err = pseries_remove_mem_node(rd->dn);
		break;
	}
	return notifier_from_errno(err);
}

static struct notifier_block pseries_mem_nb = {
	.notifier_call = pseries_memory_notifier,
};

static int __init pseries_memory_hotplug_init(void)
{
	if (firmware_has_feature(FW_FEATURE_LPAR))
		of_reconfig_notifier_register(&pseries_mem_nb);

	return 0;
}
machine_device_initcall(pseries, pseries_memory_hotplug_init);