/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>

#include <asm-generic/sections.h>
#include <linux/io.h>

#include "internal.h"

static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS] __initdata_memblock;
#endif

struct memblock memblock __initdata_memblock = {
	.memory.regions		= memblock_memory_init_regions,
	.memory.cnt		= 1,	/* empty dummy entry */
	.memory.max		= INIT_MEMBLOCK_REGIONS,

	.reserved.regions	= memblock_reserved_init_regions,
	.reserved.cnt		= 1,	/* empty dummy entry */
	.reserved.max		= INIT_MEMBLOCK_REGIONS,

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	.physmem.regions	= memblock_physmem_init_regions,
	.physmem.cnt		= 1,	/* empty dummy entry */
	.physmem.max		= INIT_PHYSMEM_REGIONS,
#endif

	.bottom_up		= false,
	.current_limit		= MEMBLOCK_ALLOC_ANYWHERE,
};

int memblock_debug __initdata_memblock;
#ifdef CONFIG_MOVABLE_NODE
bool movable_node_enabled __initdata_memblock = false;
#endif
static bool system_has_some_mirror __initdata_memblock = false;
static int memblock_can_resize __initdata_memblock;
static int memblock_memory_in_slab __initdata_memblock = 0;
static int memblock_reserved_in_slab __initdata_memblock = 0;

ulong __init_memblock choose_memblock_flags(void)
{
	return system_has_some_mirror ? MEMBLOCK_MIRROR : MEMBLOCK_NONE;
}
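
/*
 * Illustrative sketch, not part of the original file: typical early-boot
 * usage of the API implemented below.  An arch's setup code registers the
 * RAM it discovered and then fences off ranges that must not be handed out
 * by the early allocator.  The guard macro and all addresses are
 * hypothetical.
 */
#ifdef MEMBLOCK_USAGE_EXAMPLE
static void __init memblock_usage_example(void)
{
	/* make [0, 512MB) known as system memory */
	memblock_add(0, 512UL << 20);
	/* keep a (hypothetical) firmware table at 1MB out of early allocations */
	memblock_reserve(1UL << 20, PAGE_SIZE);
}
#endif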

/* inline so we don't get a warning when pr_debug is compiled out */
static __init_memblock const char *
memblock_type_name(struct memblock_type *type)
{
	if (type == &memblock.memory)
		return "memory";
	else if (type == &memblock.reserved)
		return "reserved";
	else
		return "unknown";
}

/* adjust *@size so that (@base + *@size) doesn't overflow, return new size */
static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
{
	return *size = min(*size, (phys_addr_t)ULLONG_MAX - base);
}
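
/*
 * Worked example (illustrative): with a 64-bit phys_addr_t, capping a
 * region with base = ULLONG_MAX - 0x1000 and *size = 0x2000 shrinks
 * *size to 0x1000, so that base + *size no longer wraps past zero.
 */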

/*
 * Address comparison utilities
 */
static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
				       phys_addr_t base2, phys_addr_t size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

static long __init_memblock memblock_overlaps_region(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size)
{
	unsigned long i;

	for (i = 0; i < type->cnt; i++) {
		phys_addr_t rgnbase = type->regions[i].base;
		phys_addr_t rgnsize = type->regions[i].size;
		if (memblock_addrs_overlap(base, size, rgnbase, rgnsize))
			break;
	}

	return (i < type->cnt) ? i : -1;
}

/*
 * __memblock_find_range_bottom_up - find free area utility in bottom-up
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), find free area bottom-up.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
				phys_addr_t size, phys_addr_t align, int nid,
				ulong flags)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range(i, nid, flags, &this_start, &this_end, NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		cand = round_up(this_start, align);
		if (cand < this_end && this_end - cand >= size)
			return cand;
	}

	return 0;
}

/**
 * __memblock_find_range_top_down - find free area utility, in top-down
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), find free area top-down.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
			       phys_addr_t size, phys_addr_t align, int nid,
			       ulong flags)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range_reverse(i, nid, flags, &this_start, &this_end,
					NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		if (this_end < size)
			continue;

		cand = round_down(this_end - size, align);
		if (cand >= this_start)
			return cand;
	}

	return 0;
}

/**
 * memblock_find_in_range_node - find free area in given range and node
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Find @size free area aligned to @align in the specified range and node.
 *
 * When allocation direction is bottom-up, the @start should be greater
 * than the end of the kernel image. Otherwise, it will be trimmed. The
 * reason is that we want the bottom-up allocation just near the kernel
 * image so it is highly likely that the allocated memory and the kernel
 * will reside in the same node.
 *
 * If bottom-up allocation fails, we fall back to top-down allocation.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
					phys_addr_t align, phys_addr_t start,
					phys_addr_t end, int nid, ulong flags)
{
	phys_addr_t kernel_end, ret;

	/* pump up @end */
	if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
		end = memblock.current_limit;

	/* avoid allocating the first page */
	start = max_t(phys_addr_t, start, PAGE_SIZE);
	end = max(start, end);
	kernel_end = __pa_symbol(_end);

	/*
	 * try bottom-up allocation only when bottom-up mode
	 * is set and @end is above the kernel image.
	 */
	if (memblock_bottom_up() && end > kernel_end) {
		phys_addr_t bottom_up_start;

		/* make sure we will allocate above the kernel */
		bottom_up_start = max(start, kernel_end);

		/* ok, try bottom-up allocation first */
		ret = __memblock_find_range_bottom_up(bottom_up_start, end,
						      size, align, nid, flags);
		if (ret)
			return ret;

		/*
		 * we always limit bottom-up allocation above the kernel,
		 * but top-down allocation doesn't have the limit, so
		 * retrying top-down allocation may succeed when bottom-up
		 * allocation failed.
		 *
		 * bottom-up allocation is expected to fail very rarely,
		 * so we use WARN_ONCE() here to see the stack trace if
		 * that happens.
		 */
		WARN_ONCE(1, "memblock: bottom-up allocation failed, "
			     "memory hotunplug may be affected\n");
	}

	return __memblock_find_range_top_down(start, end, size, align, nid,
					      flags);
}
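
/*
 * Illustrative sketch of a hypothetical caller: ask for 1MB aligned to
 * 2MB anywhere below 4GB on any node.  With memblock_set_bottom_up(true)
 * the search starts just above the kernel image and only then falls back
 * to top-down, as described above.  The guard macro is hypothetical.
 */
#ifdef MEMBLOCK_USAGE_EXAMPLE
static phys_addr_t __init example_find_below_4g(void)
{
	return memblock_find_in_range_node(1UL << 20, 2UL << 20, 0,
					   (phys_addr_t)(4ULL << 30),
					   NUMA_NO_NODE, MEMBLOCK_NONE);
}
#endif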

/**
 * memblock_find_in_range - find free area in given range
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 *
 * Find @size free area aligned to @align in the specified range.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
					phys_addr_t end, phys_addr_t size,
					phys_addr_t align)
{
	phys_addr_t ret;
	ulong flags = choose_memblock_flags();

again:
	ret = memblock_find_in_range_node(size, align, start, end,
					    NUMA_NO_NODE, flags);

	if (!ret && (flags & MEMBLOCK_MIRROR)) {
		pr_warn("Could not allocate %pap bytes of mirrored memory\n",
			&size);
		flags &= ~MEMBLOCK_MIRROR;
		goto again;
	}

	return ret;
}

static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
{
	type->total_size -= type->regions[r].size;
	memmove(&type->regions[r], &type->regions[r + 1],
		(type->cnt - (r + 1)) * sizeof(type->regions[r]));
	type->cnt--;

	/* Special case for empty arrays */
	if (type->cnt == 0) {
		WARN_ON(type->total_size != 0);
		type->cnt = 1;
		type->regions[0].base = 0;
		type->regions[0].size = 0;
		type->regions[0].flags = 0;
		memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
	}
}

#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK

phys_addr_t __init_memblock get_allocated_memblock_reserved_regions_info(
					phys_addr_t *addr)
{
	if (memblock.reserved.regions == memblock_reserved_init_regions)
		return 0;

	*addr = __pa(memblock.reserved.regions);

	return PAGE_ALIGN(sizeof(struct memblock_region) *
			  memblock.reserved.max);
}

phys_addr_t __init_memblock get_allocated_memblock_memory_regions_info(
					phys_addr_t *addr)
{
	if (memblock.memory.regions == memblock_memory_init_regions)
		return 0;

	*addr = __pa(memblock.memory.regions);

	return PAGE_ALIGN(sizeof(struct memblock_region) *
			  memblock.memory.max);
}

#endif

/**
 * memblock_double_array - double the size of the memblock regions array
 * @type: memblock type of the regions array being doubled
 * @new_area_start: starting address of memory range to avoid overlap with
 * @new_area_size: size of memory range to avoid overlap with
 *
 * Double the size of the @type regions array. If memblock is being used to
 * allocate memory for a new reserved regions array and there is a previously
 * allocated memory range [@new_area_start,@new_area_start+@new_area_size]
 * waiting to be reserved, ensure the memory used by the new array does
 * not overlap.
 *
 * RETURNS:
 * 0 on success, -1 on failure.
 */
static int __init_memblock memblock_double_array(struct memblock_type *type,
						phys_addr_t new_area_start,
						phys_addr_t new_area_size)
{
	struct memblock_region *new_array, *old_array;
	phys_addr_t old_alloc_size, new_alloc_size;
	phys_addr_t old_size, new_size, addr;
	int use_slab = slab_is_available();
	int *in_slab;

	/* We don't allow resizing until we know about the reserved regions
	 * of memory that aren't suitable for allocation
	 */
	if (!memblock_can_resize)
		return -1;

	/* Calculate new doubled size */
	old_size = type->max * sizeof(struct memblock_region);
	new_size = old_size << 1;
	/*
	 * We need to allocate the new array aligned to PAGE_SIZE,
	 * so we can free it completely later.
	 */
	old_alloc_size = PAGE_ALIGN(old_size);
	new_alloc_size = PAGE_ALIGN(new_size);

	/* Retrieve the slab flag */
	if (type == &memblock.memory)
		in_slab = &memblock_memory_in_slab;
	else
		in_slab = &memblock_reserved_in_slab;

	/* Try to find some space for it.
	 *
	 * WARNING: We assume that either slab_is_available() is true and we
	 * use slab, or we use MEMBLOCK for allocations. That means this is
	 * unsafe to use when bootmem is currently active (unless bootmem
	 * itself is implemented on top of MEMBLOCK, which isn't the case yet).
	 *
	 * This should however not be an issue for now, as we currently only
	 * call into MEMBLOCK while it's still active, or much later when slab
	 * is active for memory hotplug operations.
	 */
	if (use_slab) {
		new_array = kmalloc(new_size, GFP_KERNEL);
		addr = new_array ? __pa(new_array) : 0;
	} else {
		/* only exclude range when trying to double reserved.regions */
		if (type != &memblock.reserved)
			new_area_start = new_area_size = 0;

		addr = memblock_find_in_range(new_area_start + new_area_size,
						memblock.current_limit,
						new_alloc_size, PAGE_SIZE);
		if (!addr && new_area_size)
			addr = memblock_find_in_range(0,
				min(new_area_start, memblock.current_limit),
				new_alloc_size, PAGE_SIZE);

		new_array = addr ? __va(addr) : NULL;
	}
	if (!addr) {
		pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
		       memblock_type_name(type), type->max, type->max * 2);
		return -1;
	}

	memblock_dbg("memblock: %s is doubled to %ld at [%#010llx-%#010llx]",
			memblock_type_name(type), type->max * 2, (u64)addr,
			(u64)addr + new_size - 1);

	/*
	 * Found space, we now need to move the array over before we add the
	 * reserved region since it may be our reserved array itself that is
	 * full.
	 */
	memcpy(new_array, type->regions, old_size);
	memset(new_array + type->max, 0, old_size);
	old_array = type->regions;
	type->regions = new_array;
	type->max <<= 1;

	/* Free old array. We needn't free it if the array is the static one */
	if (*in_slab)
		kfree(old_array);
	else if (old_array != memblock_memory_init_regions &&
		 old_array != memblock_reserved_init_regions)
		memblock_free(__pa(old_array), old_alloc_size);

	/*
	 * Reserve the new array if that comes from the memblock.  Otherwise, we
	 * needn't do it
	 */
	if (!use_slab)
		BUG_ON(memblock_reserve(addr, new_alloc_size));

	/* Update slab flag */
	*in_slab = use_slab;

	return 0;
}

/**
 * memblock_merge_regions - merge neighboring compatible regions
 * @type: memblock type to scan
 *
 * Scan @type and merge neighboring compatible regions.
 */
static void __init_memblock memblock_merge_regions(struct memblock_type *type)
{
	int i = 0;

	/* cnt never goes below 1 */
	while (i < type->cnt - 1) {
		struct memblock_region *this = &type->regions[i];
		struct memblock_region *next = &type->regions[i + 1];

		if (this->base + this->size != next->base ||
		    memblock_get_region_node(this) !=
		    memblock_get_region_node(next) ||
		    this->flags != next->flags) {
			BUG_ON(this->base + this->size > next->base);
			i++;
			continue;
		}

		this->size += next->size;
		/* move forward from next + 1, index of which is i + 2 */
		memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next));
		type->cnt--;
	}
}

/**
 * memblock_insert_region - insert new memblock region
 * @type:	memblock type to insert into
 * @idx:	index for the insertion point
 * @base:	base address of the new region
 * @size:	size of the new region
 * @nid:	node id of the new region
 * @flags:	flags of the new region
 *
 * Insert new memblock region [@base,@base+@size) into @type at @idx.
 * @type must already have extra room to accommodate the new region.
 */
static void __init_memblock memblock_insert_region(struct memblock_type *type,
						   int idx, phys_addr_t base,
						   phys_addr_t size,
						   int nid, unsigned long flags)
{
	struct memblock_region *rgn = &type->regions[idx];

	BUG_ON(type->cnt >= type->max);
	memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
	rgn->base = base;
	rgn->size = size;
	rgn->flags = flags;
	memblock_set_region_node(rgn, nid);
	type->cnt++;
	type->total_size += size;
}

/**
 * memblock_add_range - add new memblock region
 * @type: memblock type to add new region into
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 * @flags: flags of the new region
 *
 * Add new memblock region [@base,@base+@size) into @type.  The new region
 * is allowed to overlap with existing ones - overlaps don't affect already
 * existing regions.  @type is guaranteed to be minimal (all neighbouring
 * compatible regions are merged) after the addition.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_add_range(struct memblock_type *type,
				phys_addr_t base, phys_addr_t size,
				int nid, unsigned long flags)
{
	bool insert = false;
	phys_addr_t obase = base;
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int i, nr_new;

	if (!size)
		return 0;

	/* special case for empty array */
	if (type->regions[0].size == 0) {
		WARN_ON(type->cnt != 1 || type->total_size);
		type->regions[0].base = base;
		type->regions[0].size = size;
		type->regions[0].flags = flags;
		memblock_set_region_node(&type->regions[0], nid);
		type->total_size = size;
		return 0;
	}
repeat:
	/*
	 * The following is executed twice.  Once with %false @insert and
	 * then with %true.  The first counts the number of regions needed
	 * to accommodate the new area.  The second actually inserts them.
	 */
	base = obase;
	nr_new = 0;

	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;
		/*
		 * @rgn overlaps.  If it separates the lower part of new
		 * area, insert that portion.
		 */
		if (rbase > base) {
			nr_new++;
			if (insert)
				memblock_insert_region(type, i++, base,
						       rbase - base, nid,
						       flags);
		}
		/* area below @rend is dealt with, forget about it */
		base = min(rend, end);
	}

	/* insert the remaining portion */
	if (base < end) {
		nr_new++;
		if (insert)
			memblock_insert_region(type, i, base, end - base,
					       nid, flags);
	}

	/*
	 * If this was the first round, resize array and repeat for actual
	 * insertions; otherwise, merge and return.
	 */
	if (!insert) {
		while (type->cnt + nr_new > type->max)
			if (memblock_double_array(type, obase, size) < 0)
				return -ENOMEM;
		insert = true;
		goto repeat;
	} else {
		memblock_merge_regions(type);
		return 0;
	}
}

int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
				       int nid)
{
	return memblock_add_range(&memblock.memory, base, size, nid, 0);
}

static int __init_memblock memblock_add_region(phys_addr_t base,
						phys_addr_t size,
						int nid,
						unsigned long flags)
{
	struct memblock_type *_rgn = &memblock.memory;

	memblock_dbg("memblock_add: [%#016llx-%#016llx] flags %#02lx %pF\n",
		     (unsigned long long)base,
		     (unsigned long long)base + size - 1,
		     flags, (void *)_RET_IP_);

	return memblock_add_range(_rgn, base, size, nid, flags);
}

int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
{
	return memblock_add_region(base, size, MAX_NUMNODES, 0);
}
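
/*
 * Worked example (illustrative): starting from an empty type, adding
 * [0x0, 0x2000) and then [0x1000, 0x3000) leaves a single merged region
 * [0x0, 0x3000) in memblock.memory - memblock_add_range() inserts only
 * the non-overlapping tail and memblock_merge_regions() coalesces the
 * neighbours.
 */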

/**
 * memblock_isolate_range - isolate given range into disjoint memblocks
 * @type: memblock type to isolate range for
 * @base: base of range to isolate
 * @size: size of range to isolate
 * @start_rgn: out parameter for the start of isolated region
 * @end_rgn: out parameter for the end of isolated region
 *
 * Walk @type and ensure that regions don't cross the boundaries defined by
 * [@base,@base+@size).  Crossing regions are split at the boundaries,
 * which may create at most two more regions.  The index of the first
 * region inside the range is returned in *@start_rgn and end in *@end_rgn.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int __init_memblock memblock_isolate_range(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size,
					int *start_rgn, int *end_rgn)
{
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int i;

	*start_rgn = *end_rgn = 0;

	if (!size)
		return 0;

	/* we'll create at most two more regions */
	while (type->cnt + 2 > type->max)
		if (memblock_double_array(type, base, size) < 0)
			return -ENOMEM;

	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;

		if (rbase < base) {
			/*
			 * @rgn intersects from below.  Split and continue
			 * to process the next region - the new top half.
			 */
			rgn->base = base;
			rgn->size -= base - rbase;
			type->total_size -= base - rbase;
			memblock_insert_region(type, i, rbase, base - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else if (rend > end) {
			/*
			 * @rgn intersects from above.  Split and redo the
			 * current region - the new bottom half.
			 */
			rgn->base = end;
			rgn->size -= end - rbase;
			type->total_size -= end - rbase;
			memblock_insert_region(type, i--, rbase, end - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else {
			/* @rgn is fully contained, record it */
			if (!*end_rgn)
				*start_rgn = i;
			*end_rgn = i + 1;
		}
	}

	return 0;
}
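
/*
 * Worked example (illustrative): isolating [0x1000, 0x3000) out of a
 * single region [0x0, 0x4000) splits it into [0x0, 0x1000),
 * [0x1000, 0x3000) and [0x3000, 0x4000); *start_rgn/*end_rgn then
 * delimit the middle region so that callers such as
 * memblock_remove_range() can operate on it alone.
 */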

int __init_memblock memblock_remove_range(struct memblock_type *type,
					  phys_addr_t base, phys_addr_t size)
{
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = end_rgn - 1; i >= start_rgn; i--)
		memblock_remove_region(type, i);
	return 0;
}

int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
{
	return memblock_remove_range(&memblock.memory, base, size);
}


int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
{
	memblock_dbg("   memblock_free: [%#016llx-%#016llx] %pF\n",
		     (unsigned long long)base,
		     (unsigned long long)base + size - 1,
		     (void *)_RET_IP_);

	kmemleak_free_part(__va(base), size);
	return memblock_remove_range(&memblock.reserved, base, size);
}

static int __init_memblock memblock_reserve_region(phys_addr_t base,
						   phys_addr_t size,
						   int nid,
						   unsigned long flags)
{
	struct memblock_type *type = &memblock.reserved;

	memblock_dbg("memblock_reserve: [%#016llx-%#016llx] flags %#02lx %pF\n",
		     (unsigned long long)base,
		     (unsigned long long)base + size - 1,
		     flags, (void *)_RET_IP_);

	return memblock_add_range(type, base, size, nid, flags);
}

int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
{
	return memblock_reserve_region(base, size, MAX_NUMNODES, 0);
}

/**
 * memblock_setclr_flag - set or clear a flag on a region of memory
 *
 * This function isolates region [@base, @base + @size), and sets/clears flag.
 *
 * Return 0 on success, -errno on failure.
 */
static int __init_memblock memblock_setclr_flag(phys_addr_t base,
				phys_addr_t size, int set, int flag)
{
	struct memblock_type *type = &memblock.memory;
	int i, ret, start_rgn, end_rgn;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++)
		if (set)
			memblock_set_region_flags(&type->regions[i], flag);
		else
			memblock_clear_region_flags(&type->regions[i], flag);

	memblock_merge_regions(type);
	return 0;
}

/**
 * memblock_mark_hotplug - Mark hotpluggable memory with flag MEMBLOCK_HOTPLUG.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 1, MEMBLOCK_HOTPLUG);
}

/**
 * memblock_clear_hotplug - Clear flag MEMBLOCK_HOTPLUG for a specified region.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 0, MEMBLOCK_HOTPLUG);
}

/**
 * memblock_mark_mirror - Mark mirrored memory with flag MEMBLOCK_MIRROR.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size)
{
	system_has_some_mirror = true;

	return memblock_setclr_flag(base, size, 1, MEMBLOCK_MIRROR);
}
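
/*
 * Illustrative note: firmware discovery code (e.g. the EFI memory-mirror
 * scan on this era's x86) is expected to call memblock_mark_mirror() for
 * each mirrored range it reports; choose_memblock_flags() then returns
 * MEMBLOCK_MIRROR so that early allocations prefer mirrored memory and
 * fall back, with a warning, once it is exhausted.
 */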


/**
 * __next_mem_range - next function for for_each_free_mem_range() etc.
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Find the first area from *@idx which matches @nid, fill the out
 * parameters, and update *@idx for the next iteration.  The lower 32bit of
 * *@idx contains index into type_a and the upper 32bit indexes the
 * areas before each region in type_b.	For example, if type_b regions
 * look like the following,
 *
 *	0:[0-16), 1:[32-48), 2:[128-130)
 *
 * The upper 32bit indexes the following regions.
 *
 *	0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX)
 *
 * As both region arrays are sorted, the function advances the two indices
 * in lockstep and returns each intersection.
 */
void __init_memblock __next_mem_range(u64 *idx, int nid, ulong flags,
				      struct memblock_type *type_a,
				      struct memblock_type *type_b,
				      phys_addr_t *out_start,
				      phys_addr_t *out_end, int *out_nid)
{
	int idx_a = *idx & 0xffffffff;
	int idx_b = *idx >> 32;

	if (WARN_ONCE(nid == MAX_NUMNODES,
	"Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	for (; idx_a < type_a->cnt; idx_a++) {
		struct memblock_region *m = &type_a->regions[idx_a];

		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;
		int	    m_nid = memblock_get_region_node(m);

		/* only memory regions are associated with nodes, check it */
		if (nid != NUMA_NO_NODE && nid != m_nid)
			continue;

		/* skip hotpluggable memory regions if needed */
		if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
			continue;

		/* if we want mirror memory skip non-mirror memory regions */
		if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
			continue;

		if (!type_b) {
			if (out_start)
				*out_start = m_start;
			if (out_end)
				*out_end = m_end;
			if (out_nid)
				*out_nid = m_nid;
			idx_a++;
			*idx = (u32)idx_a | (u64)idx_b << 32;
			return;
		}

		/* scan areas before each reservation */
		for (; idx_b < type_b->cnt + 1; idx_b++) {
			struct memblock_region *r;
			phys_addr_t r_start;
			phys_addr_t r_end;

			r = &type_b->regions[idx_b];
			r_start = idx_b ? r[-1].base + r[-1].size : 0;
			r_end = idx_b < type_b->cnt ?
				r->base : ULLONG_MAX;

			/*
			 * if idx_b advanced past idx_a,
			 * break out to advance idx_a
			 */
			if (r_start >= m_end)
				break;
			/* if the two regions intersect, we're done */
			if (m_start < r_end) {
				if (out_start)
912 913
					*out_start =
						max(m_start, r_start);
914 915 916
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
917
					*out_nid = m_nid;
918
				/*
919 920
				 * The region which ends first is
				 * advanced for the next iteration.
921 922
				 */
				if (m_end <= r_end)
923
					idx_a++;
924
				else
925 926
					idx_b++;
				*idx = (u32)idx_a | (u64)idx_b << 32;
927 928 929 930 931 932 933 934 935
				return;
			}
		}
	}

	/* signal end of iteration */
	*idx = ULLONG_MAX;
}
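
/*
 * Illustrative sketch of a hypothetical caller: summing up the free
 * (memory minus reserved) ranges on node 0 with the iterator above via
 * the for_each_free_mem_range() wrapper.  The guard macro is hypothetical.
 */
#ifdef MEMBLOCK_USAGE_EXAMPLE
static phys_addr_t __init example_free_bytes_on_node0(void)
{
	phys_addr_t start, end, total = 0;
	u64 i;

	for_each_free_mem_range(i, 0, MEMBLOCK_NONE, &start, &end, NULL)
		total += end - start;
	return total;
}
#endif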

/**
 * __next_mem_range_rev - generic next function for for_each_*_range_rev()
 *
 * Finds the next range from type_a which is not marked as unsuitable
 * in type_b.
 *
942
 * @idx: pointer to u64 loop variable
943
 * @nid: nid: node selector, %NUMA_NO_NODE for all nodes
944
 * @flags: pick from blocks based on memory attributes
945 946
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
W
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
950
 *
951
 * Reverse of __next_mem_range().
952
 */
953
void __init_memblock __next_mem_range_rev(u64 *idx, int nid, ulong flags,
954 955 956 957
					  struct memblock_type *type_a,
					  struct memblock_type *type_b,
					  phys_addr_t *out_start,
					  phys_addr_t *out_end, int *out_nid)
958
{
959 960
	int idx_a = *idx & 0xffffffff;
	int idx_b = *idx >> 32;
961

962 963
	if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;
964 965

	if (*idx == (u64)ULLONG_MAX) {
966 967
		idx_a = type_a->cnt - 1;
		idx_b = type_b->cnt;
968 969
	}

970 971 972
	for (; idx_a >= 0; idx_a--) {
		struct memblock_region *m = &type_a->regions[idx_a];

973 974
		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;
975
		int m_nid = memblock_get_region_node(m);
976 977

		/* only memory regions are associated with nodes, check it */
978
		if (nid != NUMA_NO_NODE && nid != m_nid)
979 980
			continue;

981 982 983 984
		/* skip hotpluggable memory regions if needed */
		if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
			continue;

		/* if we want mirror memory skip non-mirror memory regions */
		if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
			continue;

		if (!type_b) {
			if (out_start)
				*out_start = m_start;
			if (out_end)
				*out_end = m_end;
			if (out_nid)
				*out_nid = m_nid;
			idx_a--;
			*idx = (u32)idx_a | (u64)idx_b << 32;
			return;
		}

		/* scan areas before each reservation */
		for (; idx_b >= 0; idx_b--) {
			struct memblock_region *r;
			phys_addr_t r_start;
			phys_addr_t r_end;

			r = &type_b->regions[idx_b];
			r_start = idx_b ? r[-1].base + r[-1].size : 0;
			r_end = idx_b < type_b->cnt ?
				r->base : ULLONG_MAX;
			/*
			 * if idx_b advanced past idx_a,
			 * break out to advance idx_a
			 */

			if (r_end <= m_start)
				break;
			/* if the two regions intersect, we're done */
			if (m_end > r_start) {
				if (out_start)
					*out_start = max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = m_nid;
				if (m_start >= r_start)
					idx_a--;
				else
					idx_b--;
				*idx = (u32)idx_a | (u64)idx_b << 32;
				return;
			}
		}
	}
	/* signal end of iteration */
	*idx = ULLONG_MAX;
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
/*
 * Common iterator interface used to define for_each_mem_pfn_range().
 */
void __init_memblock __next_mem_pfn_range(int *idx, int nid,
				unsigned long *out_start_pfn,
				unsigned long *out_end_pfn, int *out_nid)
{
	struct memblock_type *type = &memblock.memory;
	struct memblock_region *r;

	while (++*idx < type->cnt) {
		r = &type->regions[*idx];

		if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
			continue;
		if (nid == MAX_NUMNODES || nid == r->nid)
			break;
	}
	if (*idx >= type->cnt) {
		*idx = -1;
		return;
	}

	if (out_start_pfn)
		*out_start_pfn = PFN_UP(r->base);
	if (out_end_pfn)
		*out_end_pfn = PFN_DOWN(r->base + r->size);
	if (out_nid)
		*out_nid = r->nid;
}

/**
 * memblock_set_node - set node ID on memblock regions
 * @base: base of area to set node ID for
 * @size: size of area to set node ID for
 * @type: memblock type to set node ID for
 * @nid: node ID to set
 *
 * Set the nid of memblock @type regions in [@base,@base+@size) to @nid.
 * Regions which cross the area boundaries are split as necessary.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
				      struct memblock_type *type, int nid)
{
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++)
		memblock_set_region_node(&type->regions[i], nid);

	memblock_merge_regions(type);
	return 0;
}
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

static phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
					phys_addr_t align, phys_addr_t start,
					phys_addr_t end, int nid, ulong flags)
{
	phys_addr_t found;

	if (!align)
		align = SMP_CACHE_BYTES;

	found = memblock_find_in_range_node(size, align, start, end, nid,
					    flags);
	if (found && !memblock_reserve(found, size)) {
		/*
		 * The min_count is set to 0 so that memblock allocations are
		 * never reported as leaks.
		 */
		kmemleak_alloc(__va(found), size, 0, 0);
		return found;
	}
	return 0;
}

phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align,
					phys_addr_t start, phys_addr_t end,
					ulong flags)
{
	return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE,
					flags);
}

static phys_addr_t __init memblock_alloc_base_nid(phys_addr_t size,
					phys_addr_t align, phys_addr_t max_addr,
					int nid, ulong flags)
{
	return memblock_alloc_range_nid(size, align, 0, max_addr, nid, flags);
}

phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	ulong flags = choose_memblock_flags();
	phys_addr_t ret;

again:
	ret = memblock_alloc_base_nid(size, align, MEMBLOCK_ALLOC_ACCESSIBLE,
				      nid, flags);

	if (!ret && (flags & MEMBLOCK_MIRROR)) {
		flags &= ~MEMBLOCK_MIRROR;
		goto again;
	}
	return ret;
}

phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	return memblock_alloc_base_nid(size, align, max_addr, NUMA_NO_NODE,
				       MEMBLOCK_NONE);
}

phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	phys_addr_t alloc;

	alloc = __memblock_alloc_base(size, align, max_addr);

	if (alloc == 0)
		panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
		      (unsigned long long) size, (unsigned long long) max_addr);

	return alloc;
}

phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
{
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}

phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	phys_addr_t res = memblock_alloc_nid(size, align, nid);

	if (res)
		return res;
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}
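
/*
 * Illustrative sketch of a hypothetical caller: a page-sized, page-aligned
 * early allocation that prefers node 1 but, as implemented above, falls
 * back to any node rather than failing.  The guard macro is hypothetical.
 */
#ifdef MEMBLOCK_USAGE_EXAMPLE
static phys_addr_t __init example_alloc_page_on_node1(void)
{
	return memblock_alloc_try_nid(PAGE_SIZE, PAGE_SIZE, 1);
}
#endif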

/**
 * memblock_virt_alloc_internal - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region to allocate (phys address)
 * @max_addr: the upper bound of the memory region to allocate (phys address)
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * The @min_addr limit is dropped if it can not be satisfied and the allocation
 * will fall back to memory below @min_addr. Also, allocation may fall back
 * to any node in the system if the specified node can not
 * hold the requested memory.
 *
 * The allocation is performed from memory region limited by
 * memblock.current_limit if @max_addr == %BOOTMEM_ALLOC_ACCESSIBLE.
 *
 * The memory block is aligned on SMP_CACHE_BYTES if @align == 0.
 *
 * The phys address of allocated boot memory block is converted to virtual and
 * allocated memory is reset to 0.
 *
 * In addition, function sets the min_count to 0 using kmemleak_alloc for
 * allocated boot memory block, so that it is never reported as leaks.
 *
 * RETURNS:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
static void * __init memblock_virt_alloc_internal(
				phys_addr_t size, phys_addr_t align,
				phys_addr_t min_addr, phys_addr_t max_addr,
				int nid)
{
	phys_addr_t alloc;
	void *ptr;
	ulong flags = choose_memblock_flags();

	if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	/*
	 * Detect any accidental use of these APIs after slab is ready, as at
	 * this moment memblock may be deinitialized already and its
	 * internal data may be destroyed (after execution of free_all_bootmem)
	 */
	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, nid);

	if (!align)
		align = SMP_CACHE_BYTES;

	if (max_addr > memblock.current_limit)
		max_addr = memblock.current_limit;

again:
	alloc = memblock_find_in_range_node(size, align, min_addr, max_addr,
					    nid, flags);
	if (alloc)
		goto done;

	if (nid != NUMA_NO_NODE) {
		alloc = memblock_find_in_range_node(size, align, min_addr,
						    max_addr, NUMA_NO_NODE,
						    flags);
		if (alloc)
			goto done;
	}

	if (min_addr) {
		min_addr = 0;
		goto again;
	}

	if (flags & MEMBLOCK_MIRROR) {
		flags &= ~MEMBLOCK_MIRROR;
		pr_warn("Could not allocate %pap bytes of mirrored memory\n",
			&size);
		goto again;
	}

	return NULL;
done:
	memblock_reserve(alloc, size);
	ptr = phys_to_virt(alloc);
	memset(ptr, 0, size);

	/*
	 * The min_count is set to 0 so that bootmem allocated blocks
	 * are never reported as leaks. This is because many of these blocks
	 * are only referred via the physical address which is not
	 * looked up by kmemleak.
	 */
	kmemleak_alloc(ptr, size, 0, 0);

	return ptr;
}

/**
 * memblock_virt_alloc_try_nid_nopanic - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public version of _memblock_virt_alloc_try_nid_nopanic() which provides
 * additional debug information (including caller info), if enabled.
 *
 * RETURNS:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
void * __init memblock_virt_alloc_try_nid_nopanic(
				phys_addr_t size, phys_addr_t align,
				phys_addr_t min_addr, phys_addr_t max_addr,
				int nid)
{
	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx %pF\n",
		     __func__, (u64)size, (u64)align, nid, (u64)min_addr,
		     (u64)max_addr, (void *)_RET_IP_);
	return memblock_virt_alloc_internal(size, align, min_addr,
					     max_addr, nid);
}

/**
 * memblock_virt_alloc_try_nid - allocate boot memory block with panicking
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public panicking version of _memblock_virt_alloc_try_nid_nopanic()
 * which provides debug information (including caller info), if enabled,
 * and panics if the request can not be satisfied.
 *
 * RETURNS:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
void * __init memblock_virt_alloc_try_nid(
			phys_addr_t size, phys_addr_t align,
			phys_addr_t min_addr, phys_addr_t max_addr,
			int nid)
{
	void *ptr;

	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx %pF\n",
		     __func__, (u64)size, (u64)align, nid, (u64)min_addr,
		     (u64)max_addr, (void *)_RET_IP_);
	ptr = memblock_virt_alloc_internal(size, align,
					   min_addr, max_addr, nid);
	if (ptr)
		return ptr;

	panic("%s: Failed to allocate %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx\n",
	      __func__, (u64)size, (u64)align, nid, (u64)min_addr,
	      (u64)max_addr);
	return NULL;
}
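
/*
 * Illustrative sketch of a hypothetical caller: a zeroed table of 128
 * pointers from any accessible memory, panicking on failure.
 * BOOTMEM_ALLOC_ACCESSIBLE comes from <linux/bootmem.h>; the guard macro
 * is hypothetical.
 */
#ifdef MEMBLOCK_USAGE_EXAMPLE
static void * __init example_virt_alloc_table(void)
{
	return memblock_virt_alloc_try_nid(128 * sizeof(void *),
					   SMP_CACHE_BYTES, 0,
					   BOOTMEM_ALLOC_ACCESSIBLE,
					   NUMA_NO_NODE);
}
#endif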

/**
 * __memblock_free_early - free boot memory block
 * @base: phys starting address of the  boot memory block
 * @size: size of the boot memory block in bytes
 *
 * Free boot memory block previously allocated by memblock_virt_alloc_xx() API.
 * The freeing memory will not be released to the buddy allocator.
 */
void __init __memblock_free_early(phys_addr_t base, phys_addr_t size)
{
	memblock_dbg("%s: [%#016llx-%#016llx] %pF\n",
		     __func__, (u64)base, (u64)base + size - 1,
		     (void *)_RET_IP_);
	kmemleak_free_part(__va(base), size);
	memblock_remove_range(&memblock.reserved, base, size);
}

/*
 * __memblock_free_late - free bootmem block pages directly to buddy allocator
 * @addr: phys starting address of the  boot memory block
 * @size: size of the boot memory block in bytes
 *
 * This is only useful when the bootmem allocator has already been torn
 * down, but we are still initializing the system.  Pages are released directly
 * to the buddy allocator, no bootmem metadata is updated because it is gone.
 */
void __init __memblock_free_late(phys_addr_t base, phys_addr_t size)
{
	u64 cursor, end;

	memblock_dbg("%s: [%#016llx-%#016llx] %pF\n",
		     __func__, (u64)base, (u64)base + size - 1,
		     (void *)_RET_IP_);
	kmemleak_free_part(__va(base), size);
	cursor = PFN_UP(base);
	end = PFN_DOWN(base + size);

	for (; cursor < end; cursor++) {
		__free_pages_bootmem(pfn_to_page(cursor), 0);
		totalram_pages++;
	}
}

/*
 * Remaining API functions
 */

phys_addr_t __init memblock_phys_mem_size(void)
{
	return memblock.memory.total_size;
}

phys_addr_t __init memblock_mem_size(unsigned long limit_pfn)
{
	unsigned long pages = 0;
	struct memblock_region *r;
	unsigned long start_pfn, end_pfn;

	for_each_memblock(memory, r) {
		start_pfn = memblock_region_memory_base_pfn(r);
		end_pfn = memblock_region_memory_end_pfn(r);
		start_pfn = min_t(unsigned long, start_pfn, limit_pfn);
		end_pfn = min_t(unsigned long, end_pfn, limit_pfn);
		pages += end_pfn - start_pfn;
	}

	return PFN_PHYS(pages);
}

/* lowest address */
phys_addr_t __init_memblock memblock_start_of_DRAM(void)
{
	return memblock.memory.regions[0].base;
}

phys_addr_t __init_memblock memblock_end_of_DRAM(void)
{
	int idx = memblock.memory.cnt - 1;

	return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
}

void __init memblock_enforce_memory_limit(phys_addr_t limit)
{
	phys_addr_t max_addr = (phys_addr_t)ULLONG_MAX;
	struct memblock_region *r;

	if (!limit)
		return;

	/* find out max address */
	for_each_memblock(memory, r) {
		if (limit <= r->size) {
			max_addr = r->base + limit;
			break;
		}
		limit -= r->size;
	}

	/* truncate both memory and reserved regions */
	memblock_remove_range(&memblock.memory, max_addr,
			      (phys_addr_t)ULLONG_MAX);
	memblock_remove_range(&memblock.reserved, max_addr,
			      (phys_addr_t)ULLONG_MAX);
}

static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
{
	unsigned int left = 0, right = type->cnt;

	do {
		unsigned int mid = (right + left) / 2;

		if (addr < type->regions[mid].base)
			right = mid;
		else if (addr >= (type->regions[mid].base +
				  type->regions[mid].size))
			left = mid + 1;
		else
			return mid;
	} while (left < right);
	return -1;
}

int __init memblock_is_reserved(phys_addr_t addr)
{
	return memblock_search(&memblock.reserved, addr) != -1;
}

int __init_memblock memblock_is_memory(phys_addr_t addr)
{
	return memblock_search(&memblock.memory, addr) != -1;
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int __init_memblock memblock_search_pfn_nid(unsigned long pfn,
			 unsigned long *start_pfn, unsigned long *end_pfn)
{
	struct memblock_type *type = &memblock.memory;
	int mid = memblock_search(type, PFN_PHYS(pfn));

	if (mid == -1)
		return -1;

	*start_pfn = PFN_DOWN(type->regions[mid].base);
	*end_pfn = PFN_DOWN(type->regions[mid].base + type->regions[mid].size);

	return type->regions[mid].nid;
}
#endif

/**
 * memblock_is_region_memory - check if a region is a subset of memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base+@size) is a subset of a memory block.
 *
 * RETURNS:
 * 0 if false, non-zero if true
 */
int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
{
	int idx = memblock_search(&memblock.memory, base);
	phys_addr_t end = base + memblock_cap_size(base, &size);

	if (idx == -1)
		return 0;
	return memblock.memory.regions[idx].base <= base &&
		(memblock.memory.regions[idx].base +
		 memblock.memory.regions[idx].size) >= end;
}

/**
 * memblock_is_region_reserved - check if a region intersects reserved memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base+@size) intersects a reserved memory block.
 *
 * RETURNS:
 * 0 if false, non-zero if true
 */
int __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
{
	memblock_cap_size(base, &size);
	return memblock_overlaps_region(&memblock.reserved, base, size) >= 0;
}

void __init_memblock memblock_trim_memory(phys_addr_t align)
{
	phys_addr_t start, end, orig_start, orig_end;
	struct memblock_region *r;

	for_each_memblock(memory, r) {
		orig_start = r->base;
		orig_end = r->base + r->size;
		start = round_up(orig_start, align);
		end = round_down(orig_end, align);

		if (start == orig_start && end == orig_end)
			continue;

		if (start < end) {
			r->base = start;
			r->size = end - start;
		} else {
			memblock_remove_region(&memblock.memory,
					       r - memblock.memory.regions);
			r--;
		}
	}
}

void __init_memblock memblock_set_current_limit(phys_addr_t limit)
{
	memblock.current_limit = limit;
}

phys_addr_t __init_memblock memblock_get_current_limit(void)
{
	return memblock.current_limit;
}

static void __init_memblock memblock_dump(struct memblock_type *type, char *name)
{
	unsigned long long base, size;
	unsigned long flags;
	int i;

	pr_info(" %s.cnt  = 0x%lx\n", name, type->cnt);

	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		char nid_buf[32] = "";

		base = rgn->base;
		size = rgn->size;
		flags = rgn->flags;
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
		if (memblock_get_region_node(rgn) != MAX_NUMNODES)
			snprintf(nid_buf, sizeof(nid_buf), " on node %d",
				 memblock_get_region_node(rgn));
#endif
		pr_info(" %s[%#x]\t[%#016llx-%#016llx], %#llx bytes%s flags: %#lx\n",
			name, i, base, base + size - 1, size, nid_buf, flags);
	}
}

void __init_memblock __memblock_dump_all(void)
{
	pr_info("MEMBLOCK configuration:\n");
	pr_info(" memory size = %#llx reserved size = %#llx\n",
		(unsigned long long)memblock.memory.total_size,
		(unsigned long long)memblock.reserved.total_size);

	memblock_dump(&memblock.memory, "memory");
	memblock_dump(&memblock.reserved, "reserved");
}

void __init memblock_allow_resize(void)
{
	memblock_can_resize = 1;
}

static int __init early_memblock(char *p)
{
	if (p && strstr(p, "debug"))
		memblock_debug = 1;
	return 0;
}
early_param("memblock", early_memblock);
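
/*
 * Illustrative note: booting with "memblock=debug" on the kernel command
 * line sets memblock_debug above, which turns the memblock_dbg() calls
 * throughout this file into pr_info() output.
 */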

#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_ARCH_DISCARD_MEMBLOCK)

static int memblock_debug_show(struct seq_file *m, void *private)
{
	struct memblock_type *type = m->private;
	struct memblock_region *reg;
	int i;

	for (i = 0; i < type->cnt; i++) {
		reg = &type->regions[i];
		seq_printf(m, "%4d: ", i);
		if (sizeof(phys_addr_t) == 4)
			seq_printf(m, "0x%08lx..0x%08lx\n",
				   (unsigned long)reg->base,
				   (unsigned long)(reg->base + reg->size - 1));
		else
			seq_printf(m, "0x%016llx..0x%016llx\n",
				   (unsigned long long)reg->base,
				   (unsigned long long)(reg->base + reg->size - 1));

	}
	return 0;
}

static int memblock_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, memblock_debug_show, inode->i_private);
}

static const struct file_operations memblock_debug_fops = {
	.open = memblock_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __init memblock_init_debugfs(void)
{
	struct dentry *root = debugfs_create_dir("memblock", NULL);
	if (!root)
		return -ENXIO;
	debugfs_create_file("memory", S_IRUGO, root, &memblock.memory, &memblock_debug_fops);
	debugfs_create_file("reserved", S_IRUGO, root, &memblock.reserved, &memblock_debug_fops);
1668 1669 1670
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	debugfs_create_file("physmem", S_IRUGO, root, &memblock.physmem, &memblock_debug_fops);
#endif
1671 1672 1673 1674 1675 1676

	return 0;
}
__initcall(memblock_init_debugfs);

#endif /* CONFIG_DEBUG_FS */