/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>

#include <asm/sections.h>
#include <linux/io.h>

#include "internal.h"

static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS] __initdata_memblock;
#endif

struct memblock memblock __initdata_memblock = {
	.memory.regions		= memblock_memory_init_regions,
	.memory.cnt		= 1,	/* empty dummy entry */
	.memory.max		= INIT_MEMBLOCK_REGIONS,

	.reserved.regions	= memblock_reserved_init_regions,
	.reserved.cnt		= 1,	/* empty dummy entry */
	.reserved.max		= INIT_MEMBLOCK_REGIONS,

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	.physmem.regions	= memblock_physmem_init_regions,
	.physmem.cnt		= 1,	/* empty dummy entry */
	.physmem.max		= INIT_PHYSMEM_REGIONS,
#endif

	.bottom_up		= false,
	.current_limit		= MEMBLOCK_ALLOC_ANYWHERE,
};

int memblock_debug __initdata_memblock;
#ifdef CONFIG_MOVABLE_NODE
bool movable_node_enabled __initdata_memblock = false;
#endif
static bool system_has_some_mirror __initdata_memblock = false;
static int memblock_can_resize __initdata_memblock;
static int memblock_memory_in_slab __initdata_memblock = 0;
static int memblock_reserved_in_slab __initdata_memblock = 0;

ulong __init_memblock choose_memblock_flags(void)
{
	return system_has_some_mirror ? MEMBLOCK_MIRROR : MEMBLOCK_NONE;
}

/* inline so we don't get a warning when pr_debug is compiled out */
static __init_memblock const char *
memblock_type_name(struct memblock_type *type)
{
	if (type == &memblock.memory)
		return "memory";
	else if (type == &memblock.reserved)
		return "reserved";
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	else if (type == &memblock.physmem)
		return "physmem";
#endif
	else
		return "unknown";
}

/* adjust *@size so that (@base + *@size) doesn't overflow, return new size */
static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
{
	return *size = min(*size, (phys_addr_t)ULLONG_MAX - base);
}

/*
 * Address comparison utilities
 */
static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
				       phys_addr_t base2, phys_addr_t size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

bool __init_memblock memblock_overlaps_region(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size)
{
	unsigned long i;

	for (i = 0; i < type->cnt; i++)
		if (memblock_addrs_overlap(base, size, type->regions[i].base,
					   type->regions[i].size))
			break;
	return i < type->cnt;
}

/**
 * __memblock_find_range_bottom_up - find free area utility in bottom-up
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), finds free area bottom-up.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
				phys_addr_t size, phys_addr_t align, int nid,
				ulong flags)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range(i, nid, flags, &this_start, &this_end, NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		cand = round_up(this_start, align);
		if (cand < this_end && this_end - cand >= size)
			return cand;
	}

	return 0;
}

/**
 * __memblock_find_range_top_down - find free area utility, in top-down
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), finds free area top-down.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
			       phys_addr_t size, phys_addr_t align, int nid,
			       ulong flags)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range_reverse(i, nid, flags, &this_start, &this_end,
					NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		if (this_end < size)
			continue;

		cand = round_down(this_end - size, align);
		if (cand >= this_start)
			return cand;
	}

	return 0;
}

/**
 * memblock_find_in_range_node - find free area in given range and node
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Find @size free area aligned to @align in the specified range and node.
 *
 * When allocation direction is bottom-up, the @start should be greater
 * than the end of the kernel image. Otherwise, it will be trimmed. The
 * reason is that we want the bottom-up allocation just near the kernel
 * image so it is highly likely that the allocated memory and the kernel
 * will reside in the same node.
 *
 * If bottom-up allocation failed, will try to allocate memory top-down.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
					phys_addr_t align, phys_addr_t start,
					phys_addr_t end, int nid, ulong flags)
{
	phys_addr_t kernel_end, ret;

	/* pump up @end */
	if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
		end = memblock.current_limit;

	/* avoid allocating the first page */
	start = max_t(phys_addr_t, start, PAGE_SIZE);
	end = max(start, end);
	kernel_end = __pa_symbol(_end);

	/*
	 * try bottom-up allocation only when bottom-up mode
	 * is set and @end is above the kernel image.
	 */
	if (memblock_bottom_up() && end > kernel_end) {
		phys_addr_t bottom_up_start;

		/* make sure we will allocate above the kernel */
		bottom_up_start = max(start, kernel_end);

		/* ok, try bottom-up allocation first */
		ret = __memblock_find_range_bottom_up(bottom_up_start, end,
						      size, align, nid, flags);
		if (ret)
			return ret;

		/*
		 * we always limit bottom-up allocation above the kernel,
		 * but top-down allocation doesn't have the limit, so
		 * retrying top-down allocation may succeed when bottom-up
		 * allocation failed.
		 *
		 * bottom-up allocation is expected to fail very rarely,
		 * so we use WARN_ONCE() here to see the stack trace if
		 * a failure happens.
		 */
		WARN_ONCE(1, "memblock: bottom-up allocation failed, memory hotunplug may be affected\n");
	}

	return __memblock_find_range_top_down(start, end, size, align, nid,
					      flags);
}
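
/*
 * Illustrative example (hypothetical addresses): with bottom-up mode
 * enabled and a kernel image ending at 0x2000000,
 *
 *	addr = memblock_find_in_range_node(SZ_4M, SZ_2M, 0,
 *					   MEMBLOCK_ALLOC_ACCESSIBLE,
 *					   NUMA_NO_NODE, MEMBLOCK_NONE);
 *
 * first searches upward from 0x2000000 so the result lands near the
 * kernel (and hence likely on the same node), and only falls back to the
 * usual top-down search if that fails.
 */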

/**
 * memblock_find_in_range - find free area in given range
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 *
 * Find @size free area aligned to @align in the specified range.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
					phys_addr_t end, phys_addr_t size,
					phys_addr_t align)
{
	phys_addr_t ret;
	ulong flags = choose_memblock_flags();

again:
	ret = memblock_find_in_range_node(size, align, start, end,
					    NUMA_NO_NODE, flags);

	if (!ret && (flags & MEMBLOCK_MIRROR)) {
		pr_warn("Could not allocate %pap bytes of mirrored memory\n",
			&size);
		flags &= ~MEMBLOCK_MIRROR;
		goto again;
	}

	return ret;
}

static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
{
	type->total_size -= type->regions[r].size;
	memmove(&type->regions[r], &type->regions[r + 1],
		(type->cnt - (r + 1)) * sizeof(type->regions[r]));
	type->cnt--;

	/* Special case for empty arrays */
	if (type->cnt == 0) {
		WARN_ON(type->total_size != 0);
		type->cnt = 1;
		type->regions[0].base = 0;
		type->regions[0].size = 0;
		type->regions[0].flags = 0;
		memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
	}
}

#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK

phys_addr_t __init_memblock get_allocated_memblock_reserved_regions_info(
					phys_addr_t *addr)
{
	if (memblock.reserved.regions == memblock_reserved_init_regions)
		return 0;

	*addr = __pa(memblock.reserved.regions);

	return PAGE_ALIGN(sizeof(struct memblock_region) *
			  memblock.reserved.max);
}

phys_addr_t __init_memblock get_allocated_memblock_memory_regions_info(
					phys_addr_t *addr)
{
	if (memblock.memory.regions == memblock_memory_init_regions)
		return 0;

	*addr = __pa(memblock.memory.regions);

	return PAGE_ALIGN(sizeof(struct memblock_region) *
			  memblock.memory.max);
}

#endif

/**
 * memblock_double_array - double the size of the memblock regions array
 * @type: memblock type of the regions array being doubled
 * @new_area_start: starting address of memory range to avoid overlap with
 * @new_area_size: size of memory range to avoid overlap with
 *
 * Double the size of the @type regions array. If memblock is being used to
 * allocate memory for a new reserved regions array and there is a previously
 * allocated memory range [@new_area_start,@new_area_start+@new_area_size]
 * waiting to be reserved, ensure the memory used by the new array does
 * not overlap.
 *
 * RETURNS:
 * 0 on success, -1 on failure.
 */
static int __init_memblock memblock_double_array(struct memblock_type *type,
						phys_addr_t new_area_start,
						phys_addr_t new_area_size)
{
	struct memblock_region *new_array, *old_array;
	phys_addr_t old_alloc_size, new_alloc_size;
	phys_addr_t old_size, new_size, addr;
	int use_slab = slab_is_available();
	int *in_slab;

	/* We don't allow resizing until we know about the reserved regions
	 * of memory that aren't suitable for allocation
	 */
	if (!memblock_can_resize)
		return -1;

	/* Calculate new doubled size */
	old_size = type->max * sizeof(struct memblock_region);
	new_size = old_size << 1;
	/*
	 * We need to allocate the new array aligned to PAGE_SIZE,
	 * so we can free it completely later.
	 */
	old_alloc_size = PAGE_ALIGN(old_size);
	new_alloc_size = PAGE_ALIGN(new_size);

	/* Retrieve the slab flag */
	if (type == &memblock.memory)
		in_slab = &memblock_memory_in_slab;
	else
		in_slab = &memblock_reserved_in_slab;

	/* Try to find some space for it.
	 *
	 * WARNING: We assume that either slab_is_available() is true and we
	 * use slab, or we use MEMBLOCK for allocations. That means that this
	 * is unsafe to use when bootmem is currently active (unless bootmem
	 * itself is implemented on top of MEMBLOCK, which isn't the case yet).
	 *
	 * This should however not be an issue for now, as we currently only
	 * call into MEMBLOCK while it's still active, or much later when slab
	 * is active for memory hotplug operations.
	 */
	if (use_slab) {
		new_array = kmalloc(new_size, GFP_KERNEL);
		addr = new_array ? __pa(new_array) : 0;
	} else {
		/* only exclude range when trying to double reserved.regions */
		if (type != &memblock.reserved)
			new_area_start = new_area_size = 0;

		addr = memblock_find_in_range(new_area_start + new_area_size,
						memblock.current_limit,
						new_alloc_size, PAGE_SIZE);
		if (!addr && new_area_size)
			addr = memblock_find_in_range(0,
				min(new_area_start, memblock.current_limit),
				new_alloc_size, PAGE_SIZE);

		new_array = addr ? __va(addr) : NULL;
	}
	if (!addr) {
		pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
		       memblock_type_name(type), type->max, type->max * 2);
		return -1;
	}

	memblock_dbg("memblock: %s is doubled to %ld at [%#010llx-%#010llx]",
			memblock_type_name(type), type->max * 2, (u64)addr,
			(u64)addr + new_size - 1);

	/*
	 * Found space, we now need to move the array over before we add the
	 * reserved region since it may be our reserved array itself that is
	 * full.
	 */
	memcpy(new_array, type->regions, old_size);
	memset(new_array + type->max, 0, old_size);
	old_array = type->regions;
	type->regions = new_array;
	type->max <<= 1;

	/* Free old array. We needn't free it if the array is the static one */
	if (*in_slab)
		kfree(old_array);
	else if (old_array != memblock_memory_init_regions &&
		 old_array != memblock_reserved_init_regions)
		memblock_free(__pa(old_array), old_alloc_size);

	/*
	 * Reserve the new array if that comes from the memblock.  Otherwise, we
	 * needn't do it
	 */
	if (!use_slab)
		BUG_ON(memblock_reserve(addr, new_alloc_size));

	/* Update slab flag */
	*in_slab = use_slab;

	return 0;
}
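
/*
 * Example of the doubling step: the initial static array holds
 * INIT_MEMBLOCK_REGIONS (128) entries; the first overflow replaces it with
 * a 256-entry array sized PAGE_ALIGN(256 * sizeof(struct memblock_region)).
 * Before slab is up, that array itself comes from memblock, which is why a
 * range about to be reserved by the caller is excluded from the search.
 */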

/**
 * memblock_merge_regions - merge neighboring compatible regions
 * @type: memblock type to scan
 *
 * Scan @type and merge neighboring compatible regions.
 */
static void __init_memblock memblock_merge_regions(struct memblock_type *type)
{
	int i = 0;

	/* cnt never goes below 1 */
	while (i < type->cnt - 1) {
		struct memblock_region *this = &type->regions[i];
		struct memblock_region *next = &type->regions[i + 1];

		if (this->base + this->size != next->base ||
		    memblock_get_region_node(this) !=
		    memblock_get_region_node(next) ||
		    this->flags != next->flags) {
			BUG_ON(this->base + this->size > next->base);
			i++;
			continue;
		}

		this->size += next->size;
		/* move forward from next + 1, index of which is i + 2 */
		memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next));
		type->cnt--;
	}
}
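
/*
 * Illustrative example: two entries with identical nid and flags,
 * [0x1000000, 0x2000000) and [0x2000000, 0x3000000), are collapsed into
 * the single region [0x1000000, 0x3000000); a gap or a flag mismatch
 * between them keeps both entries.
 */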

/**
 * memblock_insert_region - insert new memblock region
 * @type:	memblock type to insert into
 * @idx:	index for the insertion point
 * @base:	base address of the new region
 * @size:	size of the new region
 * @nid:	node id of the new region
 * @flags:	flags of the new region
 *
 * Insert new memblock region [@base,@base+@size) into @type at @idx.
 * @type must already have extra room to accommodate the new region.
 */
static void __init_memblock memblock_insert_region(struct memblock_type *type,
						   int idx, phys_addr_t base,
						   phys_addr_t size,
						   int nid, unsigned long flags)
{
	struct memblock_region *rgn = &type->regions[idx];

	BUG_ON(type->cnt >= type->max);
	memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
	rgn->base = base;
	rgn->size = size;
	rgn->flags = flags;
	memblock_set_region_node(rgn, nid);
	type->cnt++;
	type->total_size += size;
}

/**
 * memblock_add_range - add new memblock region
 * @type: memblock type to add new region into
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 * @flags: flags of the new region
 *
 * Add new memblock region [@base,@base+@size) into @type.  The new region
 * is allowed to overlap with existing ones - overlaps don't affect already
 * existing regions.  @type is guaranteed to be minimal (all neighbouring
 * compatible regions are merged) after the addition.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_add_range(struct memblock_type *type,
				phys_addr_t base, phys_addr_t size,
				int nid, unsigned long flags)
{
	bool insert = false;
	phys_addr_t obase = base;
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int idx, nr_new;
	struct memblock_region *rgn;

	if (!size)
		return 0;

	/* special case for empty array */
	if (type->regions[0].size == 0) {
		WARN_ON(type->cnt != 1 || type->total_size);
		type->regions[0].base = base;
		type->regions[0].size = size;
		type->regions[0].flags = flags;
		memblock_set_region_node(&type->regions[0], nid);
		type->total_size = size;
		return 0;
	}
repeat:
	/*
	 * The following is executed twice.  Once with %false @insert and
	 * then with %true.  The first counts the number of regions needed
	 * to accommodate the new area.  The second actually inserts them.
	 */
	base = obase;
	nr_new = 0;

	for_each_memblock_type(type, rgn) {
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;
		/*
		 * @rgn overlaps.  If it separates the lower part of new
		 * area, insert that portion.
		 */
		if (rbase > base) {
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
			WARN_ON(nid != memblock_get_region_node(rgn));
#endif
			WARN_ON(flags != rgn->flags);
			nr_new++;
			if (insert)
				memblock_insert_region(type, idx++, base,
						       rbase - base, nid,
						       flags);
		}
		/* area below @rend is dealt with, forget about it */
		base = min(rend, end);
	}

	/* insert the remaining portion */
	if (base < end) {
		nr_new++;
		if (insert)
			memblock_insert_region(type, idx, base, end - base,
					       nid, flags);
	}

	if (!nr_new)
		return 0;

	/*
	 * If this was the first round, resize array and repeat for actual
	 * insertions; otherwise, merge and return.
	 */
	if (!insert) {
		while (type->cnt + nr_new > type->max)
			if (memblock_double_array(type, obase, size) < 0)
				return -ENOMEM;
		insert = true;
		goto repeat;
	} else {
		memblock_merge_regions(type);
		return 0;
	}
}

int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
				       int nid)
{
	return memblock_add_range(&memblock.memory, base, size, nid, 0);
}

int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("memblock_add: [%pa-%pa] %pF\n",
		     &base, &end, (void *)_RET_IP_);

	return memblock_add_range(&memblock.memory, base, size, MAX_NUMNODES, 0);
}
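
/*
 * Typical early-boot usage (hypothetical addresses and placeholder names):
 *
 *	memblock_add(0x80000000, SZ_256M);
 *	memblock_add(0x88000000, SZ_256M);
 *	memblock_reserve(kernel_base, kernel_size);
 *
 * Overlapping adds are fine: the two calls above merge into a single
 * [0x80000000, 0x98000000) entry in memblock.memory, while the
 * reservation keeps later allocations away from the kernel image.
 */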

/**
 * memblock_isolate_range - isolate given range into disjoint memblocks
 * @type: memblock type to isolate range for
 * @base: base of range to isolate
 * @size: size of range to isolate
 * @start_rgn: out parameter for the start of isolated region
 * @end_rgn: out parameter for the end of isolated region
 *
 * Walk @type and ensure that regions don't cross the boundaries defined by
 * [@base,@base+@size).  Crossing regions are split at the boundaries,
 * which may create at most two more regions.  The index of the first
 * region inside the range is returned in *@start_rgn and end in *@end_rgn.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int __init_memblock memblock_isolate_range(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size,
					int *start_rgn, int *end_rgn)
{
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int idx;
	struct memblock_region *rgn;

	*start_rgn = *end_rgn = 0;

	if (!size)
		return 0;

	/* we'll create at most two more regions */
	while (type->cnt + 2 > type->max)
		if (memblock_double_array(type, base, size) < 0)
			return -ENOMEM;

	for_each_memblock_type(type, rgn) {
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;

		if (rbase < base) {
			/*
			 * @rgn intersects from below.  Split and continue
			 * to process the next region - the new top half.
			 */
			rgn->base = base;
			rgn->size -= base - rbase;
			type->total_size -= base - rbase;
			memblock_insert_region(type, idx, rbase, base - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else if (rend > end) {
			/*
			 * @rgn intersects from above.  Split and redo the
			 * current region - the new bottom half.
			 */
			rgn->base = end;
686 687
			rgn->size -= end - rbase;
			type->total_size -= end - rbase;
			memblock_insert_region(type, idx--, rbase, end - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else {
			/* @rgn is fully contained, record it */
			if (!*end_rgn)
				*start_rgn = idx;
			*end_rgn = idx + 1;
		}
	}

	return 0;
}
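
/*
 * Illustrative example: isolating [0x1800000, 0x2800000) out of a single
 * region [0x1000000, 0x3000000) splits it into three entries,
 *
 *	[0x1000000, 0x1800000) [0x1800000, 0x2800000) [0x2800000, 0x3000000)
 *
 * with *start_rgn/*end_rgn bracketing the middle one, so that callers
 * such as memblock_remove_range() only ever operate on whole regions.
 */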

static int __init_memblock memblock_remove_range(struct memblock_type *type,
					  phys_addr_t base, phys_addr_t size)
{
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = end_rgn - 1; i >= start_rgn; i--)
		memblock_remove_region(type, i);
	return 0;
}

int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
{
	return memblock_remove_range(&memblock.memory, base, size);
}

int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("   memblock_free: [%pa-%pa] %pF\n",
		     &base, &end, (void *)_RET_IP_);

	kmemleak_free_part_phys(base, size);
	return memblock_remove_range(&memblock.reserved, base, size);
}

int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
{
	phys_addr_t end = base + size - 1;

	memblock_dbg("memblock_reserve: [%pa-%pa] %pF\n",
		     &base, &end, (void *)_RET_IP_);

	return memblock_add_range(&memblock.reserved, base, size, MAX_NUMNODES, 0);
}

/**
 * memblock_setclr_flag - set or clear flag for a memory region
 * @base: base address of the region
 * @size: size of the region
 * @set: set (1) or clear (0) the flag
 * @flag: the flag to update
 *
 * This function isolates region [@base, @base + @size), and sets/clears flag.
 *
 * Return 0 on success, -errno on failure.
 */
static int __init_memblock memblock_setclr_flag(phys_addr_t base,
				phys_addr_t size, int set, int flag)
{
	struct memblock_type *type = &memblock.memory;
	int i, ret, start_rgn, end_rgn;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++)
		if (set)
			memblock_set_region_flags(&type->regions[i], flag);
		else
			memblock_clear_region_flags(&type->regions[i], flag);

	memblock_merge_regions(type);
	return 0;
}

/**
 * memblock_mark_hotplug - Mark hotpluggable memory with flag MEMBLOCK_HOTPLUG.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 1, MEMBLOCK_HOTPLUG);
}

/**
 * memblock_clear_hotplug - Clear flag MEMBLOCK_HOTPLUG for a specified region.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 0, MEMBLOCK_HOTPLUG);
}

/**
 * memblock_mark_mirror - Mark mirrored memory with flag MEMBLOCK_MIRROR.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size)
{
	system_has_some_mirror = true;

	return memblock_setclr_flag(base, size, 1, MEMBLOCK_MIRROR);
}

/**
 * memblock_mark_nomap - Mark a memory region with flag MEMBLOCK_NOMAP.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_nomap(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 1, MEMBLOCK_NOMAP);
}
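
/*
 * Illustrative sketch (placeholder variables): platform code that has
 * learned from firmware which ranges are hot-removable or mirrored
 * would do
 *
 *	memblock_mark_hotplug(hotplug_base, hotplug_size);
 *	memblock_mark_mirror(mirror_base, mirror_size);
 *
 * after which the free-range iterators below honour the flags:
 * hotpluggable ranges are skipped while movable_node is enabled, and
 * MEMBLOCK_MIRROR requests are satisfied only from mirrored ranges.
 */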

/**
 * __next_reserved_mem_region - next function for for_each_reserved_mem_region()
 * @idx: pointer to u64 loop variable
 * @out_start: ptr to phys_addr_t for start address of the region, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the region, can be %NULL
 *
 * Iterate over all reserved memory regions.
 */
void __init_memblock __next_reserved_mem_region(u64 *idx,
					   phys_addr_t *out_start,
					   phys_addr_t *out_end)
{
	struct memblock_type *type = &memblock.reserved;

	if (*idx < type->cnt) {
		struct memblock_region *r = &type->regions[*idx];
		phys_addr_t base = r->base;
		phys_addr_t size = r->size;

		if (out_start)
			*out_start = base;
		if (out_end)
			*out_end = base + size - 1;

		*idx += 1;
		return;
	}

	/* signal end of iteration */
	*idx = ULLONG_MAX;
}

/**
 * __next_mem_range - next function for for_each_free_mem_range() etc.
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Find the first area from *@idx which matches @nid, fill the out
 * parameters, and update *@idx for the next iteration.  The lower 32bit of
 * *@idx contains index into type_a and the upper 32bit indexes the
 * areas before each region in type_b.	For example, if type_b regions
 * look like the following,
 *
 *	0:[0-16), 1:[32-48), 2:[128-130)
 *
 * The upper 32bit indexes the following regions.
 *
 *	0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX)
 *
 * As both region arrays are sorted, the function advances the two indices
 * in lockstep and returns each intersection.
 */
void __init_memblock __next_mem_range(u64 *idx, int nid, ulong flags,
				      struct memblock_type *type_a,
				      struct memblock_type *type_b,
				      phys_addr_t *out_start,
				      phys_addr_t *out_end, int *out_nid)
{
	int idx_a = *idx & 0xffffffff;
	int idx_b = *idx >> 32;

887 888
	if (WARN_ONCE(nid == MAX_NUMNODES,
	"Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
889
		nid = NUMA_NO_NODE;
890

891 892 893
	for (; idx_a < type_a->cnt; idx_a++) {
		struct memblock_region *m = &type_a->regions[idx_a];

		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;
		int	    m_nid = memblock_get_region_node(m);

		/* only memory regions are associated with nodes, check it */
		if (nid != NUMA_NO_NODE && nid != m_nid)
			continue;

		/* skip hotpluggable memory regions if needed */
		if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
			continue;

		/* if we want mirror memory skip non-mirror memory regions */
		if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
			continue;

		/* skip nomap memory unless we were asked for it explicitly */
		if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m))
			continue;

		if (!type_b) {
			if (out_start)
				*out_start = m_start;
			if (out_end)
				*out_end = m_end;
			if (out_nid)
				*out_nid = m_nid;
			idx_a++;
			*idx = (u32)idx_a | (u64)idx_b << 32;
			return;
		}

		/* scan areas before each reservation */
		for (; idx_b < type_b->cnt + 1; idx_b++) {
			struct memblock_region *r;
			phys_addr_t r_start;
			phys_addr_t r_end;

			r = &type_b->regions[idx_b];
			r_start = idx_b ? r[-1].base + r[-1].size : 0;
			r_end = idx_b < type_b->cnt ?
				r->base : ULLONG_MAX;

			/*
			 * if idx_b advanced past idx_a,
			 * break out to advance idx_a
			 */
			if (r_start >= m_end)
				break;
			/* if the two regions intersect, we're done */
			if (m_start < r_end) {
				if (out_start)
					*out_start =
						max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = m_nid;
				/*
				 * The region which ends first is
				 * advanced for the next iteration.
				 */
				if (m_end <= r_end)
					idx_a++;
				else
					idx_b++;
				*idx = (u32)idx_a | (u64)idx_b << 32;
				return;
			}
		}
	}

	/* signal end of iteration */
	*idx = ULLONG_MAX;
}
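
/*
 * Illustrative usage: callers normally go through the wrapper macros
 * rather than calling this directly, e.g.
 *
 *	phys_addr_t start, end;
 *	u64 i;
 *
 *	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
 *				&start, &end, NULL)
 *		pr_info("free: [%pa-%pa]\n", &start, &end);
 *
 * which walks memblock.memory minus memblock.reserved using the split
 * 32/32 bit cursor described above.
 */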

/**
 * __next_mem_range_rev - generic next function for for_each_*_range_rev()
 *
 * Finds the next range from type_a which is not marked as unsuitable
 * in type_b.
 *
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Reverse of __next_mem_range().
 */
void __init_memblock __next_mem_range_rev(u64 *idx, int nid, ulong flags,
					  struct memblock_type *type_a,
					  struct memblock_type *type_b,
					  phys_addr_t *out_start,
					  phys_addr_t *out_end, int *out_nid)
{
	int idx_a = *idx & 0xffffffff;
	int idx_b = *idx >> 32;

	if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	if (*idx == (u64)ULLONG_MAX) {
		idx_a = type_a->cnt - 1;
		if (type_b != NULL)
			idx_b = type_b->cnt;
		else
			idx_b = 0;
	}

	for (; idx_a >= 0; idx_a--) {
		struct memblock_region *m = &type_a->regions[idx_a];

		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;
		int m_nid = memblock_get_region_node(m);

		/* only memory regions are associated with nodes, check it */
		if (nid != NUMA_NO_NODE && nid != m_nid)
			continue;

		/* skip hotpluggable memory regions if needed */
		if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
			continue;

		/* if we want mirror memory skip non-mirror memory regions */
		if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
			continue;

		/* skip nomap memory unless we were asked for it explicitly */
		if (!(flags & MEMBLOCK_NOMAP) && memblock_is_nomap(m))
			continue;

		if (!type_b) {
			if (out_start)
				*out_start = m_start;
			if (out_end)
				*out_end = m_end;
			if (out_nid)
				*out_nid = m_nid;
			idx_a--;
			*idx = (u32)idx_a | (u64)idx_b << 32;
			return;
		}

		/* scan areas before each reservation */
		for (; idx_b >= 0; idx_b--) {
			struct memblock_region *r;
			phys_addr_t r_start;
			phys_addr_t r_end;

			r = &type_b->regions[idx_b];
			r_start = idx_b ? r[-1].base + r[-1].size : 0;
			r_end = idx_b < type_b->cnt ?
				r->base : ULLONG_MAX;
			/*
			 * if idx_b advanced past idx_a,
			 * break out to advance idx_a
			 */

			if (r_end <= m_start)
				break;
			/* if the two regions intersect, we're done */
			if (m_end > r_start) {
				if (out_start)
					*out_start = max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = m_nid;
				if (m_start >= r_start)
					idx_a--;
				else
					idx_b--;
				*idx = (u32)idx_a | (u64)idx_b << 32;
				return;
			}
		}
	}
	/* signal end of iteration */
	*idx = ULLONG_MAX;
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
/*
 * Common iterator interface used to define for_each_mem_range().
 */
void __init_memblock __next_mem_pfn_range(int *idx, int nid,
				unsigned long *out_start_pfn,
				unsigned long *out_end_pfn, int *out_nid)
{
	struct memblock_type *type = &memblock.memory;
	struct memblock_region *r;

	while (++*idx < type->cnt) {
		r = &type->regions[*idx];

		if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
			continue;
		if (nid == MAX_NUMNODES || nid == r->nid)
			break;
	}
	if (*idx >= type->cnt) {
		*idx = -1;
		return;
	}

	if (out_start_pfn)
		*out_start_pfn = PFN_UP(r->base);
	if (out_end_pfn)
		*out_end_pfn = PFN_DOWN(r->base + r->size);
	if (out_nid)
		*out_nid = r->nid;
}

unsigned long __init_memblock memblock_next_valid_pfn(unsigned long pfn,
						      unsigned long max_pfn)
{
	struct memblock_type *type = &memblock.memory;
	unsigned int right = type->cnt;
	unsigned int mid, left = 0;
	phys_addr_t addr = PFN_PHYS(pfn + 1);

	do {
		mid = (right + left) / 2;

		if (addr < type->regions[mid].base)
			right = mid;
		else if (addr >= (type->regions[mid].base +
				  type->regions[mid].size))
			left = mid + 1;
		else {
			/* addr is within the region, so pfn + 1 is valid */
			return min(pfn + 1, max_pfn);
		}
	} while (left < right);

	return min(PHYS_PFN(type->regions[right].base), max_pfn);
}

/**
 * memblock_set_node - set node ID on memblock regions
 * @base: base of area to set node ID for
 * @size: size of area to set node ID for
 * @type: memblock type to set node ID for
 * @nid: node ID to set
 *
 * Set the nid of memblock @type regions in [@base,@base+@size) to @nid.
 * Regions which cross the area boundaries are split as necessary.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
				      struct memblock_type *type, int nid)
{
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++)
		memblock_set_region_node(&type->regions[i], nid);

	memblock_merge_regions(type);
	return 0;
}
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

static phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
					phys_addr_t align, phys_addr_t start,
					phys_addr_t end, int nid, ulong flags)
{
	phys_addr_t found;

	if (!align)
		align = SMP_CACHE_BYTES;

	found = memblock_find_in_range_node(size, align, start, end, nid,
					    flags);
	if (found && !memblock_reserve(found, size)) {
		/*
		 * The min_count is set to 0 so that memblock allocations are
		 * never reported as leaks.
		 */
		kmemleak_alloc_phys(found, size, 0, 0);
		return found;
	}
	return 0;
}

phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align,
					phys_addr_t start, phys_addr_t end,
					ulong flags)
{
	return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE,
					flags);
}

static phys_addr_t __init memblock_alloc_base_nid(phys_addr_t size,
					phys_addr_t align, phys_addr_t max_addr,
					int nid, ulong flags)
{
	return memblock_alloc_range_nid(size, align, 0, max_addr, nid, flags);
}

phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	ulong flags = choose_memblock_flags();
	phys_addr_t ret;

again:
	ret = memblock_alloc_base_nid(size, align, MEMBLOCK_ALLOC_ACCESSIBLE,
				      nid, flags);

	if (!ret && (flags & MEMBLOCK_MIRROR)) {
		flags &= ~MEMBLOCK_MIRROR;
		goto again;
	}
	return ret;
}

phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	return memblock_alloc_base_nid(size, align, max_addr, NUMA_NO_NODE,
				       MEMBLOCK_NONE);
}

phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	phys_addr_t alloc;

	alloc = __memblock_alloc_base(size, align, max_addr);

	if (alloc == 0)
		panic("ERROR: Failed to allocate %pa bytes below %pa.\n",
		      &size, &max_addr);

	return alloc;
}

phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
{
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}
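
/*
 * Illustrative usage: the common early-boot pattern, before the page
 * allocator exists, is simply
 *
 *	phys_addr_t pa = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
 *
 * which returns a physical address below memblock.current_limit and
 * panics in memblock_alloc_base() if nothing suitable is found.
 */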

phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	phys_addr_t res = memblock_alloc_nid(size, align, nid);

	if (res)
		return res;
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}

/**
 * memblock_virt_alloc_internal - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region to allocate (phys address)
 * @max_addr: the upper bound of the memory region to allocate (phys address)
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * The @min_addr limit is dropped if it can not be satisfied and the allocation
 * will fall back to memory below @min_addr. Also, allocation may fall back
 * to any node in the system if the specified node can not
 * hold the requested memory.
 *
 * The allocation is performed from memory region limited by
 * memblock.current_limit if @max_addr == %BOOTMEM_ALLOC_ACCESSIBLE.
 *
 * The memory block is aligned on SMP_CACHE_BYTES if @align == 0.
 *
 * The phys address of allocated boot memory block is converted to virtual and
 * allocated memory is reset to 0.
 *
 * In addition, the function sets min_count to 0 using kmemleak_alloc for the
 * allocated boot memory block, so that it is never reported as a leak.
 *
 * RETURNS:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
static void * __init memblock_virt_alloc_internal(
				phys_addr_t size, phys_addr_t align,
				phys_addr_t min_addr, phys_addr_t max_addr,
				int nid)
{
	phys_addr_t alloc;
	void *ptr;
	ulong flags = choose_memblock_flags();

	if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	/*
	 * Detect any accidental use of these APIs after slab is ready, as at
	 * this moment memblock may be deinitialized already and its
	 * internal data may be destroyed (after execution of free_all_bootmem)
	 */
	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, nid);

	if (!align)
		align = SMP_CACHE_BYTES;

	if (max_addr > memblock.current_limit)
		max_addr = memblock.current_limit;
again:
	alloc = memblock_find_in_range_node(size, align, min_addr, max_addr,
					    nid, flags);
	if (alloc && !memblock_reserve(alloc, size))
		goto done;

	if (nid != NUMA_NO_NODE) {
		alloc = memblock_find_in_range_node(size, align, min_addr,
						    max_addr, NUMA_NO_NODE,
						    flags);
		if (alloc && !memblock_reserve(alloc, size))
			goto done;
	}

	if (min_addr) {
		min_addr = 0;
		goto again;
	}

	if (flags & MEMBLOCK_MIRROR) {
		flags &= ~MEMBLOCK_MIRROR;
		pr_warn("Could not allocate %pap bytes of mirrored memory\n",
			&size);
		goto again;
	}

	return NULL;
done:
	ptr = phys_to_virt(alloc);
	memset(ptr, 0, size);

	/*
	 * The min_count is set to 0 so that bootmem allocated blocks
	 * are never reported as leaks. This is because many of these blocks
	 * are only referred via the physical address which is not
	 * looked up by kmemleak.
	 */
	kmemleak_alloc(ptr, size, 0, 0);

	return ptr;
}

/**
 * memblock_virt_alloc_try_nid_nopanic - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public version of _memblock_virt_alloc_try_nid_nopanic() which provides
 * additional debug information (including caller info), if enabled.
 *
 * RETURNS:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
void * __init memblock_virt_alloc_try_nid_nopanic(
				phys_addr_t size, phys_addr_t align,
				phys_addr_t min_addr, phys_addr_t max_addr,
				int nid)
{
	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx %pF\n",
		     __func__, (u64)size, (u64)align, nid, (u64)min_addr,
		     (u64)max_addr, (void *)_RET_IP_);
	return memblock_virt_alloc_internal(size, align, min_addr,
					     max_addr, nid);
}
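
/*
 * Illustrative usage (hypothetical caller and sizes):
 *
 *	map = memblock_virt_alloc_try_nid_nopanic(map_size,
 *			SMP_CACHE_BYTES, 0, BOOTMEM_ALLOC_ACCESSIBLE,
 *			nid);
 *
 * returns zeroed, directly usable virtual memory or NULL; the panicking
 * variant below is for allocations the kernel cannot continue without.
 */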

/**
 * memblock_virt_alloc_try_nid - allocate boot memory block with panicking
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public panicking version of _memblock_virt_alloc_try_nid_nopanic()
 * which provides debug information (including caller info), if enabled,
 * and panics if the request can not be satisfied.
 *
 * RETURNS:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
void * __init memblock_virt_alloc_try_nid(
			phys_addr_t size, phys_addr_t align,
			phys_addr_t min_addr, phys_addr_t max_addr,
			int nid)
{
	void *ptr;

	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx %pF\n",
		     __func__, (u64)size, (u64)align, nid, (u64)min_addr,
		     (u64)max_addr, (void *)_RET_IP_);
	ptr = memblock_virt_alloc_internal(size, align,
					   min_addr, max_addr, nid);
	if (ptr)
		return ptr;

	panic("%s: Failed to allocate %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx\n",
	      __func__, (u64)size, (u64)align, nid, (u64)min_addr,
	      (u64)max_addr);
	return NULL;
}

/**
 * __memblock_free_early - free boot memory block
 * @base: phys starting address of the boot memory block
 * @size: size of the boot memory block in bytes
 *
 * Free boot memory block previously allocated by memblock_virt_alloc_xx() API.
 * The freed memory will not be released to the buddy allocator.
 */
void __init __memblock_free_early(phys_addr_t base, phys_addr_t size)
{
	memblock_dbg("%s: [%#016llx-%#016llx] %pF\n",
		     __func__, (u64)base, (u64)base + size - 1,
		     (void *)_RET_IP_);
	kmemleak_free_part_phys(base, size);
	memblock_remove_range(&memblock.reserved, base, size);
}

/*
 * __memblock_free_late - free bootmem block pages directly to buddy allocator
 * @base: phys starting address of the boot memory block
 * @size: size of the boot memory block in bytes
 *
 * This is only useful when the bootmem allocator has already been torn
 * down, but we are still initializing the system.  Pages are released directly
 * to the buddy allocator, no bootmem metadata is updated because it is gone.
 */
void __init __memblock_free_late(phys_addr_t base, phys_addr_t size)
{
	u64 cursor, end;

	memblock_dbg("%s: [%#016llx-%#016llx] %pF\n",
		     __func__, (u64)base, (u64)base + size - 1,
		     (void *)_RET_IP_);
	kmemleak_free_part_phys(base, size);
	cursor = PFN_UP(base);
	end = PFN_DOWN(base + size);

	for (; cursor < end; cursor++) {
		__free_pages_bootmem(pfn_to_page(cursor), cursor, 0);
		totalram_pages++;
	}
}

/*
 * Remaining API functions
 */

phys_addr_t __init_memblock memblock_phys_mem_size(void)
{
	return memblock.memory.total_size;
}

phys_addr_t __init_memblock memblock_reserved_size(void)
{
	return memblock.reserved.total_size;
}

phys_addr_t __init memblock_mem_size(unsigned long limit_pfn)
{
	unsigned long pages = 0;
	struct memblock_region *r;
	unsigned long start_pfn, end_pfn;

	for_each_memblock(memory, r) {
		start_pfn = memblock_region_memory_base_pfn(r);
		end_pfn = memblock_region_memory_end_pfn(r);
		start_pfn = min_t(unsigned long, start_pfn, limit_pfn);
		end_pfn = min_t(unsigned long, end_pfn, limit_pfn);
		pages += end_pfn - start_pfn;
	}

	return PFN_PHYS(pages);
}

/* lowest address */
phys_addr_t __init_memblock memblock_start_of_DRAM(void)
{
	return memblock.memory.regions[0].base;
}

phys_addr_t __init_memblock memblock_end_of_DRAM(void)
{
	int idx = memblock.memory.cnt - 1;

	return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
}

static phys_addr_t __init_memblock __find_max_addr(phys_addr_t limit)
{
	phys_addr_t max_addr = (phys_addr_t)ULLONG_MAX;
	struct memblock_region *r;

	/*
	 * translate the memory @limit size into the max address within one of
	 * the memory memblock regions, if the @limit exceeds the total size
	 * of those regions, max_addr will keep original value ULLONG_MAX
	 */
	for_each_memblock(memory, r) {
		if (limit <= r->size) {
			max_addr = r->base + limit;
			break;
		}
		limit -= r->size;
	}

	return max_addr;
}

void __init memblock_enforce_memory_limit(phys_addr_t limit)
{
	phys_addr_t max_addr = (phys_addr_t)ULLONG_MAX;

	if (!limit)
		return;

	max_addr = __find_max_addr(limit);

	/* @limit exceeds the total size of the memory, do nothing */
	if (max_addr == (phys_addr_t)ULLONG_MAX)
		return;

	/* truncate both memory and reserved regions */
	memblock_remove_range(&memblock.memory, max_addr,
			      (phys_addr_t)ULLONG_MAX);
	memblock_remove_range(&memblock.reserved, max_addr,
			      (phys_addr_t)ULLONG_MAX);
}

void __init memblock_mem_limit_remove_map(phys_addr_t limit)
{
	struct memblock_type *type = &memblock.memory;
	phys_addr_t max_addr;
	int i, ret, start_rgn, end_rgn;

	if (!limit)
		return;

	max_addr = __find_max_addr(limit);

	/* @limit exceeds the total size of the memory, do nothing */
	if (max_addr == (phys_addr_t)ULLONG_MAX)
		return;

	ret = memblock_isolate_range(type, max_addr, (phys_addr_t)ULLONG_MAX,
				&start_rgn, &end_rgn);
	if (ret)
		return;

	/* remove all the MAP regions above the limit */
	for (i = end_rgn - 1; i >= start_rgn; i--) {
		if (!memblock_is_nomap(&type->regions[i]))
			memblock_remove_region(type, i);
	}
	/* truncate the reserved regions */
	memblock_remove_range(&memblock.reserved, max_addr,
			      (phys_addr_t)ULLONG_MAX);
}

static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
{
	unsigned int left = 0, right = type->cnt;

	do {
		unsigned int mid = (right + left) / 2;

		if (addr < type->regions[mid].base)
			right = mid;
		else if (addr >= (type->regions[mid].base +
				  type->regions[mid].size))
			left = mid + 1;
		else
			return mid;
	} while (left < right);
	return -1;
}

bool __init memblock_is_reserved(phys_addr_t addr)
{
	return memblock_search(&memblock.reserved, addr) != -1;
}

bool __init_memblock memblock_is_memory(phys_addr_t addr)
{
	return memblock_search(&memblock.memory, addr) != -1;
}

int __init_memblock memblock_is_map_memory(phys_addr_t addr)
{
	int i = memblock_search(&memblock.memory, addr);

	if (i == -1)
		return false;
	return !memblock_is_nomap(&memblock.memory.regions[i]);
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int __init_memblock memblock_search_pfn_nid(unsigned long pfn,
			 unsigned long *start_pfn, unsigned long *end_pfn)
{
	struct memblock_type *type = &memblock.memory;
	int mid = memblock_search(type, PFN_PHYS(pfn));

	if (mid == -1)
		return -1;

	*start_pfn = PFN_DOWN(type->regions[mid].base);
	*end_pfn = PFN_DOWN(type->regions[mid].base + type->regions[mid].size);

	return type->regions[mid].nid;
}
#endif

/**
 * memblock_is_region_memory - check if a region is a subset of memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base+@size) is a subset of a memory block.
 *
 * RETURNS:
 * 0 if false, non-zero if true
 */
int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
{
	int idx = memblock_search(&memblock.memory, base);
	phys_addr_t end = base + memblock_cap_size(base, &size);

	if (idx == -1)
		return 0;
	return (memblock.memory.regions[idx].base +
		 memblock.memory.regions[idx].size) >= end;
}

/**
 * memblock_is_region_reserved - check if a region intersects reserved memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base+@size) intersects a reserved memory block.
 *
 * RETURNS:
 * True if they intersect, false if not.
 */
bool __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
{
	memblock_cap_size(base, &size);
	return memblock_overlaps_region(&memblock.reserved, base, size);
}

void __init_memblock memblock_trim_memory(phys_addr_t align)
{
	phys_addr_t start, end, orig_start, orig_end;
	struct memblock_region *r;

	for_each_memblock(memory, r) {
		orig_start = r->base;
		orig_end = r->base + r->size;
		start = round_up(orig_start, align);
		end = round_down(orig_end, align);

		if (start == orig_start && end == orig_end)
			continue;

		if (start < end) {
			r->base = start;
			r->size = end - start;
		} else {
			memblock_remove_region(&memblock.memory,
					       r - memblock.memory.regions);
			r--;
		}
	}
}

void __init_memblock memblock_set_current_limit(phys_addr_t limit)
{
	memblock.current_limit = limit;
}

phys_addr_t __init_memblock memblock_get_current_limit(void)
{
	return memblock.current_limit;
}

static void __init_memblock memblock_dump(struct memblock_type *type, char *name)
{
	phys_addr_t base, end, size;
	unsigned long flags;
	int idx;
	struct memblock_region *rgn;

	pr_info(" %s.cnt  = 0x%lx\n", name, type->cnt);

	for_each_memblock_type(type, rgn) {
		char nid_buf[32] = "";

		base = rgn->base;
		size = rgn->size;
		end = base + size - 1;
		flags = rgn->flags;
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
		if (memblock_get_region_node(rgn) != MAX_NUMNODES)
			snprintf(nid_buf, sizeof(nid_buf), " on node %d",
				 memblock_get_region_node(rgn));
#endif
		pr_info(" %s[%#x]\t[%pa-%pa], %pa bytes%s flags: %#lx\n",
			name, idx, &base, &end, &size, nid_buf, flags);
	}
}

void __init_memblock __memblock_dump_all(void)
{
	pr_info("MEMBLOCK configuration:\n");
	pr_info(" memory size = %pa reserved size = %pa\n",
		&memblock.memory.total_size,
		&memblock.reserved.total_size);

	memblock_dump(&memblock.memory, "memory");
	memblock_dump(&memblock.reserved, "reserved");
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	memblock_dump(&memblock.physmem, "physmem");
#endif
}

void __init memblock_allow_resize(void)
{
	memblock_can_resize = 1;
}

static int __init early_memblock(char *p)
{
	if (p && strstr(p, "debug"))
		memblock_debug = 1;
	return 0;
}
early_param("memblock", early_memblock);
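
/*
 * Booting with "memblock=debug" on the kernel command line sets
 * memblock_debug above, which enables the memblock_dbg() output
 * throughout this file.
 */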

#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_ARCH_DISCARD_MEMBLOCK)

static int memblock_debug_show(struct seq_file *m, void *private)
{
	struct memblock_type *type = m->private;
	struct memblock_region *reg;
	int i;
	phys_addr_t end;

	for (i = 0; i < type->cnt; i++) {
		reg = &type->regions[i];
		end = reg->base + reg->size - 1;

		seq_printf(m, "%4d: ", i);
		seq_printf(m, "%pa..%pa\n", &reg->base, &end);
	}
	return 0;
}

static int memblock_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, memblock_debug_show, inode->i_private);
}

static const struct file_operations memblock_debug_fops = {
	.open = memblock_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __init memblock_init_debugfs(void)
{
	struct dentry *root = debugfs_create_dir("memblock", NULL);
	if (!root)
		return -ENXIO;
	debugfs_create_file("memory", S_IRUGO, root, &memblock.memory, &memblock_debug_fops);
	debugfs_create_file("reserved", S_IRUGO, root, &memblock.reserved, &memblock_debug_fops);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	debugfs_create_file("physmem", S_IRUGO, root, &memblock.physmem, &memblock_debug_fops);
#endif

	return 0;
}
__initcall(memblock_init_debugfs);

#endif /* CONFIG_DEBUG_FS */