/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>

#include <asm-generic/sections.h>
#include <linux/io.h>

#include "internal.h"

static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS] __initdata_memblock;
#endif

struct memblock memblock __initdata_memblock = {
	.memory.regions		= memblock_memory_init_regions,
	.memory.cnt		= 1,	/* empty dummy entry */
	.memory.max		= INIT_MEMBLOCK_REGIONS,

	.reserved.regions	= memblock_reserved_init_regions,
	.reserved.cnt		= 1,	/* empty dummy entry */
	.reserved.max		= INIT_MEMBLOCK_REGIONS,

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	.physmem.regions	= memblock_physmem_init_regions,
	.physmem.cnt		= 1,	/* empty dummy entry */
	.physmem.max		= INIT_PHYSMEM_REGIONS,
#endif

	.bottom_up		= false,
	.current_limit		= MEMBLOCK_ALLOC_ANYWHERE,
};

int memblock_debug __initdata_memblock;
#ifdef CONFIG_MOVABLE_NODE
bool movable_node_enabled __initdata_memblock = false;
#endif
static int memblock_can_resize __initdata_memblock;
static int memblock_memory_in_slab __initdata_memblock = 0;
static int memblock_reserved_in_slab __initdata_memblock = 0;

/* inline so we don't get a warning when pr_debug is compiled out */
static __init_memblock const char *
memblock_type_name(struct memblock_type *type)
{
	if (type == &memblock.memory)
		return "memory";
	else if (type == &memblock.reserved)
		return "reserved";
	else
		return "unknown";
}

/* adjust *@size so that (@base + *@size) doesn't overflow, return new size */
static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
{
	return *size = min(*size, (phys_addr_t)ULLONG_MAX - base);
}
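
/*
 * Illustrative example (not from the original file): with a 64-bit
 * phys_addr_t, a region starting at base = 0xfffffffffffff000 with
 * *size = 0x2000 would wrap past the end of the address space, so the
 * size is clamped:
 *
 *	phys_addr_t size = 0x2000;
 *
 *	memblock_cap_size(0xfffffffffffff000ULL, &size);
 *	// size is now 0xfff, i.e. ULLONG_MAX - base
 */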

/*
 * Address comparison utilities
 */
static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
				       phys_addr_t base2, phys_addr_t size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

static long __init_memblock memblock_overlaps_region(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size)
{
	unsigned long i;

	for (i = 0; i < type->cnt; i++) {
		phys_addr_t rgnbase = type->regions[i].base;
		phys_addr_t rgnsize = type->regions[i].size;
		if (memblock_addrs_overlap(base, size, rgnbase, rgnsize))
			break;
	}

	return (i < type->cnt) ? i : -1;
}

/**
 * __memblock_find_range_bottom_up - find free area utility in bottom-up
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), find free area bottom-up.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
				phys_addr_t size, phys_addr_t align, int nid,
				ulong flags)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range(i, nid, flags, &this_start, &this_end, NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		cand = round_up(this_start, align);
		if (cand < this_end && this_end - cand >= size)
			return cand;
	}

	return 0;
}

/**
 * __memblock_find_range_top_down - find free area utility, in top-down
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), find free area top-down.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
			       phys_addr_t size, phys_addr_t align, int nid,
			       ulong flags)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range_reverse(i, nid, flags, &this_start, &this_end,
					NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		if (this_end < size)
			continue;

		cand = round_down(this_end - size, align);
		if (cand >= this_start)
			return cand;
	}

	return 0;
}

/**
 * memblock_find_in_range_node - find free area in given range and node
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Find @size free area aligned to @align in the specified range and node.
 *
 * When allocation direction is bottom-up, the @start should be greater
 * than the end of the kernel image. Otherwise, it will be trimmed. The
 * reason is that we want the bottom-up allocation just near the kernel
 * image so it is highly likely that the allocated memory and the kernel
 * will reside in the same node.
 *
 * If the bottom-up allocation fails, the search falls back to top-down.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
					phys_addr_t align, phys_addr_t start,
					phys_addr_t end, int nid, ulong flags)
{
	phys_addr_t kernel_end, ret;

	/* pump up @end */
	if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
		end = memblock.current_limit;

	/* avoid allocating the first page */
	start = max_t(phys_addr_t, start, PAGE_SIZE);
	end = max(start, end);
	kernel_end = __pa_symbol(_end);

	/*
	 * try bottom-up allocation only when bottom-up mode
	 * is set and @end is above the kernel image.
	 */
	if (memblock_bottom_up() && end > kernel_end) {
		phys_addr_t bottom_up_start;

		/* make sure we will allocate above the kernel */
		bottom_up_start = max(start, kernel_end);

		/* ok, try bottom-up allocation first */
		ret = __memblock_find_range_bottom_up(bottom_up_start, end,
						      size, align, nid, flags);
		if (ret)
			return ret;

		/*
		 * we always limit bottom-up allocation above the kernel,
		 * but top-down allocation doesn't have the limit, so
		 * retrying top-down allocation may succeed when bottom-up
		 * allocation failed.
		 *
		 * bottom-up allocation is expected to fail very rarely,
		 * so we use WARN_ONCE() here to see the stack trace if
		 * a failure happens.
		 */
		WARN_ONCE(1, "memblock: bottom-up allocation failed, "
			     "memory hotunplug may be affected\n");
	}

	return __memblock_find_range_top_down(start, end, size, align, nid,
					      flags);
}
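
/*
 * Illustrative sketch of the policy above: assuming bottom-up mode has
 * been requested (e.g. via memblock_set_bottom_up(true) during
 * movable_node setup), a caller could ask for any 1MB block:
 *
 *	phys_addr_t found;
 *
 *	found = memblock_find_in_range_node(SZ_1M, PAGE_SIZE, 0,
 *					    MEMBLOCK_ALLOC_ACCESSIBLE,
 *					    NUMA_NO_NODE, MEMBLOCK_NONE);
 *
 * The search first scans bottom-up above __pa_symbol(_end) and falls
 * back to a top-down scan (with a one-time warning) if that fails.
 */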

/**
 * memblock_find_in_range - find free area in given range
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 *
 * Find @size free area aligned to @align in the specified range.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
					phys_addr_t end, phys_addr_t size,
					phys_addr_t align)
{
	return memblock_find_in_range_node(size, align, start, end,
					    NUMA_NO_NODE, MEMBLOCK_NONE);
}

static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
{
	type->total_size -= type->regions[r].size;
	memmove(&type->regions[r], &type->regions[r + 1],
		(type->cnt - (r + 1)) * sizeof(type->regions[r]));
	type->cnt--;

	/* Special case for empty arrays */
	if (type->cnt == 0) {
		WARN_ON(type->total_size != 0);
		type->cnt = 1;
		type->regions[0].base = 0;
		type->regions[0].size = 0;
		type->regions[0].flags = 0;
		memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
	}
}

#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK

phys_addr_t __init_memblock get_allocated_memblock_reserved_regions_info(
					phys_addr_t *addr)
{
	if (memblock.reserved.regions == memblock_reserved_init_regions)
		return 0;

	*addr = __pa(memblock.reserved.regions);

	return PAGE_ALIGN(sizeof(struct memblock_region) *
			  memblock.reserved.max);
}

phys_addr_t __init_memblock get_allocated_memblock_memory_regions_info(
					phys_addr_t *addr)
{
	if (memblock.memory.regions == memblock_memory_init_regions)
		return 0;

	*addr = __pa(memblock.memory.regions);

	return PAGE_ALIGN(sizeof(struct memblock_region) *
			  memblock.memory.max);
}

#endif

/**
 * memblock_double_array - double the size of the memblock regions array
 * @type: memblock type of the regions array being doubled
 * @new_area_start: starting address of memory range to avoid overlap with
 * @new_area_size: size of memory range to avoid overlap with
 *
 * Double the size of the @type regions array. If memblock is being used to
 * allocate memory for a new reserved regions array and there is a previously
 * allocated memory range [@new_area_start,@new_area_start+@new_area_size]
 * waiting to be reserved, ensure the memory used by the new array does
 * not overlap.
 *
 * RETURNS:
 * 0 on success, -1 on failure.
 */
static int __init_memblock memblock_double_array(struct memblock_type *type,
						phys_addr_t new_area_start,
						phys_addr_t new_area_size)
{
	struct memblock_region *new_array, *old_array;
	phys_addr_t old_alloc_size, new_alloc_size;
	phys_addr_t old_size, new_size, addr;
	int use_slab = slab_is_available();
	int *in_slab;

	/* We don't allow resizing until we know about the reserved regions
	 * of memory that aren't suitable for allocation
	 */
	if (!memblock_can_resize)
		return -1;

	/* Calculate new doubled size */
	old_size = type->max * sizeof(struct memblock_region);
	new_size = old_size << 1;
	/*
	 * We need to allocate the new array aligned to PAGE_SIZE,
	 * so we can free it completely later.
	 */
	old_alloc_size = PAGE_ALIGN(old_size);
	new_alloc_size = PAGE_ALIGN(new_size);

	/* Retrieve the slab flag */
	if (type == &memblock.memory)
		in_slab = &memblock_memory_in_slab;
	else
		in_slab = &memblock_reserved_in_slab;

	/* Try to find some space for it.
	 *
	 * WARNING: We assume that either slab_is_available() is true and we
	 * use slab, or we use MEMBLOCK for allocations. That means that this
	 * is unsafe to
	 * use when bootmem is currently active (unless bootmem itself is
	 * implemented on top of MEMBLOCK which isn't the case yet)
	 *
	 * This should however not be an issue for now, as we currently only
	 * call into MEMBLOCK while it's still active, or much later when slab
	 * is active for memory hotplug operations
	 */
	if (use_slab) {
		new_array = kmalloc(new_size, GFP_KERNEL);
		addr = new_array ? __pa(new_array) : 0;
	} else {
		/* only exclude range when trying to double reserved.regions */
		if (type != &memblock.reserved)
			new_area_start = new_area_size = 0;

		addr = memblock_find_in_range(new_area_start + new_area_size,
						memblock.current_limit,
						new_alloc_size, PAGE_SIZE);
		if (!addr && new_area_size)
			addr = memblock_find_in_range(0,
				min(new_area_start, memblock.current_limit),
				new_alloc_size, PAGE_SIZE);

		new_array = addr ? __va(addr) : NULL;
	}
	if (!addr) {
		pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
		       memblock_type_name(type), type->max, type->max * 2);
		return -1;
	}

	memblock_dbg("memblock: %s is doubled to %ld at [%#010llx-%#010llx]",
			memblock_type_name(type), type->max * 2, (u64)addr,
			(u64)addr + new_size - 1);

	/*
	 * Found space, we now need to move the array over before we add the
	 * reserved region since it may be our reserved array itself that is
	 * full.
	 */
	memcpy(new_array, type->regions, old_size);
	memset(new_array + type->max, 0, old_size);
	old_array = type->regions;
	type->regions = new_array;
	type->max <<= 1;

	/* Free old array. We needn't free it if the array is the static one */
	if (*in_slab)
		kfree(old_array);
	else if (old_array != memblock_memory_init_regions &&
		 old_array != memblock_reserved_init_regions)
		memblock_free(__pa(old_array), old_alloc_size);

	/*
	 * Reserve the new array if that comes from the memblock.  Otherwise, we
	 * needn't do it
	 */
	if (!use_slab)
		BUG_ON(memblock_reserve(addr, new_alloc_size));

	/* Update slab flag */
	*in_slab = use_slab;

	return 0;
}

/**
 * memblock_merge_regions - merge neighboring compatible regions
 * @type: memblock type to scan
 *
 * Scan @type and merge neighboring compatible regions.
 */
static void __init_memblock memblock_merge_regions(struct memblock_type *type)
{
	int i = 0;

	/* cnt never goes below 1 */
	while (i < type->cnt - 1) {
		struct memblock_region *this = &type->regions[i];
		struct memblock_region *next = &type->regions[i + 1];

		if (this->base + this->size != next->base ||
		    memblock_get_region_node(this) !=
		    memblock_get_region_node(next) ||
		    this->flags != next->flags) {
			BUG_ON(this->base + this->size > next->base);
			i++;
			continue;
		}

		this->size += next->size;
		/* move forward from next + 1, index of which is i + 2 */
		memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next));
		type->cnt--;
	}
}
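
/*
 * Worked example: if @type holds two touching regions [0x1000-0x2000) and
 * [0x2000-0x3000) with identical nid and flags, one pass leaves a single
 * region [0x1000-0x3000). Touching regions that differ in nid or flags
 * are left alone, which is what keeps flag boundaries intact.
 */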

/**
 * memblock_insert_region - insert new memblock region
 * @type:	memblock type to insert into
 * @idx:	index for the insertion point
 * @base:	base address of the new region
 * @size:	size of the new region
 * @nid:	node id of the new region
 * @flags:	flags of the new region
 *
 * Insert new memblock region [@base,@base+@size) into @type at @idx.
 * @type must already have extra room to accommodate the new region.
 */
static void __init_memblock memblock_insert_region(struct memblock_type *type,
						   int idx, phys_addr_t base,
						   phys_addr_t size,
						   int nid, unsigned long flags)
{
	struct memblock_region *rgn = &type->regions[idx];

	BUG_ON(type->cnt >= type->max);
	memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
	rgn->base = base;
	rgn->size = size;
	rgn->flags = flags;
	memblock_set_region_node(rgn, nid);
	type->cnt++;
	type->total_size += size;
}

/**
 * memblock_add_range - add new memblock region
 * @type: memblock type to add new region into
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 * @flags: flags of the new region
 *
 * Add new memblock region [@base,@base+@size) into @type.  The new region
 * is allowed to overlap with existing ones - overlaps don't affect already
 * existing regions.  @type is guaranteed to be minimal (all neighbouring
 * compatible regions are merged) after the addition.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_add_range(struct memblock_type *type,
				phys_addr_t base, phys_addr_t size,
				int nid, unsigned long flags)
{
	bool insert = false;
	phys_addr_t obase = base;
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int i, nr_new;

	if (!size)
		return 0;

	/* special case for empty array */
	if (type->regions[0].size == 0) {
		WARN_ON(type->cnt != 1 || type->total_size);
		type->regions[0].base = base;
		type->regions[0].size = size;
		type->regions[0].flags = flags;
		memblock_set_region_node(&type->regions[0], nid);
		type->total_size = size;
		return 0;
	}
repeat:
	/*
	 * The following is executed twice.  Once with %false @insert and
	 * then with %true.  The first counts the number of regions needed
	 * to accommodate the new area.  The second actually inserts them.
	 */
	base = obase;
	nr_new = 0;

	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;
		/*
		 * @rgn overlaps.  If it separates the lower part of new
		 * area, insert that portion.
		 */
		if (rbase > base) {
			nr_new++;
			if (insert)
				memblock_insert_region(type, i++, base,
						       rbase - base, nid,
						       flags);
		}
		/* area below @rend is dealt with, forget about it */
		base = min(rend, end);
	}

	/* insert the remaining portion */
	if (base < end) {
		nr_new++;
		if (insert)
			memblock_insert_region(type, i, base, end - base,
					       nid, flags);
	}

	/*
	 * If this was the first round, resize array and repeat for actual
	 * insertions; otherwise, merge and return.
	 */
	if (!insert) {
		while (type->cnt + nr_new > type->max)
			if (memblock_double_array(type, obase, size) < 0)
				return -ENOMEM;
		insert = true;
		goto repeat;
	} else {
		memblock_merge_regions(type);
		return 0;
	}
}
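
/*
 * Illustrative example: overlapping adds collapse into a minimal array.
 * Starting from an empty type,
 *
 *	memblock_add_range(&memblock.memory, 0, SZ_4M, MAX_NUMNODES, 0);
 *	memblock_add_range(&memblock.memory, SZ_2M, SZ_4M, MAX_NUMNODES, 0);
 *
 * leaves one region [0, 6M): the overlapping part [2M, 4M) is skipped on
 * the second add and the touching pieces are merged at the end.
 */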

int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
				       int nid)
{
	return memblock_add_range(&memblock.memory, base, size, nid, 0);
}

static int __init_memblock memblock_add_region(phys_addr_t base,
						phys_addr_t size,
						int nid,
						unsigned long flags)
{
	struct memblock_type *_rgn = &memblock.memory;

	memblock_dbg("memblock_add: [%#016llx-%#016llx] flags %#02lx %pF\n",
		     (unsigned long long)base,
		     (unsigned long long)base + size - 1,
		     flags, (void *)_RET_IP_);

	return memblock_add_range(_rgn, base, size, nid, flags);
}

int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
{
	return memblock_add_region(base, size, MAX_NUMNODES, 0);
}

/**
 * memblock_isolate_range - isolate given range into disjoint memblocks
 * @type: memblock type to isolate range for
 * @base: base of range to isolate
 * @size: size of range to isolate
 * @start_rgn: out parameter for the start of isolated region
 * @end_rgn: out parameter for the end of isolated region
 *
 * Walk @type and ensure that regions don't cross the boundaries defined by
 * [@base,@base+@size).  Crossing regions are split at the boundaries,
 * which may create at most two more regions.  The index of the first
 * region inside the range is returned in *@start_rgn and end in *@end_rgn.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int __init_memblock memblock_isolate_range(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size,
					int *start_rgn, int *end_rgn)
{
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int i;

	*start_rgn = *end_rgn = 0;

	if (!size)
		return 0;

	/* we'll create at most two more regions */
	while (type->cnt + 2 > type->max)
		if (memblock_double_array(type, base, size) < 0)
			return -ENOMEM;

	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;

		if (rbase < base) {
			/*
			 * @rgn intersects from below.  Split and continue
			 * to process the next region - the new top half.
			 */
			rgn->base = base;
			rgn->size -= base - rbase;
			type->total_size -= base - rbase;
			memblock_insert_region(type, i, rbase, base - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else if (rend > end) {
			/*
			 * @rgn intersects from above.  Split and redo the
			 * current region - the new bottom half.
			 */
			rgn->base = end;
			rgn->size -= end - rbase;
			type->total_size -= end - rbase;
			memblock_insert_region(type, i--, rbase, end - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else {
			/* @rgn is fully contained, record it */
			if (!*end_rgn)
				*start_rgn = i;
			*end_rgn = i + 1;
		}
	}

	return 0;
}
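
/*
 * Worked example: with a single region [0, 1G) in @type, isolating
 * base = 256M, size = 256M splits it into three regions,
 *
 *	[0, 256M)  [256M, 512M)  [512M, 1G)
 *
 * with *start_rgn == 1 and *end_rgn == 2, so the middle region can be
 * removed or re-flagged without touching its neighbours.
 */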

int __init_memblock memblock_remove_range(struct memblock_type *type,
					  phys_addr_t base, phys_addr_t size)
{
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = end_rgn - 1; i >= start_rgn; i--)
		memblock_remove_region(type, i);
	return 0;
}

int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
{
	return memblock_remove_range(&memblock.memory, base, size);
}


int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
{
	memblock_dbg("   memblock_free: [%#016llx-%#016llx] %pF\n",
		     (unsigned long long)base,
		     (unsigned long long)base + size - 1,
		     (void *)_RET_IP_);

	kmemleak_free_part(__va(base), size);
	return memblock_remove_range(&memblock.reserved, base, size);
}

static int __init_memblock memblock_reserve_region(phys_addr_t base,
						   phys_addr_t size,
						   int nid,
						   unsigned long flags)
{
	struct memblock_type *type = &memblock.reserved;

	memblock_dbg("memblock_reserve: [%#016llx-%#016llx] flags %#02lx %pF\n",
		     (unsigned long long)base,
		     (unsigned long long)base + size - 1,
		     flags, (void *)_RET_IP_);

	return memblock_add_range(type, base, size, nid, flags);
}

int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
{
	return memblock_reserve_region(base, size, MAX_NUMNODES, 0);
}

/**
 * memblock_setclr_flag - set or clear a flag on a range of memory regions
 * @base: base address of the region
 * @size: size of the region
 * @set: set (%1) or clear (%0) the flag
 * @flag: the flag to update
 *
 * This function isolates the region [@base, @base + @size), and sets/clears
 * the given flag on it.
 *
 * Return 0 on success, -errno on failure.
 */
static int __init_memblock memblock_setclr_flag(phys_addr_t base,
				phys_addr_t size, int set, int flag)
{
	struct memblock_type *type = &memblock.memory;
	int i, ret, start_rgn, end_rgn;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++)
		if (set)
			memblock_set_region_flags(&type->regions[i], flag);
		else
			memblock_clear_region_flags(&type->regions[i], flag);

	memblock_merge_regions(type);
	return 0;
}

/**
 * memblock_mark_hotplug - Mark hotpluggable memory with flag MEMBLOCK_HOTPLUG.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 1, MEMBLOCK_HOTPLUG);
}

/**
 * memblock_clear_hotplug - Clear flag MEMBLOCK_HOTPLUG for a specified region.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 0, MEMBLOCK_HOTPLUG);
}

/**
 * __next_mem_range - next function for for_each_free_mem_range() etc.
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Find the first area from *@idx which matches @nid, fill the out
 * parameters, and update *@idx for the next iteration.  The lower 32bit of
 * *@idx contains index into type_a and the upper 32bit indexes the
 * areas before each region in type_b.	For example, if type_b regions
 * look like the following,
 *
 *	0:[0-16), 1:[32-48), 2:[128-130)
 *
 * The upper 32bit indexes the following regions.
 *
 *	0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX)
 *
 * As both region arrays are sorted, the function advances the two indices
 * in lockstep and returns each intersection.
 */
void __init_memblock __next_mem_range(u64 *idx, int nid, ulong flags,
				      struct memblock_type *type_a,
				      struct memblock_type *type_b,
				      phys_addr_t *out_start,
				      phys_addr_t *out_end, int *out_nid)
{
	int idx_a = *idx & 0xffffffff;
	int idx_b = *idx >> 32;

	if (WARN_ONCE(nid == MAX_NUMNODES,
	"Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	for (; idx_a < type_a->cnt; idx_a++) {
		struct memblock_region *m = &type_a->regions[idx_a];

		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;
		int	    m_nid = memblock_get_region_node(m);

		/* only memory regions are associated with nodes, check it */
		if (nid != NUMA_NO_NODE && nid != m_nid)
			continue;

		/* skip hotpluggable memory regions if needed */
		if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
			continue;

		if (!type_b) {
			if (out_start)
				*out_start = m_start;
			if (out_end)
				*out_end = m_end;
			if (out_nid)
				*out_nid = m_nid;
			idx_a++;
			*idx = (u32)idx_a | (u64)idx_b << 32;
			return;
		}

		/* scan areas before each reservation */
		for (; idx_b < type_b->cnt + 1; idx_b++) {
			struct memblock_region *r;
			phys_addr_t r_start;
			phys_addr_t r_end;

			r = &type_b->regions[idx_b];
			r_start = idx_b ? r[-1].base + r[-1].size : 0;
			r_end = idx_b < type_b->cnt ?
				r->base : ULLONG_MAX;

			/*
			 * if idx_b advanced past idx_a,
			 * break out to advance idx_a
			 */
			if (r_start >= m_end)
				break;
			/* if the two regions intersect, we're done */
			if (m_start < r_end) {
				if (out_start)
					*out_start =
						max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = m_nid;
				/*
				 * The region which ends first is
				 * advanced for the next iteration.
				 */
				if (m_end <= r_end)
					idx_a++;
				else
					idx_b++;
				*idx = (u32)idx_a | (u64)idx_b << 32;
				return;
			}
		}
	}

	/* signal end of iteration */
	*idx = ULLONG_MAX;
}
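
/*
 * Typical use is via the wrapping iterator macros. A minimal sketch that
 * totals the free (unreserved) memory on node 0:
 *
 *	phys_addr_t start, end, free = 0;
 *	u64 i;
 *
 *	for_each_free_mem_range(i, 0, MEMBLOCK_NONE, &start, &end, NULL)
 *		free += end - start;
 *
 * Each iteration yields one intersection of memblock.memory minus
 * memblock.reserved, in ascending address order.
 */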

/**
 * __next_mem_range_rev - generic next function for for_each_*_range_rev()
 *
 * Finds the next range from type_a which is not marked as unsuitable
 * in type_b.
 *
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Reverse of __next_mem_range().
 */
void __init_memblock __next_mem_range_rev(u64 *idx, int nid, ulong flags,
					  struct memblock_type *type_a,
					  struct memblock_type *type_b,
					  phys_addr_t *out_start,
					  phys_addr_t *out_end, int *out_nid)
{
	int idx_a = *idx & 0xffffffff;
	int idx_b = *idx >> 32;

	if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	if (*idx == (u64)ULLONG_MAX) {
		idx_a = type_a->cnt - 1;
		idx_b = type_b->cnt;
	}

	for (; idx_a >= 0; idx_a--) {
		struct memblock_region *m = &type_a->regions[idx_a];

		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;
		int m_nid = memblock_get_region_node(m);

		/* only memory regions are associated with nodes, check it */
		if (nid != NUMA_NO_NODE && nid != m_nid)
			continue;

		/* skip hotpluggable memory regions if needed */
		if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
			continue;

		if (!type_b) {
			if (out_start)
				*out_start = m_start;
			if (out_end)
				*out_end = m_end;
			if (out_nid)
				*out_nid = m_nid;
			idx_a--;
			*idx = (u32)idx_a | (u64)idx_b << 32;
			return;
		}

		/* scan areas before each reservation */
		for (; idx_b >= 0; idx_b--) {
			struct memblock_region *r;
			phys_addr_t r_start;
			phys_addr_t r_end;

			r = &type_b->regions[idx_b];
			r_start = idx_b ? r[-1].base + r[-1].size : 0;
			r_end = idx_b < type_b->cnt ?
				r->base : ULLONG_MAX;
			/*
			 * if idx_b advanced past idx_a,
			 * break out to advance idx_a
			 */

			if (r_end <= m_start)
				break;
			/* if the two regions intersect, we're done */
			if (m_end > r_start) {
				if (out_start)
					*out_start = max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = m_nid;
				if (m_start >= r_start)
					idx_a--;
				else
					idx_b--;
				*idx = (u32)idx_a | (u64)idx_b << 32;
				return;
			}
		}
	}
	/* signal end of iteration */
	*idx = ULLONG_MAX;
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
/*
 * Common iterator interface used to define for_each_mem_pfn_range().
 */
void __init_memblock __next_mem_pfn_range(int *idx, int nid,
				unsigned long *out_start_pfn,
				unsigned long *out_end_pfn, int *out_nid)
{
	struct memblock_type *type = &memblock.memory;
	struct memblock_region *r;

	while (++*idx < type->cnt) {
		r = &type->regions[*idx];

		if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
			continue;
		if (nid == MAX_NUMNODES || nid == r->nid)
			break;
	}
	if (*idx >= type->cnt) {
		*idx = -1;
		return;
	}

	if (out_start_pfn)
		*out_start_pfn = PFN_UP(r->base);
	if (out_end_pfn)
		*out_end_pfn = PFN_DOWN(r->base + r->size);
	if (out_nid)
		*out_nid = r->nid;
}

/**
 * memblock_set_node - set node ID on memblock regions
 * @base: base of area to set node ID for
 * @size: size of area to set node ID for
1033
 * @type: memblock type to set node ID for
T
 *
1036
 * Set the nid of memblock @type regions in [@base,@base+@size) to @nid.
T
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
				      struct memblock_type *type, int nid)
{
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++)
		memblock_set_region_node(&type->regions[i], nid);

	memblock_merge_regions(type);
	return 0;
}
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

static phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
					phys_addr_t align, phys_addr_t start,
					phys_addr_t end, int nid, ulong flags)
{
	phys_addr_t found;

	if (!align)
		align = SMP_CACHE_BYTES;

	found = memblock_find_in_range_node(size, align, start, end, nid,
					    flags);
	if (found && !memblock_reserve(found, size)) {
		/*
		 * The min_count is set to 0 so that memblock allocations are
		 * never reported as leaks.
		 */
		kmemleak_alloc(__va(found), size, 0, 0);
		return found;
	}
	return 0;
}

phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align,
					phys_addr_t start, phys_addr_t end,
					ulong flags)
{
	return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE,
					flags);
}

static phys_addr_t __init memblock_alloc_base_nid(phys_addr_t size,
					phys_addr_t align, phys_addr_t max_addr,
					int nid, ulong flags)
{
	return memblock_alloc_range_nid(size, align, 0, max_addr, nid, flags);
}

phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	return memblock_alloc_base_nid(size, align, MEMBLOCK_ALLOC_ACCESSIBLE,
				       nid, MEMBLOCK_NONE);
}

phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	return memblock_alloc_base_nid(size, align, max_addr, NUMA_NO_NODE,
				       MEMBLOCK_NONE);
}

phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	phys_addr_t alloc;

	alloc = __memblock_alloc_base(size, align, max_addr);

	if (alloc == 0)
		panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
		      (unsigned long long) size, (unsigned long long) max_addr);

	return alloc;
}

phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
{
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}

phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	phys_addr_t res = memblock_alloc_nid(size, align, nid);

	if (res)
		return res;
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}
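
/*
 * Illustrative example: an early per-node table can simply do
 *
 *	phys_addr_t pa = memblock_alloc_try_nid(SZ_64K, SMP_CACHE_BYTES, nid);
 *
 * and use __va(pa). A node-local allocation is preferred, any-node memory
 * is the fallback, and memblock_alloc_base() panics if even that fails,
 * so the returned address is always valid.
 */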

/**
 * memblock_virt_alloc_internal - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region to allocate (phys address)
 * @max_addr: the upper bound of the memory region to allocate (phys address)
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * The @min_addr limit is dropped if it can not be satisfied and the allocation
 * will fall back to memory below @min_addr. Also, allocation may fall back
 * to any node in the system if the specified node can not
 * hold the requested memory.
 *
 * The allocation is performed from memory region limited by
 * memblock.current_limit if @max_addr == %BOOTMEM_ALLOC_ACCESSIBLE.
 *
 * The memory block is aligned on SMP_CACHE_BYTES if @align == 0.
 *
 * The phys address of allocated boot memory block is converted to virtual and
 * allocated memory is reset to 0.
 *
 * In addition, function sets the min_count to 0 using kmemleak_alloc for
 * allocated boot memory block, so that it is never reported as leaks.
 *
 * RETURNS:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
static void * __init memblock_virt_alloc_internal(
				phys_addr_t size, phys_addr_t align,
				phys_addr_t min_addr, phys_addr_t max_addr,
				int nid)
{
	phys_addr_t alloc;
	void *ptr;

	if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	/*
	 * Detect any accidental use of these APIs after slab is ready, as at
	 * this moment memblock may be deinitialized already and its
	 * internal data may be destroyed (after execution of free_all_bootmem)
	 */
	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, nid);

	if (!align)
		align = SMP_CACHE_BYTES;

	if (max_addr > memblock.current_limit)
		max_addr = memblock.current_limit;

again:
	alloc = memblock_find_in_range_node(size, align, min_addr, max_addr,
					    nid, MEMBLOCK_NONE);
	if (alloc)
		goto done;

	if (nid != NUMA_NO_NODE) {
		alloc = memblock_find_in_range_node(size, align, min_addr,
						    max_addr, NUMA_NO_NODE,
						    MEMBLOCK_NONE);
		if (alloc)
			goto done;
	}

	if (min_addr) {
		min_addr = 0;
		goto again;
	} else {
		goto error;
	}

done:
	memblock_reserve(alloc, size);
	ptr = phys_to_virt(alloc);
	memset(ptr, 0, size);

	/*
	 * The min_count is set to 0 so that bootmem allocated blocks
	 * are never reported as leaks. This is because many of these blocks
	 * are only referred via the physical address which is not
	 * looked up by kmemleak.
	 */
	kmemleak_alloc(ptr, size, 0, 0);

	return ptr;

error:
	return NULL;
}

/**
 * memblock_virt_alloc_try_nid_nopanic - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public version of _memblock_virt_alloc_try_nid_nopanic() which provides
 * additional debug information (including caller info), if enabled.
 *
 * RETURNS:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
void * __init memblock_virt_alloc_try_nid_nopanic(
				phys_addr_t size, phys_addr_t align,
				phys_addr_t min_addr, phys_addr_t max_addr,
				int nid)
{
	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx %pF\n",
		     __func__, (u64)size, (u64)align, nid, (u64)min_addr,
		     (u64)max_addr, (void *)_RET_IP_);
	return memblock_virt_alloc_internal(size, align, min_addr,
					     max_addr, nid);
}

/**
 * memblock_virt_alloc_try_nid - allocate boot memory block with panicking
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public panicking version of _memblock_virt_alloc_try_nid_nopanic()
 * which provides debug information (including caller info), if enabled,
 * and panics if the request can not be satisfied.
 *
 * RETURNS:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
void * __init memblock_virt_alloc_try_nid(
			phys_addr_t size, phys_addr_t align,
			phys_addr_t min_addr, phys_addr_t max_addr,
			int nid)
{
	void *ptr;

	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx %pF\n",
		     __func__, (u64)size, (u64)align, nid, (u64)min_addr,
		     (u64)max_addr, (void *)_RET_IP_);
	ptr = memblock_virt_alloc_internal(size, align,
					   min_addr, max_addr, nid);
	if (ptr)
		return ptr;

	panic("%s: Failed to allocate %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx\n",
	      __func__, (u64)size, (u64)align, nid, (u64)min_addr,
	      (u64)max_addr);
	return NULL;
}
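
/*
 * Illustrative sketch: an early table that used to come from bootmem can
 * be carved out with
 *
 *	table = memblock_virt_alloc_try_nid(size, SMP_CACHE_BYTES,
 *					    __pa(MAX_DMA_ADDRESS),
 *					    BOOTMEM_ALLOC_ACCESSIBLE, nid);
 *
 * The result is zeroed and virtually addressed, and the function panics
 * on failure, so no NULL check is needed at the call site.
 */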

/**
 * __memblock_free_early - free boot memory block
 * @base: phys starting address of the boot memory block
 * @size: size of the boot memory block in bytes
 *
 * Free boot memory block previously allocated by memblock_virt_alloc_xx() API.
 * The freeing memory will not be released to the buddy allocator.
 */
void __init __memblock_free_early(phys_addr_t base, phys_addr_t size)
{
	memblock_dbg("%s: [%#016llx-%#016llx] %pF\n",
		     __func__, (u64)base, (u64)base + size - 1,
		     (void *)_RET_IP_);
	kmemleak_free_part(__va(base), size);
	memblock_remove_range(&memblock.reserved, base, size);
}

/**
 * __memblock_free_late - free bootmem block pages directly to buddy allocator
 * @base: phys starting address of the boot memory block
 * @size: size of the boot memory block in bytes
 *
 * This is only useful when the bootmem allocator has already been torn
 * down, but we are still initializing the system.  Pages are released directly
 * to the buddy allocator, no bootmem metadata is updated because it is gone.
 */
void __init __memblock_free_late(phys_addr_t base, phys_addr_t size)
{
	u64 cursor, end;

	memblock_dbg("%s: [%#016llx-%#016llx] %pF\n",
		     __func__, (u64)base, (u64)base + size - 1,
		     (void *)_RET_IP_);
	kmemleak_free_part(__va(base), size);
	cursor = PFN_UP(base);
	end = PFN_DOWN(base + size);

	for (; cursor < end; cursor++) {
		__free_pages_bootmem(pfn_to_page(cursor), 0);
		totalram_pages++;
	}
}

/*
 * Remaining API functions
 */

phys_addr_t __init memblock_phys_mem_size(void)
{
	return memblock.memory.total_size;
}

phys_addr_t __init memblock_mem_size(unsigned long limit_pfn)
{
	unsigned long pages = 0;
	struct memblock_region *r;
	unsigned long start_pfn, end_pfn;

	for_each_memblock(memory, r) {
		start_pfn = memblock_region_memory_base_pfn(r);
		end_pfn = memblock_region_memory_end_pfn(r);
		start_pfn = min_t(unsigned long, start_pfn, limit_pfn);
		end_pfn = min_t(unsigned long, end_pfn, limit_pfn);
		pages += end_pfn - start_pfn;
	}

	return PFN_PHYS(pages);
}

/* lowest address */
phys_addr_t __init_memblock memblock_start_of_DRAM(void)
{
	return memblock.memory.regions[0].base;
}

phys_addr_t __init_memblock memblock_end_of_DRAM(void)
{
	int idx = memblock.memory.cnt - 1;

	return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
}

void __init memblock_enforce_memory_limit(phys_addr_t limit)
{
	phys_addr_t max_addr = (phys_addr_t)ULLONG_MAX;
	struct memblock_region *r;

	if (!limit)
		return;

	/* find out max address */
	for_each_memblock(memory, r) {
		if (limit <= r->size) {
			max_addr = r->base + limit;
			break;
		}
		limit -= r->size;
	}

	/* truncate both memory and reserved regions */
	memblock_remove_range(&memblock.memory, max_addr,
			      (phys_addr_t)ULLONG_MAX);
	memblock_remove_range(&memblock.reserved, max_addr,
			      (phys_addr_t)ULLONG_MAX);
}

static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
{
	unsigned int left = 0, right = type->cnt;

	do {
		unsigned int mid = (right + left) / 2;

		if (addr < type->regions[mid].base)
			right = mid;
		else if (addr >= (type->regions[mid].base +
				  type->regions[mid].size))
			left = mid + 1;
		else
			return mid;
	} while (left < right);
	return -1;
}

int __init memblock_is_reserved(phys_addr_t addr)
{
	return memblock_search(&memblock.reserved, addr) != -1;
}

int __init_memblock memblock_is_memory(phys_addr_t addr)
{
	return memblock_search(&memblock.memory, addr) != -1;
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int __init_memblock memblock_search_pfn_nid(unsigned long pfn,
			 unsigned long *start_pfn, unsigned long *end_pfn)
{
	struct memblock_type *type = &memblock.memory;
	int mid = memblock_search(type, PFN_PHYS(pfn));

	if (mid == -1)
		return -1;

	*start_pfn = PFN_DOWN(type->regions[mid].base);
	*end_pfn = PFN_DOWN(type->regions[mid].base + type->regions[mid].size);

	return type->regions[mid].nid;
}
#endif

/**
 * memblock_is_region_memory - check if a region is a subset of memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base+@size) is a subset of a memory block.
 *
 * RETURNS:
 * 0 if false, non-zero if true
 */
int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
{
	int idx = memblock_search(&memblock.memory, base);
	phys_addr_t end = base + memblock_cap_size(base, &size);

	if (idx == -1)
		return 0;
	return memblock.memory.regions[idx].base <= base &&
		(memblock.memory.regions[idx].base +
		 memblock.memory.regions[idx].size) >= end;
}

/**
 * memblock_is_region_reserved - check if a region intersects reserved memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base+@size) intersects a reserved memory block.
 *
 * RETURNS:
 * 0 if false, non-zero if true
 */
int __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
{
	memblock_cap_size(base, &size);
	return memblock_overlaps_region(&memblock.reserved, base, size) >= 0;
}

void __init_memblock memblock_trim_memory(phys_addr_t align)
{
	phys_addr_t start, end, orig_start, orig_end;
	struct memblock_region *r;

	for_each_memblock(memory, r) {
		orig_start = r->base;
		orig_end = r->base + r->size;
		start = round_up(orig_start, align);
		end = round_down(orig_end, align);

		if (start == orig_start && end == orig_end)
			continue;

		if (start < end) {
			r->base = start;
			r->size = end - start;
		} else {
			memblock_remove_region(&memblock.memory,
					       r - memblock.memory.regions);
			r--;
		}
	}
}

void __init_memblock memblock_set_current_limit(phys_addr_t limit)
{
	memblock.current_limit = limit;
}

phys_addr_t __init_memblock memblock_get_current_limit(void)
{
	return memblock.current_limit;
}

static void __init_memblock memblock_dump(struct memblock_type *type, char *name)
{
	unsigned long long base, size;
	unsigned long flags;
	int i;

	pr_info(" %s.cnt  = 0x%lx\n", name, type->cnt);

	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		char nid_buf[32] = "";

		base = rgn->base;
		size = rgn->size;
		flags = rgn->flags;
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
		if (memblock_get_region_node(rgn) != MAX_NUMNODES)
			snprintf(nid_buf, sizeof(nid_buf), " on node %d",
				 memblock_get_region_node(rgn));
#endif
		pr_info(" %s[%#x]\t[%#016llx-%#016llx], %#llx bytes%s flags: %#lx\n",
			name, i, base, base + size - 1, size, nid_buf, flags);
	}
}

void __init_memblock __memblock_dump_all(void)
{
	pr_info("MEMBLOCK configuration:\n");
	pr_info(" memory size = %#llx reserved size = %#llx\n",
		(unsigned long long)memblock.memory.total_size,
		(unsigned long long)memblock.reserved.total_size);

	memblock_dump(&memblock.memory, "memory");
	memblock_dump(&memblock.reserved, "reserved");
}

void __init memblock_allow_resize(void)
{
	memblock_can_resize = 1;
}

static int __init early_memblock(char *p)
{
	if (p && strstr(p, "debug"))
		memblock_debug = 1;
	return 0;
}
early_param("memblock", early_memblock);
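
/*
 * For example, booting with "memblock=debug" on the kernel command line
 * sets memblock_debug, which turns each memblock_dbg() call above into a
 * pr_info() line so the early allocations and frees show up in dmesg.
 */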

#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_ARCH_DISCARD_MEMBLOCK)

static int memblock_debug_show(struct seq_file *m, void *private)
{
	struct memblock_type *type = m->private;
	struct memblock_region *reg;
	int i;

	for (i = 0; i < type->cnt; i++) {
		reg = &type->regions[i];
		seq_printf(m, "%4d: ", i);
		if (sizeof(phys_addr_t) == 4)
			seq_printf(m, "0x%08lx..0x%08lx\n",
				   (unsigned long)reg->base,
				   (unsigned long)(reg->base + reg->size - 1));
		else
			seq_printf(m, "0x%016llx..0x%016llx\n",
				   (unsigned long long)reg->base,
				   (unsigned long long)(reg->base + reg->size - 1));

	}
	return 0;
}

static int memblock_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, memblock_debug_show, inode->i_private);
}

static const struct file_operations memblock_debug_fops = {
	.open = memblock_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __init memblock_init_debugfs(void)
{
	struct dentry *root = debugfs_create_dir("memblock", NULL);
	if (!root)
		return -ENXIO;
	debugfs_create_file("memory", S_IRUGO, root, &memblock.memory, &memblock_debug_fops);
	debugfs_create_file("reserved", S_IRUGO, root, &memblock.reserved, &memblock_debug_fops);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	debugfs_create_file("physmem", S_IRUGO, root, &memblock.physmem, &memblock_debug_fops);
#endif

	return 0;
}
__initcall(memblock_init_debugfs);

#endif /* CONFIG_DEBUG_FS */