/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>

#include <asm-generic/sections.h>
#include <linux/io.h>

#include "internal.h"

static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS] __initdata_memblock;
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
static struct memblock_region memblock_physmem_init_regions[INIT_PHYSMEM_REGIONS] __initdata_memblock;
#endif

struct memblock memblock __initdata_memblock = {
	.memory.regions		= memblock_memory_init_regions,
	.memory.cnt		= 1,	/* empty dummy entry */
	.memory.max		= INIT_MEMBLOCK_REGIONS,

	.reserved.regions	= memblock_reserved_init_regions,
	.reserved.cnt		= 1,	/* empty dummy entry */
	.reserved.max		= INIT_MEMBLOCK_REGIONS,

#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	.physmem.regions	= memblock_physmem_init_regions,
	.physmem.cnt		= 1,	/* empty dummy entry */
	.physmem.max		= INIT_PHYSMEM_REGIONS,
#endif

	.bottom_up		= false,
	.current_limit		= MEMBLOCK_ALLOC_ANYWHERE,
};

int memblock_debug __initdata_memblock;
#ifdef CONFIG_MOVABLE_NODE
bool movable_node_enabled __initdata_memblock = false;
#endif
static bool system_has_some_mirror __initdata_memblock = false;
static int memblock_can_resize __initdata_memblock;
static int memblock_memory_in_slab __initdata_memblock = 0;
static int memblock_reserved_in_slab __initdata_memblock = 0;

ulong __init_memblock choose_memblock_flags(void)
{
	return system_has_some_mirror ? MEMBLOCK_MIRROR : MEMBLOCK_NONE;
}

/* inline so we don't get a warning when pr_debug is compiled out */
static __init_memblock const char *
memblock_type_name(struct memblock_type *type)
{
	if (type == &memblock.memory)
		return "memory";
	else if (type == &memblock.reserved)
		return "reserved";
	else
		return "unknown";
}

/* adjust *@size so that (@base + *@size) doesn't overflow, return new size */
static inline phys_addr_t memblock_cap_size(phys_addr_t base, phys_addr_t *size)
{
	return *size = min(*size, (phys_addr_t)ULLONG_MAX - base);
}
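
/*
 * Worked example (illustrative): with base = ULLONG_MAX - 0xfff and
 * *size = 0x2000, base + *size would wrap, so the size is capped to
 * ULLONG_MAX - base = 0xfff and base + *size lands exactly on
 * ULLONG_MAX without overflowing.
 */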

/*
 * Address comparison utilities
 */
static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
				       phys_addr_t base2, phys_addr_t size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}
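
/*
 * Note that both ranges are half-open, [base, base + size): e.g.
 * [0x1000, 0x2000) and [0x2000, 0x3000) merely touch and do NOT
 * overlap, since base2 < base1 + size1 fails (0x2000 < 0x2000).
 */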

bool __init_memblock memblock_overlaps_region(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size)
{
	unsigned long i;

	for (i = 0; i < type->cnt; i++) {
		phys_addr_t rgnbase = type->regions[i].base;
		phys_addr_t rgnsize = type->regions[i].size;
		if (memblock_addrs_overlap(base, size, rgnbase, rgnsize))
			break;
	}

	return i < type->cnt;
}

/**
 * __memblock_find_range_bottom_up - find free area utility in bottom-up
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), find free area bottom-up.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
				phys_addr_t size, phys_addr_t align, int nid,
				ulong flags)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range(i, nid, flags, &this_start, &this_end, NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		cand = round_up(this_start, align);
		if (cand < this_end && this_end - cand >= size)
			return cand;
	}

	return 0;
}

/**
 * __memblock_find_range_top_down - find free area utility, in top-down
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Utility called from memblock_find_in_range_node(), find free area top-down.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
static phys_addr_t __init_memblock
__memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
			       phys_addr_t size, phys_addr_t align, int nid,
			       ulong flags)
{
	phys_addr_t this_start, this_end, cand;
	u64 i;

	for_each_free_mem_range_reverse(i, nid, flags, &this_start, &this_end,
					NULL) {
		this_start = clamp(this_start, start, end);
		this_end = clamp(this_end, start, end);

		if (this_end < size)
			continue;

		cand = round_down(this_end - size, align);
		if (cand >= this_start)
			return cand;
	}

	return 0;
}

/**
 * memblock_find_in_range_node - find free area in given range and node
 * @size: size of free area to find
 * @align: alignment of free area to find
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 * @flags: pick from blocks based on memory attributes
 *
 * Find @size free area aligned to @align in the specified range and node.
 *
 * When allocation direction is bottom-up, the @start should be greater
 * than the end of the kernel image. Otherwise, it will be trimmed. The
 * reason is that we want the bottom-up allocation just near the kernel
 * image so it is highly likely that the allocated memory and the kernel
 * will reside in the same node.
 *
 * If bottom-up allocation fails, the allocation is retried top-down.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
					phys_addr_t align, phys_addr_t start,
					phys_addr_t end, int nid, ulong flags)
{
	phys_addr_t kernel_end, ret;

	/* pump up @end */
	if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
		end = memblock.current_limit;

	/* avoid allocating the first page */
	start = max_t(phys_addr_t, start, PAGE_SIZE);
	end = max(start, end);
	kernel_end = __pa_symbol(_end);

	/*
	 * try bottom-up allocation only when bottom-up mode
	 * is set and @end is above the kernel image.
	 */
	if (memblock_bottom_up() && end > kernel_end) {
		phys_addr_t bottom_up_start;

		/* make sure we will allocate above the kernel */
		bottom_up_start = max(start, kernel_end);

		/* ok, try bottom-up allocation first */
		ret = __memblock_find_range_bottom_up(bottom_up_start, end,
						      size, align, nid, flags);
		if (ret)
			return ret;

		/*
		 * we always limit bottom-up allocation above the kernel,
		 * but top-down allocation doesn't have the limit, so
		 * retrying top-down allocation may succeed when bottom-up
		 * allocation failed.
		 *
		 * bottom-up allocation is expected to fail very rarely,
		 * so we use WARN_ONCE() here to see the stack trace if
		 * a failure happens.
		 */
		WARN_ONCE(1, "memblock: bottom-up allocation failed, "
			     "memory hotunplug may be affected\n");
	}

	return __memblock_find_range_top_down(start, end, size, align, nid,
					      flags);
}
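
/*
 * Usage sketch (illustrative values, not a real caller): with
 * bottom-up mode enabled and the kernel image ending at 0x2000000,
 *
 *	addr = memblock_find_in_range_node(SZ_2M, SZ_1M, 0,
 *					   MEMBLOCK_ALLOC_ACCESSIBLE,
 *					   nid, MEMBLOCK_NONE);
 *
 * scans free ranges upwards starting at 0x2000000 and only falls back
 * to the top-down scan if that search comes up empty.
 */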

/**
 * memblock_find_in_range - find free area in given range
 * @start: start of candidate range
 * @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE}
 * @size: size of free area to find
 * @align: alignment of free area to find
 *
 * Find @size free area aligned to @align in the specified range.
 *
 * RETURNS:
 * Found address on success, 0 on failure.
 */
phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
					phys_addr_t end, phys_addr_t size,
					phys_addr_t align)
{
	phys_addr_t ret;
	ulong flags = choose_memblock_flags();

again:
	ret = memblock_find_in_range_node(size, align, start, end,
					    NUMA_NO_NODE, flags);

	if (!ret && (flags & MEMBLOCK_MIRROR)) {
		pr_warn("Could not allocate %pap bytes of mirrored memory\n",
			&size);
		flags &= ~MEMBLOCK_MIRROR;
		goto again;
	}

	return ret;
}

static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r)
{
	type->total_size -= type->regions[r].size;
	memmove(&type->regions[r], &type->regions[r + 1],
		(type->cnt - (r + 1)) * sizeof(type->regions[r]));
	type->cnt--;

	/* Special case for empty arrays */
	if (type->cnt == 0) {
		WARN_ON(type->total_size != 0);
		type->cnt = 1;
		type->regions[0].base = 0;
		type->regions[0].size = 0;
		type->regions[0].flags = 0;
		memblock_set_region_node(&type->regions[0], MAX_NUMNODES);
	}
}

#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK

phys_addr_t __init_memblock get_allocated_memblock_reserved_regions_info(
					phys_addr_t *addr)
{
	if (memblock.reserved.regions == memblock_reserved_init_regions)
		return 0;

	*addr = __pa(memblock.reserved.regions);

	return PAGE_ALIGN(sizeof(struct memblock_region) *
			  memblock.reserved.max);
}

phys_addr_t __init_memblock get_allocated_memblock_memory_regions_info(
					phys_addr_t *addr)
{
	if (memblock.memory.regions == memblock_memory_init_regions)
		return 0;

	*addr = __pa(memblock.memory.regions);

	return PAGE_ALIGN(sizeof(struct memblock_region) *
			  memblock.memory.max);
}

#endif

/**
 * memblock_double_array - double the size of the memblock regions array
 * @type: memblock type of the regions array being doubled
 * @new_area_start: starting address of memory range to avoid overlap with
 * @new_area_size: size of memory range to avoid overlap with
 *
 * Double the size of the @type regions array. If memblock is being used to
 * allocate memory for a new reserved regions array and there is a previously
 * allocated memory range [@new_area_start,@new_area_start+@new_area_size]
 * waiting to be reserved, ensure the memory used by the new array does
 * not overlap.
 *
 * RETURNS:
 * 0 on success, -1 on failure.
 */
static int __init_memblock memblock_double_array(struct memblock_type *type,
						phys_addr_t new_area_start,
						phys_addr_t new_area_size)
{
	struct memblock_region *new_array, *old_array;
	phys_addr_t old_alloc_size, new_alloc_size;
	phys_addr_t old_size, new_size, addr;
	int use_slab = slab_is_available();
	int *in_slab;

	/* We don't allow resizing until we know about the reserved regions
	 * of memory that aren't suitable for allocation
	 */
	if (!memblock_can_resize)
		return -1;

	/* Calculate new doubled size */
	old_size = type->max * sizeof(struct memblock_region);
	new_size = old_size << 1;
	/*
	 * We need to allocate the new array aligned to PAGE_SIZE,
	 * so we can free it completely later.
	 */
	old_alloc_size = PAGE_ALIGN(old_size);
	new_alloc_size = PAGE_ALIGN(new_size);

	/* Retrieve the slab flag */
	if (type == &memblock.memory)
		in_slab = &memblock_memory_in_slab;
	else
		in_slab = &memblock_reserved_in_slab;

	/* Try to find some space for it.
	 *
	 * WARNING: We assume that either slab_is_available() is true and we
	 * use kmalloc(), or we use MEMBLOCK for allocations. That means this
	 * is unsafe to use when bootmem is currently active (unless bootmem
	 * itself is implemented on top of MEMBLOCK, which isn't the case yet).
	 *
	 * This should however not be an issue for now, as we currently only
	 * call into MEMBLOCK while it's still active, or much later when slab
	 * is active for memory hotplug operations.
	 */
	if (use_slab) {
		new_array = kmalloc(new_size, GFP_KERNEL);
		addr = new_array ? __pa(new_array) : 0;
	} else {
		/* only exclude range when trying to double reserved.regions */
		if (type != &memblock.reserved)
			new_area_start = new_area_size = 0;

		addr = memblock_find_in_range(new_area_start + new_area_size,
						memblock.current_limit,
						new_alloc_size, PAGE_SIZE);
		if (!addr && new_area_size)
			addr = memblock_find_in_range(0,
				min(new_area_start, memblock.current_limit),
				new_alloc_size, PAGE_SIZE);

		new_array = addr ? __va(addr) : NULL;
	}
	if (!addr) {
		pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
		       memblock_type_name(type), type->max, type->max * 2);
		return -1;
	}

	memblock_dbg("memblock: %s is doubled to %ld at [%#010llx-%#010llx]",
			memblock_type_name(type), type->max * 2, (u64)addr,
			(u64)addr + new_size - 1);

	/*
	 * Found space, we now need to move the array over before we add the
	 * reserved region since it may be our reserved array itself that is
	 * full.
	 */
	memcpy(new_array, type->regions, old_size);
	memset(new_array + type->max, 0, old_size);
	old_array = type->regions;
	type->regions = new_array;
	type->max <<= 1;

	/* Free old array. We needn't free it if the array is the static one */
	if (*in_slab)
		kfree(old_array);
	else if (old_array != memblock_memory_init_regions &&
		 old_array != memblock_reserved_init_regions)
		memblock_free(__pa(old_array), old_alloc_size);

	/*
	 * Reserve the new array if that comes from the memblock.  Otherwise, we
	 * needn't do it
	 */
	if (!use_slab)
		BUG_ON(memblock_reserve(addr, new_alloc_size));

	/* Update slab flag */
	*in_slab = use_slab;

	return 0;
}
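
/*
 * Worked example (illustrative, assuming a 32-byte struct
 * memblock_region): doubling a full 128-entry array goes from
 * old_size = 4096 to new_size = 8192 bytes.  Before slab is up those
 * 8KB come from memblock itself, which is why the search above must
 * dodge the [@new_area_start, @new_area_start + @new_area_size) range.
 */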

/**
 * memblock_merge_regions - merge neighboring compatible regions
 * @type: memblock type to scan
 *
 * Scan @type and merge neighboring compatible regions.
 */
static void __init_memblock memblock_merge_regions(struct memblock_type *type)
{
	int i = 0;

	/* cnt never goes below 1 */
	while (i < type->cnt - 1) {
		struct memblock_region *this = &type->regions[i];
		struct memblock_region *next = &type->regions[i + 1];

		if (this->base + this->size != next->base ||
		    memblock_get_region_node(this) !=
		    memblock_get_region_node(next) ||
		    this->flags != next->flags) {
			BUG_ON(this->base + this->size > next->base);
			i++;
			continue;
		}

		this->size += next->size;
		/* move forward from next + 1, index of which is i + 2 */
		memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next));
		type->cnt--;
	}
}
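
/*
 * Example (illustrative): after adding [0x1000, 0x2000) and
 * [0x2000, 0x3000) with the same nid and flags, the scan above folds
 * them into one region [0x1000, 0x3000), so walkers never see two
 * abutting compatible entries.
 */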

/**
 * memblock_insert_region - insert new memblock region
 * @type:	memblock type to insert into
 * @idx:	index for the insertion point
 * @base:	base address of the new region
 * @size:	size of the new region
 * @nid:	node id of the new region
 * @flags:	flags of the new region
 *
 * Insert new memblock region [@base,@base+@size) into @type at @idx.
 * @type must already have extra room to accommodate the new region.
 */
static void __init_memblock memblock_insert_region(struct memblock_type *type,
						   int idx, phys_addr_t base,
						   phys_addr_t size,
						   int nid, unsigned long flags)
{
	struct memblock_region *rgn = &type->regions[idx];

	BUG_ON(type->cnt >= type->max);
	memmove(rgn + 1, rgn, (type->cnt - idx) * sizeof(*rgn));
	rgn->base = base;
	rgn->size = size;
	rgn->flags = flags;
	memblock_set_region_node(rgn, nid);
	type->cnt++;
	type->total_size += size;
}

/**
 * memblock_add_range - add new memblock region
 * @type: memblock type to add new region into
 * @base: base address of the new region
 * @size: size of the new region
 * @nid: nid of the new region
 * @flags: flags of the new region
 *
 * Add new memblock region [@base,@base+@size) into @type.  The new region
 * is allowed to overlap with existing ones - overlaps don't affect already
 * existing regions.  @type is guaranteed to be minimal (all neighbouring
 * compatible regions are merged) after the addition.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_add_range(struct memblock_type *type,
				phys_addr_t base, phys_addr_t size,
				int nid, unsigned long flags)
{
	bool insert = false;
	phys_addr_t obase = base;
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int i, nr_new;

	if (!size)
		return 0;

	/* special case for empty array */
	if (type->regions[0].size == 0) {
		WARN_ON(type->cnt != 1 || type->total_size);
		type->regions[0].base = base;
		type->regions[0].size = size;
		type->regions[0].flags = flags;
		memblock_set_region_node(&type->regions[0], nid);
		type->total_size = size;
		return 0;
	}
repeat:
	/*
	 * The following is executed twice.  Once with %false @insert and
	 * then with %true.  The first counts the number of regions needed
	 * to accommodate the new area.  The second actually inserts them.
	 */
	base = obase;
	nr_new = 0;

	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;
		/*
		 * @rgn overlaps.  If it separates the lower part of new
		 * area, insert that portion.
		 */
		if (rbase > base) {
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
			WARN_ON(nid != memblock_get_region_node(rgn));
#endif
			WARN_ON(flags != rgn->flags);
			nr_new++;
			if (insert)
				memblock_insert_region(type, i++, base,
						       rbase - base, nid,
						       flags);
		}
		/* area below @rend is dealt with, forget about it */
		base = min(rend, end);
	}

	/* insert the remaining portion */
	if (base < end) {
		nr_new++;
		if (insert)
			memblock_insert_region(type, i, base, end - base,
					       nid, flags);
	}

	/*
	 * If this was the first round, resize array and repeat for actual
	 * insertions; otherwise, merge and return.
	 */
	if (!insert) {
		while (type->cnt + nr_new > type->max)
			if (memblock_double_array(type, obase, size) < 0)
				return -ENOMEM;
		insert = true;
		goto repeat;
	} else {
		memblock_merge_regions(type);
		return 0;
	}
}
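
/*
 * Example (illustrative): if @type already holds [0x1000, 0x2000) and
 * [0x0000, 0x3000) is added, the counting pass finds two uncovered
 * pieces, [0x0000, 0x1000) and [0x2000, 0x3000); the second pass
 * inserts them and memblock_merge_regions() collapses all three
 * entries into a single [0x0000, 0x3000).
 */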

int __init_memblock memblock_add_node(phys_addr_t base, phys_addr_t size,
				       int nid)
{
	return memblock_add_range(&memblock.memory, base, size, nid, 0);
}

static int __init_memblock memblock_add_region(phys_addr_t base,
						phys_addr_t size,
						int nid,
						unsigned long flags)
{
	struct memblock_type *type = &memblock.memory;

	memblock_dbg("memblock_add: [%#016llx-%#016llx] flags %#02lx %pF\n",
		     (unsigned long long)base,
		     (unsigned long long)base + size - 1,
		     flags, (void *)_RET_IP_);

	return memblock_add_range(type, base, size, nid, flags);
}

int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
{
	return memblock_add_region(base, size, MAX_NUMNODES, 0);
}

/**
 * memblock_isolate_range - isolate given range into disjoint memblocks
 * @type: memblock type to isolate range for
 * @base: base of range to isolate
 * @size: size of range to isolate
 * @start_rgn: out parameter for the start of isolated region
 * @end_rgn: out parameter for the end of isolated region
 *
 * Walk @type and ensure that regions don't cross the boundaries defined by
 * [@base,@base+@size).  Crossing regions are split at the boundaries,
 * which may create at most two more regions.  The index of the first
 * region inside the range is returned in *@start_rgn and end in *@end_rgn.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int __init_memblock memblock_isolate_range(struct memblock_type *type,
					phys_addr_t base, phys_addr_t size,
					int *start_rgn, int *end_rgn)
{
	phys_addr_t end = base + memblock_cap_size(base, &size);
	int i;

	*start_rgn = *end_rgn = 0;

	if (!size)
		return 0;

	/* we'll create at most two more regions */
	while (type->cnt + 2 > type->max)
		if (memblock_double_array(type, base, size) < 0)
			return -ENOMEM;

	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		phys_addr_t rbase = rgn->base;
		phys_addr_t rend = rbase + rgn->size;

		if (rbase >= end)
			break;
		if (rend <= base)
			continue;

		if (rbase < base) {
			/*
			 * @rgn intersects from below.  Split and continue
			 * to process the next region - the new top half.
			 */
			rgn->base = base;
			rgn->size -= base - rbase;
			type->total_size -= base - rbase;
			memblock_insert_region(type, i, rbase, base - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else if (rend > end) {
			/*
			 * @rgn intersects from above.  Split and redo the
			 * current region - the new bottom half.
			 */
			rgn->base = end;
			rgn->size -= end - rbase;
			type->total_size -= end - rbase;
			memblock_insert_region(type, i--, rbase, end - rbase,
					       memblock_get_region_node(rgn),
					       rgn->flags);
		} else {
			/* @rgn is fully contained, record it */
			if (!*end_rgn)
				*start_rgn = i;
			*end_rgn = i + 1;
		}
	}

	return 0;
}
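
/*
 * Example (illustrative): isolating [0x1000, 0x3000) out of a single
 * region [0x0000, 0x4000) yields [0x0000, 0x1000), [0x1000, 0x3000)
 * and [0x3000, 0x4000); *start_rgn/*end_rgn then bracket only the
 * middle entry so the caller can act on exactly the requested range.
 */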

int __init_memblock memblock_remove_range(struct memblock_type *type,
					  phys_addr_t base, phys_addr_t size)
{
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = end_rgn - 1; i >= start_rgn; i--)
		memblock_remove_region(type, i);
	return 0;
}

int __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size)
{
	return memblock_remove_range(&memblock.memory, base, size);
}

int __init_memblock memblock_free(phys_addr_t base, phys_addr_t size)
{
	memblock_dbg("   memblock_free: [%#016llx-%#016llx] %pF\n",
		     (unsigned long long)base,
		     (unsigned long long)base + size - 1,
		     (void *)_RET_IP_);

	kmemleak_free_part(__va(base), size);
	return memblock_remove_range(&memblock.reserved, base, size);
}

static int __init_memblock memblock_reserve_region(phys_addr_t base,
						   phys_addr_t size,
						   int nid,
						   unsigned long flags)
{
	struct memblock_type *type = &memblock.reserved;

	memblock_dbg("memblock_reserve: [%#016llx-%#016llx] flags %#02lx %pF\n",
		     (unsigned long long)base,
		     (unsigned long long)base + size - 1,
		     flags, (void *)_RET_IP_);

	return memblock_add_range(type, base, size, nid, flags);
}

int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
{
	return memblock_reserve_region(base, size, MAX_NUMNODES, 0);
}

/**
 * memblock_setclr_flag - set or clear a flag on a range of memory regions
 * @base: base address of the region
 * @size: size of the region
 * @set: set (1) or clear (0) the flag
 * @flag: the flag to update
 *
 * This function isolates region [@base, @base + @size), and sets/clears flag
 *
 * Return 0 on success, -errno on failure.
 */
static int __init_memblock memblock_setclr_flag(phys_addr_t base,
				phys_addr_t size, int set, int flag)
{
	struct memblock_type *type = &memblock.memory;
	int i, ret, start_rgn, end_rgn;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++)
		if (set)
			memblock_set_region_flags(&type->regions[i], flag);
		else
			memblock_clear_region_flags(&type->regions[i], flag);

	memblock_merge_regions(type);
	return 0;
}
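
/*
 * Usage sketch (illustrative range): an arch parsing a firmware table
 * that declares hot-pluggable memory might call
 *
 *	memblock_mark_hotplug(0x100000000ULL, 0x80000000ULL);
 *
 * and undo it later with memblock_clear_hotplug() on the same range;
 * both wrappers below funnel into memblock_setclr_flag().
 */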

/**
 * memblock_mark_hotplug - Mark hotpluggable memory with flag MEMBLOCK_HOTPLUG.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 1, MEMBLOCK_HOTPLUG);
}

/**
 * memblock_clear_hotplug - Clear flag MEMBLOCK_HOTPLUG for a specified region.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size)
{
	return memblock_setclr_flag(base, size, 0, MEMBLOCK_HOTPLUG);
}

/**
 * memblock_mark_mirror - Mark mirrored memory with flag MEMBLOCK_MIRROR.
 * @base: the base phys addr of the region
 * @size: the size of the region
 *
 * Return 0 on success, -errno on failure.
 */
int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size)
{
	system_has_some_mirror = true;

	return memblock_setclr_flag(base, size, 1, MEMBLOCK_MIRROR);
}


/**
 * __next_reserved_mem_region - next function for for_each_reserved_mem_region()
 * @idx: pointer to u64 loop variable
 * @out_start: ptr to phys_addr_t for start address of the region, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the region, can be %NULL
 *
 * Iterate over all reserved memory regions.
 */
void __init_memblock __next_reserved_mem_region(u64 *idx,
					   phys_addr_t *out_start,
					   phys_addr_t *out_end)
{
	struct memblock_type *type = &memblock.reserved;

	if (*idx >= 0 && *idx < type->cnt) {
		struct memblock_region *r = &type->regions[*idx];
		phys_addr_t base = r->base;
		phys_addr_t size = r->size;

		if (out_start)
			*out_start = base;
		if (out_end)
			*out_end = base + size - 1;

		*idx += 1;
		return;
	}

	/* signal end of iteration */
	*idx = ULLONG_MAX;
}
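
/*
 * Usage sketch: this is normally reached through the iterator macro in
 * <linux/memblock.h>, e.g.
 *
 *	u64 i;
 *	phys_addr_t start, end;
 *
 *	for_each_reserved_mem_region(i, &start, &end)
 *		pr_info("reserved: [%pa-%pa]\n", &start, &end);
 */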

/**
 * __next_mem_range - next function for for_each_free_mem_range() etc.
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Find the first area from *@idx which matches @nid, fill the out
 * parameters, and update *@idx for the next iteration.  The lower 32bit of
 * *@idx contains index into type_a and the upper 32bit indexes the
 * areas before each region in type_b.	For example, if type_b regions
 * look like the following,
 *
 *	0:[0-16), 1:[32-48), 2:[128-130)
 *
 * The upper 32bit indexes the following regions.
 *
 *	0:[0-0), 1:[16-32), 2:[48-128), 3:[130-MAX)
 *
 * As both region arrays are sorted, the function advances the two indices
 * in lockstep and returns each intersection.
 */
void __init_memblock __next_mem_range(u64 *idx, int nid, ulong flags,
				      struct memblock_type *type_a,
				      struct memblock_type *type_b,
				      phys_addr_t *out_start,
				      phys_addr_t *out_end, int *out_nid)
{
	int idx_a = *idx & 0xffffffff;
	int idx_b = *idx >> 32;

	if (WARN_ONCE(nid == MAX_NUMNODES,
	"Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	for (; idx_a < type_a->cnt; idx_a++) {
		struct memblock_region *m = &type_a->regions[idx_a];

		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;
		int	    m_nid = memblock_get_region_node(m);

		/* only memory regions are associated with nodes, check it */
		if (nid != NUMA_NO_NODE && nid != m_nid)
			continue;

		/* skip hotpluggable memory regions if needed */
		if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
			continue;

		/* if we want mirror memory skip non-mirror memory regions */
		if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
			continue;

		if (!type_b) {
			if (out_start)
				*out_start = m_start;
			if (out_end)
				*out_end = m_end;
			if (out_nid)
				*out_nid = m_nid;
			idx_a++;
			*idx = (u32)idx_a | (u64)idx_b << 32;
			return;
		}

		/* scan areas before each reservation */
		for (; idx_b < type_b->cnt + 1; idx_b++) {
			struct memblock_region *r;
			phys_addr_t r_start;
			phys_addr_t r_end;

			r = &type_b->regions[idx_b];
			r_start = idx_b ? r[-1].base + r[-1].size : 0;
			r_end = idx_b < type_b->cnt ?
				r->base : ULLONG_MAX;

			/*
			 * if idx_b advanced past idx_a,
			 * break out to advance idx_a
			 */
			if (r_start >= m_end)
				break;
			/* if the two regions intersect, we're done */
			if (m_start < r_end) {
				if (out_start)
					*out_start = max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = m_nid;
				/*
				 * The region which ends first is
				 * advanced for the next iteration.
				 */
				if (m_end <= r_end)
					idx_a++;
				else
					idx_b++;
				*idx = (u32)idx_a | (u64)idx_b << 32;
				return;
			}
		}
	}

	/* signal end of iteration */
	*idx = ULLONG_MAX;
}
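
/*
 * Usage sketch: callers normally reach this through the iterator
 * macros, e.g.
 *
 *	u64 i;
 *	phys_addr_t start, end;
 *
 *	for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
 *				&start, &end, NULL)
 *		pr_info("free: [%pa-%pa]\n", &start, &end);
 *
 * which walks memory minus reserved, i.e. type_a = &memblock.memory
 * and type_b = &memblock.reserved.
 */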

/**
 * __next_mem_range_rev - generic next function for for_each_*_range_rev()
 *
 * Finds the next range from type_a which is not marked as unsuitable
 * in type_b.
 *
 * @idx: pointer to u64 loop variable
 * @nid: node selector, %NUMA_NO_NODE for all nodes
 * @flags: pick from blocks based on memory attributes
 * @type_a: pointer to memblock_type from where the range is taken
 * @type_b: pointer to memblock_type which excludes memory from being taken
 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
 * @out_nid: ptr to int for nid of the range, can be %NULL
 *
 * Reverse of __next_mem_range().
 */
void __init_memblock __next_mem_range_rev(u64 *idx, int nid, ulong flags,
					  struct memblock_type *type_a,
					  struct memblock_type *type_b,
					  phys_addr_t *out_start,
					  phys_addr_t *out_end, int *out_nid)
{
	int idx_a = *idx & 0xffffffff;
	int idx_b = *idx >> 32;

	if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	if (*idx == (u64)ULLONG_MAX) {
		idx_a = type_a->cnt - 1;
		idx_b = type_b->cnt;
	}

	for (; idx_a >= 0; idx_a--) {
		struct memblock_region *m = &type_a->regions[idx_a];

		phys_addr_t m_start = m->base;
		phys_addr_t m_end = m->base + m->size;
		int m_nid = memblock_get_region_node(m);

		/* only memory regions are associated with nodes, check it */
		if (nid != NUMA_NO_NODE && nid != m_nid)
			continue;

		/* skip hotpluggable memory regions if needed */
		if (movable_node_is_enabled() && memblock_is_hotpluggable(m))
			continue;

		/* if we want mirror memory skip non-mirror memory regions */
		if ((flags & MEMBLOCK_MIRROR) && !memblock_is_mirror(m))
			continue;

		if (!type_b) {
			if (out_start)
				*out_start = m_start;
			if (out_end)
				*out_end = m_end;
			if (out_nid)
				*out_nid = m_nid;
			idx_a--;
			*idx = (u32)idx_a | (u64)idx_b << 32;
			return;
		}

		/* scan areas before each reservation */
		for (; idx_b >= 0; idx_b--) {
			struct memblock_region *r;
			phys_addr_t r_start;
			phys_addr_t r_end;

			r = &type_b->regions[idx_b];
			r_start = idx_b ? r[-1].base + r[-1].size : 0;
			r_end = idx_b < type_b->cnt ?
				r->base : ULLONG_MAX;
			/*
			 * if idx_b advanced past idx_a,
			 * break out to advance idx_a
			 */

			if (r_end <= m_start)
				break;
			/* if the two regions intersect, we're done */
			if (m_end > r_start) {
				if (out_start)
					*out_start = max(m_start, r_start);
				if (out_end)
					*out_end = min(m_end, r_end);
				if (out_nid)
					*out_nid = m_nid;
				if (m_start >= r_start)
					idx_a--;
				else
					idx_b--;
				*idx = (u32)idx_a | (u64)idx_b << 32;
				return;
			}
		}
	}
	/* signal end of iteration */
	*idx = ULLONG_MAX;
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
/*
 * Common iterator interface used to define for_each_mem_range().
 */
void __init_memblock __next_mem_pfn_range(int *idx, int nid,
				unsigned long *out_start_pfn,
				unsigned long *out_end_pfn, int *out_nid)
{
	struct memblock_type *type = &memblock.memory;
	struct memblock_region *r;

	while (++*idx < type->cnt) {
		r = &type->regions[*idx];

		if (PFN_UP(r->base) >= PFN_DOWN(r->base + r->size))
			continue;
		if (nid == MAX_NUMNODES || nid == r->nid)
			break;
	}
	if (*idx >= type->cnt) {
		*idx = -1;
		return;
	}

	if (out_start_pfn)
		*out_start_pfn = PFN_UP(r->base);
	if (out_end_pfn)
		*out_end_pfn = PFN_DOWN(r->base + r->size);
	if (out_nid)
		*out_nid = r->nid;
}

/**
 * memblock_set_node - set node ID on memblock regions
 * @base: base of area to set node ID for
 * @size: size of area to set node ID for
 * @type: memblock type to set node ID for
 * @nid: node ID to set
 *
 * Set the nid of memblock @type regions in [@base,@base+@size) to @nid.
 * Regions which cross the area boundaries are split as necessary.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
				      struct memblock_type *type, int nid)
{
	int start_rgn, end_rgn;
	int i, ret;

	ret = memblock_isolate_range(type, base, size, &start_rgn, &end_rgn);
	if (ret)
		return ret;

	for (i = start_rgn; i < end_rgn; i++)
		memblock_set_region_node(&type->regions[i], nid);

	memblock_merge_regions(type);
	return 0;
}
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */

static phys_addr_t __init memblock_alloc_range_nid(phys_addr_t size,
					phys_addr_t align, phys_addr_t start,
					phys_addr_t end, int nid, ulong flags)
{
	phys_addr_t found;

	if (!align)
		align = SMP_CACHE_BYTES;

	found = memblock_find_in_range_node(size, align, start, end, nid,
					    flags);
	if (found && !memblock_reserve(found, size)) {
		/*
		 * The min_count is set to 0 so that memblock allocations are
		 * never reported as leaks.
		 */
		kmemleak_alloc(__va(found), size, 0, 0);
		return found;
	}
	return 0;
}

phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align,
					phys_addr_t start, phys_addr_t end,
					ulong flags)
{
	return memblock_alloc_range_nid(size, align, start, end, NUMA_NO_NODE,
					flags);
}

static phys_addr_t __init memblock_alloc_base_nid(phys_addr_t size,
					phys_addr_t align, phys_addr_t max_addr,
					int nid, ulong flags)
{
	return memblock_alloc_range_nid(size, align, 0, max_addr, nid, flags);
}

phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	ulong flags = choose_memblock_flags();
	phys_addr_t ret;

again:
	ret = memblock_alloc_base_nid(size, align, MEMBLOCK_ALLOC_ACCESSIBLE,
				      nid, flags);

	if (!ret && (flags & MEMBLOCK_MIRROR)) {
		flags &= ~MEMBLOCK_MIRROR;
		goto again;
	}
	return ret;
}

phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	return memblock_alloc_base_nid(size, align, max_addr, NUMA_NO_NODE,
				       MEMBLOCK_NONE);
}

phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	phys_addr_t alloc;

	alloc = __memblock_alloc_base(size, align, max_addr);

	if (alloc == 0)
		panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
		      (unsigned long long) size, (unsigned long long) max_addr);

	return alloc;
}

phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
{
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}
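
/*
 * Usage sketch (illustrative): early arch setup code typically grabs
 * physical memory with
 *
 *	phys_addr_t pa = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
 *
 * This panics on failure (via memblock_alloc_base()) and hands back a
 * physical address; callers map or __va() it themselves, unlike the
 * memblock_virt_alloc_*() family below.
 */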

phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	phys_addr_t res = memblock_alloc_nid(size, align, nid);

	if (res)
		return res;
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}

/**
 * memblock_virt_alloc_internal - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region to allocate (phys address)
 * @max_addr: the upper bound of the memory region to allocate (phys address)
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * The @min_addr limit is dropped if it can not be satisfied and the allocation
 * will fall back to memory below @min_addr. Also, allocation may fall back
 * to any node in the system if the specified node can not
 * hold the requested memory.
 *
 * The allocation is performed from memory region limited by
 * memblock.current_limit if @max_addr == %BOOTMEM_ALLOC_ACCESSIBLE.
 *
 * The memory block is aligned on SMP_CACHE_BYTES if @align == 0.
 *
 * The phys address of allocated boot memory block is converted to virtual and
 * allocated memory is reset to 0.
 *
 * In addition, function sets the min_count to 0 using kmemleak_alloc for
 * allocated boot memory block, so that it is never reported as leaks.
 *
 * RETURNS:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
static void * __init memblock_virt_alloc_internal(
				phys_addr_t size, phys_addr_t align,
				phys_addr_t min_addr, phys_addr_t max_addr,
				int nid)
{
	phys_addr_t alloc;
	void *ptr;
	ulong flags = choose_memblock_flags();

	if (WARN_ONCE(nid == MAX_NUMNODES, "Usage of MAX_NUMNODES is deprecated. Use NUMA_NO_NODE instead\n"))
		nid = NUMA_NO_NODE;

	/*
	 * Detect any accidental use of these APIs after slab is ready, as at
	 * this moment memblock may be deinitialized already and its
	 * internal data may be destroyed (after execution of free_all_bootmem)
	 */
	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, nid);

	if (!align)
		align = SMP_CACHE_BYTES;

	if (max_addr > memblock.current_limit)
		max_addr = memblock.current_limit;

again:
	alloc = memblock_find_in_range_node(size, align, min_addr, max_addr,
					    nid, flags);
	if (alloc)
		goto done;

	if (nid != NUMA_NO_NODE) {
		alloc = memblock_find_in_range_node(size, align, min_addr,
						    max_addr, NUMA_NO_NODE,
						    flags);
		if (alloc)
			goto done;
	}

	if (min_addr) {
		min_addr = 0;
		goto again;
	}

	if (flags & MEMBLOCK_MIRROR) {
		flags &= ~MEMBLOCK_MIRROR;
		pr_warn("Could not allocate %pap bytes of mirrored memory\n",
			&size);
		goto again;
	}

	return NULL;
done:
	memblock_reserve(alloc, size);
	ptr = phys_to_virt(alloc);
	memset(ptr, 0, size);

	/*
	 * The min_count is set to 0 so that bootmem allocated blocks
	 * are never reported as leaks. This is because many of these blocks
	 * are only referred via the physical address which is not
	 * looked up by kmemleak.
	 */
	kmemleak_alloc(ptr, size, 0, 0);

	return ptr;
}

/**
 * memblock_virt_alloc_try_nid_nopanic - allocate boot memory block
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public version of memblock_virt_alloc_internal() which provides
 * additional debug information (including caller info), if enabled.
 *
 * RETURNS:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
void * __init memblock_virt_alloc_try_nid_nopanic(
				phys_addr_t size, phys_addr_t align,
				phys_addr_t min_addr, phys_addr_t max_addr,
				int nid)
{
	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx %pF\n",
		     __func__, (u64)size, (u64)align, nid, (u64)min_addr,
		     (u64)max_addr, (void *)_RET_IP_);
	return memblock_virt_alloc_internal(size, align, min_addr,
					     max_addr, nid);
}

/**
 * memblock_virt_alloc_try_nid - allocate boot memory block with panicking
 * @size: size of memory block to be allocated in bytes
 * @align: alignment of the region and block's size
 * @min_addr: the lower bound of the memory region from where the allocation
 *	  is preferred (phys address)
 * @max_addr: the upper bound of the memory region from where the allocation
 *	      is preferred (phys address), or %BOOTMEM_ALLOC_ACCESSIBLE to
 *	      allocate only from memory limited by memblock.current_limit value
 * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
 *
 * Public panicking version of memblock_virt_alloc_internal()
 * which provides debug information (including caller info), if enabled,
 * and panics if the request can not be satisfied.
 *
 * RETURNS:
 * Virtual address of allocated memory block on success, NULL on failure.
 */
void * __init memblock_virt_alloc_try_nid(
			phys_addr_t size, phys_addr_t align,
			phys_addr_t min_addr, phys_addr_t max_addr,
			int nid)
{
	void *ptr;

	memblock_dbg("%s: %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx %pF\n",
		     __func__, (u64)size, (u64)align, nid, (u64)min_addr,
		     (u64)max_addr, (void *)_RET_IP_);
	ptr = memblock_virt_alloc_internal(size, align,
					   min_addr, max_addr, nid);
	if (ptr)
		return ptr;

	panic("%s: Failed to allocate %llu bytes align=0x%llx nid=%d from=0x%llx max_addr=0x%llx\n",
	      __func__, (u64)size, (u64)align, nid, (u64)min_addr,
	      (u64)max_addr);
	return NULL;
}
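
/*
 * Usage sketch (illustrative): early code that wants zeroed, already
 * mapped memory and can tolerate failure uses the _nopanic flavour:
 *
 *	table = memblock_virt_alloc_try_nid_nopanic(size, SMP_CACHE_BYTES,
 *			0, BOOTMEM_ALLOC_ACCESSIBLE, nid);
 *	if (!table)
 *		table = fallback_smaller_alloc();
 *
 * where fallback_smaller_alloc() stands in for whatever recovery the
 * caller can do.
 */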

/**
 * __memblock_free_early - free boot memory block
 * @base: phys starting address of the boot memory block
 * @size: size of the boot memory block in bytes
 *
 * Free boot memory block previously allocated by memblock_virt_alloc_xx() API.
 * The freeing memory will not be released to the buddy allocator.
 */
void __init __memblock_free_early(phys_addr_t base, phys_addr_t size)
{
	memblock_dbg("%s: [%#016llx-%#016llx] %pF\n",
		     __func__, (u64)base, (u64)base + size - 1,
		     (void *)_RET_IP_);
	kmemleak_free_part(__va(base), size);
	memblock_remove_range(&memblock.reserved, base, size);
}

/*
 * __memblock_free_late - free bootmem block pages directly to buddy allocator
 * @base: phys starting address of the boot memory block
 * @size: size of the boot memory block in bytes
 *
 * This is only useful when the bootmem allocator has already been torn
 * down, but we are still initializing the system.  Pages are released directly
 * to the buddy allocator, no bootmem metadata is updated because it is gone.
 */
void __init __memblock_free_late(phys_addr_t base, phys_addr_t size)
{
	u64 cursor, end;

	memblock_dbg("%s: [%#016llx-%#016llx] %pF\n",
		     __func__, (u64)base, (u64)base + size - 1,
		     (void *)_RET_IP_);
	kmemleak_free_part(__va(base), size);
	cursor = PFN_UP(base);
	end = PFN_DOWN(base + size);

	for (; cursor < end; cursor++) {
		__free_pages_bootmem(pfn_to_page(cursor), cursor, 0);
		totalram_pages++;
	}
}
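
/*
 * Usage sketch (illustrative): an init path that kept a boot-time
 * scratch buffer past free_all_bootmem() can hand its pages straight
 * to the buddy allocator with
 *
 *	__memblock_free_late(__pa(buf), buf_size);
 *
 * while __memblock_free_early() above is the right call for memory
 * freed while memblock is still the owner.
 */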

/*
 * Remaining API functions
 */

phys_addr_t __init memblock_phys_mem_size(void)
{
	return memblock.memory.total_size;
}

phys_addr_t __init memblock_mem_size(unsigned long limit_pfn)
{
	unsigned long pages = 0;
	struct memblock_region *r;
	unsigned long start_pfn, end_pfn;

	for_each_memblock(memory, r) {
		start_pfn = memblock_region_memory_base_pfn(r);
		end_pfn = memblock_region_memory_end_pfn(r);
		start_pfn = min_t(unsigned long, start_pfn, limit_pfn);
		end_pfn = min_t(unsigned long, end_pfn, limit_pfn);
		pages += end_pfn - start_pfn;
	}

	return PFN_PHYS(pages);
}

/* lowest address */
phys_addr_t __init_memblock memblock_start_of_DRAM(void)
{
	return memblock.memory.regions[0].base;
}

phys_addr_t __init_memblock memblock_end_of_DRAM(void)
{
	int idx = memblock.memory.cnt - 1;

	return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
}

void __init memblock_enforce_memory_limit(phys_addr_t limit)
{
	phys_addr_t max_addr = (phys_addr_t)ULLONG_MAX;
	struct memblock_region *r;

	if (!limit)
		return;

	/* find out max address */
	for_each_memblock(memory, r) {
		if (limit <= r->size) {
			max_addr = r->base + limit;
			break;
		}
		limit -= r->size;
	}

	/* truncate both memory and reserved regions */
	memblock_remove_range(&memblock.memory, max_addr,
			      (phys_addr_t)ULLONG_MAX);
	memblock_remove_range(&memblock.reserved, max_addr,
			      (phys_addr_t)ULLONG_MAX);
}

static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr)
{
	unsigned int left = 0, right = type->cnt;

	do {
		unsigned int mid = (right + left) / 2;

		if (addr < type->regions[mid].base)
			right = mid;
		else if (addr >= (type->regions[mid].base +
				  type->regions[mid].size))
			left = mid + 1;
		else
			return mid;
	} while (left < right);
	return -1;
}
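
/*
 * Worked example (illustrative): with two regions [0x1000, 0x2000) and
 * [0x4000, 0x5000), memblock_search(type, 0x4800) probes mid = 1 and
 * returns 1 immediately, while 0x3000 narrows to an empty interval and
 * returns -1 because it sits in the gap between the regions.
 */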

int __init memblock_is_reserved(phys_addr_t addr)
{
	return memblock_search(&memblock.reserved, addr) != -1;
}

int __init_memblock memblock_is_memory(phys_addr_t addr)
{
	return memblock_search(&memblock.memory, addr) != -1;
}

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int __init_memblock memblock_search_pfn_nid(unsigned long pfn,
			 unsigned long *start_pfn, unsigned long *end_pfn)
{
	struct memblock_type *type = &memblock.memory;
	int mid = memblock_search(type, PFN_PHYS(pfn));

	if (mid == -1)
		return -1;

	*start_pfn = PFN_DOWN(type->regions[mid].base);
	*end_pfn = PFN_DOWN(type->regions[mid].base + type->regions[mid].size);

	return type->regions[mid].nid;
}
#endif

/**
 * memblock_is_region_memory - check if a region is a subset of memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base+@size) is a subset of a memory block.
 *
 * RETURNS:
 * 0 if false, non-zero if true
 */
int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
{
	int idx = memblock_search(&memblock.memory, base);
	phys_addr_t end = base + memblock_cap_size(base, &size);

	if (idx == -1)
		return 0;
	return memblock.memory.regions[idx].base <= base &&
		(memblock.memory.regions[idx].base +
		 memblock.memory.regions[idx].size) >= end;
}

/**
 * memblock_is_region_reserved - check if a region intersects reserved memory
 * @base: base of region to check
 * @size: size of region to check
 *
 * Check if the region [@base, @base+@size) intersects a reserved memory block.
 *
 * RETURNS:
 * True if they intersect, false if not.
 */
bool __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
{
	memblock_cap_size(base, &size);
	return memblock_overlaps_region(&memblock.reserved, base, size);
}

void __init_memblock memblock_trim_memory(phys_addr_t align)
{
	phys_addr_t start, end, orig_start, orig_end;
	struct memblock_region *r;

	for_each_memblock(memory, r) {
		orig_start = r->base;
		orig_end = r->base + r->size;
		start = round_up(orig_start, align);
		end = round_down(orig_end, align);

		if (start == orig_start && end == orig_end)
			continue;

		if (start < end) {
			r->base = start;
			r->size = end - start;
		} else {
			memblock_remove_region(&memblock.memory,
					       r - memblock.memory.regions);
			r--;
		}
	}
}
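
/*
 * Worked example (illustrative): with align = 0x1000, a region
 * [0x1234, 0x5678) is trimmed to [0x2000, 0x5000); a region smaller
 * than one aligned block, e.g. [0x1100, 0x1300), rounds to an empty
 * range and is removed outright.
 */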

void __init_memblock memblock_set_current_limit(phys_addr_t limit)
{
	memblock.current_limit = limit;
}

phys_addr_t __init_memblock memblock_get_current_limit(void)
{
	return memblock.current_limit;
}

static void __init_memblock memblock_dump(struct memblock_type *type, char *name)
{
	unsigned long long base, size;
	unsigned long flags;
	int i;

	pr_info(" %s.cnt  = 0x%lx\n", name, type->cnt);

	for (i = 0; i < type->cnt; i++) {
		struct memblock_region *rgn = &type->regions[i];
		char nid_buf[32] = "";

		base = rgn->base;
		size = rgn->size;
		flags = rgn->flags;
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
		if (memblock_get_region_node(rgn) != MAX_NUMNODES)
			snprintf(nid_buf, sizeof(nid_buf), " on node %d",
				 memblock_get_region_node(rgn));
#endif
		pr_info(" %s[%#x]\t[%#016llx-%#016llx], %#llx bytes%s flags: %#lx\n",
			name, i, base, base + size - 1, size, nid_buf, flags);
	}
}

void __init_memblock __memblock_dump_all(void)
{
	pr_info("MEMBLOCK configuration:\n");
	pr_info(" memory size = %#llx reserved size = %#llx\n",
		(unsigned long long)memblock.memory.total_size,
		(unsigned long long)memblock.reserved.total_size);

	memblock_dump(&memblock.memory, "memory");
	memblock_dump(&memblock.reserved, "reserved");
}

void __init memblock_allow_resize(void)
{
	memblock_can_resize = 1;
}

static int __init early_memblock(char *p)
{
	if (p && strstr(p, "debug"))
		memblock_debug = 1;
	return 0;
}
early_param("memblock", early_memblock);

#if defined(CONFIG_DEBUG_FS) && !defined(CONFIG_ARCH_DISCARD_MEMBLOCK)

static int memblock_debug_show(struct seq_file *m, void *private)
{
	struct memblock_type *type = m->private;
	struct memblock_region *reg;
	int i;

	for (i = 0; i < type->cnt; i++) {
		reg = &type->regions[i];
		seq_printf(m, "%4d: ", i);
		if (sizeof(phys_addr_t) == 4)
			seq_printf(m, "0x%08lx..0x%08lx\n",
				   (unsigned long)reg->base,
				   (unsigned long)(reg->base + reg->size - 1));
		else
			seq_printf(m, "0x%016llx..0x%016llx\n",
				   (unsigned long long)reg->base,
				   (unsigned long long)(reg->base + reg->size - 1));

	}
	return 0;
}

static int memblock_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, memblock_debug_show, inode->i_private);
}

static const struct file_operations memblock_debug_fops = {
	.open = memblock_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __init memblock_init_debugfs(void)
{
	struct dentry *root = debugfs_create_dir("memblock", NULL);
	if (!root)
		return -ENXIO;
	debugfs_create_file("memory", S_IRUGO, root, &memblock.memory, &memblock_debug_fops);
	debugfs_create_file("reserved", S_IRUGO, root, &memblock.reserved, &memblock_debug_fops);
#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
	debugfs_create_file("physmem", S_IRUGO, root, &memblock.physmem, &memblock_debug_fops);
#endif

	return 0;
}
__initcall(memblock_init_debugfs);

#endif /* CONFIG_DEBUG_FS */