/*
 * Procedures for maintaining information about logical memory blocks.
 *
 * Peter Bergner, IBM Corp.	June 2001.
 * Copyright (C) 2001 Peter Bergner.
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/poison.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>

struct memblock memblock;

int memblock_debug;
int memblock_can_resize;
static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS + 1];
static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS + 1];

/* inline so we don't get a warning when pr_debug is compiled out */
static inline const char *memblock_type_name(struct memblock_type *type)
{
	if (type == &memblock.memory)
		return "memory";
	else if (type == &memblock.reserved)
		return "reserved";
	else
		return "unknown";
}

/*
 * Address comparison utilities
 */

static phys_addr_t memblock_align_down(phys_addr_t addr, phys_addr_t size)
{
	return addr & ~(size - 1);
}

static phys_addr_t memblock_align_up(phys_addr_t addr, phys_addr_t size)
{
	return (addr + (size - 1)) & ~(size - 1);
}

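/*
 * Illustration of the helpers above (the values are made up): with a
 * power-of-two @size such as 0x1000, memblock_align_down(0x12345, 0x1000)
 * yields 0x12000 and memblock_align_up(0x12345, 0x1000) yields 0x13000.
 * Neither helper is meaningful for a non-power-of-two @size.
 */
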
static unsigned long memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1,
				       phys_addr_t base2, phys_addr_t size2)
{
	return ((base1 < (base2 + size2)) && (base2 < (base1 + size1)));
}

static long memblock_addrs_adjacent(phys_addr_t base1, phys_addr_t size1,
			       phys_addr_t base2, phys_addr_t size2)
{
	if (base2 == base1 + size1)
		return 1;
	else if (base1 == base2 + size2)
		return -1;

	return 0;
}

static long memblock_regions_adjacent(struct memblock_type *type,
				 unsigned long r1, unsigned long r2)
{
	phys_addr_t base1 = type->regions[r1].base;
	phys_addr_t size1 = type->regions[r1].size;
	phys_addr_t base2 = type->regions[r2].base;
	phys_addr_t size2 = type->regions[r2].size;

	return memblock_addrs_adjacent(base1, size1, base2, size2);
}

long memblock_overlaps_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size)
{
	unsigned long i;

	for (i = 0; i < type->cnt; i++) {
		phys_addr_t rgnbase = type->regions[i].base;
		phys_addr_t rgnsize = type->regions[i].size;
		if (memblock_addrs_overlap(base, size, rgnbase, rgnsize))
			break;
	}

	return (i < type->cnt) ? i : -1;
}

/*
 * Find, allocate, deallocate or reserve unreserved regions. All allocations
 * are top-down.
 */

static phys_addr_t __init memblock_find_region(phys_addr_t start, phys_addr_t end,
					  phys_addr_t size, phys_addr_t align)
{
	phys_addr_t base, res_base;
	long j;

	base = memblock_align_down((end - size), align);
	while (start <= base) {
		j = memblock_overlaps_region(&memblock.reserved, base, size);
		if (j < 0)
			return base;
		res_base = memblock.reserved.regions[j].base;
		if (res_base < size)
			break;
		base = memblock_align_down(res_base - size, align);
	}

	return MEMBLOCK_ERROR;
}
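
/*
 * A worked example of the top-down scan above (made-up addresses): given
 * start = 0, end = 0x10000, size = align = 0x1000 and a single reserved
 * region at [0xf000..0xffff], the first candidate is base = 0xf000, which
 * overlaps the reservation, so the search drops below it to base = 0xe000
 * and returns that.
 */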

static phys_addr_t __init memblock_find_base(phys_addr_t size, phys_addr_t align,
					phys_addr_t start, phys_addr_t end)
{
	long i;

	BUG_ON(0 == size);

	size = memblock_align_up(size, align);

	/* Pump up max_addr */
	if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
		end = memblock.current_limit;

	/* We do a top-down search; this tends to limit memory
	 * fragmentation by keeping early boot allocs near the
	 * top of memory.
	 */
	for (i = memblock.memory.cnt - 1; i >= 0; i--) {
		phys_addr_t memblockbase = memblock.memory.regions[i].base;
		phys_addr_t memblocksize = memblock.memory.regions[i].size;
		phys_addr_t bottom, top, found;

		if (memblocksize < size)
			continue;
		if ((memblockbase + memblocksize) <= start)
			break;
		bottom = max(memblockbase, start);
		top = min(memblockbase + memblocksize, end);
		if (bottom >= top)
			continue;
		found = memblock_find_region(bottom, top, size, align);
		if (found != MEMBLOCK_ERROR)
			return found;
	}
	return MEMBLOCK_ERROR;
}

static void memblock_remove_region(struct memblock_type *type, unsigned long r)
{
	unsigned long i;

	for (i = r; i < type->cnt - 1; i++) {
		type->regions[i].base = type->regions[i + 1].base;
		type->regions[i].size = type->regions[i + 1].size;
	}
	type->cnt--;
}

/* Assumption: base addr of region 1 < base addr of region 2 */
static void memblock_coalesce_regions(struct memblock_type *type,
		unsigned long r1, unsigned long r2)
{
	type->regions[r1].size += type->regions[r2].size;
	memblock_remove_region(type, r2);
}

/* Defined below but needed now */
static long memblock_add_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size);

static int memblock_double_array(struct memblock_type *type)
{
	struct memblock_region *new_array, *old_array;
	phys_addr_t old_size, new_size, addr;
	int use_slab = slab_is_available();

	/* We don't allow resizing until we know about the reserved regions
	 * of memory that aren't suitable for allocation
	 */
	if (!memblock_can_resize)
		return -1;

	/* Calculate new doubled size */
	old_size = type->max * sizeof(struct memblock_region);
	new_size = old_size << 1;

	/* Try to find some space for it.
	 *
	 * WARNING: We assume that either slab_is_available() is true and we
	 * use kmalloc(), or we allocate from MEMBLOCK itself. That means this
	 * is unsafe to use when bootmem is currently active (unless bootmem
	 * itself is implemented on top of MEMBLOCK, which isn't the case yet).
	 *
	 * This shouldn't be an issue for now though, as we currently only
	 * call into MEMBLOCK while it's still active, or much later when slab
	 * is active for memory hotplug operations.
	 */
	if (use_slab) {
		new_array = kmalloc(new_size, GFP_KERNEL);
		addr = new_array == NULL ? MEMBLOCK_ERROR : __pa(new_array);
	} else
		addr = memblock_find_base(new_size, sizeof(phys_addr_t), 0, MEMBLOCK_ALLOC_ACCESSIBLE);
	if (addr == MEMBLOCK_ERROR) {
		pr_err("memblock: Failed to double %s array from %ld to %ld entries!\n",
		       memblock_type_name(type), type->max, type->max * 2);
		return -1;
	}
	new_array = __va(addr);

	memblock_dbg("memblock: %s array is doubled to %ld at [%#010llx-%#010llx]",
		 memblock_type_name(type), type->max * 2, (u64)addr, (u64)addr + new_size - 1);

	/* Found space, we now need to move the array over before
	 * we add the reserved region since it may be our reserved
	 * array itself that is full.
	 */
	memcpy(new_array, type->regions, old_size);
	memset(new_array + type->max, 0, old_size);
	old_array = type->regions;
	type->regions = new_array;
	type->max <<= 1;

	/* If we use SLAB that's it, we are done */
	if (use_slab)
		return 0;

	/* Add the new reserved region now. Should not fail! */
	BUG_ON(memblock_add_region(&memblock.reserved, addr, new_size) < 0);

	/* If the array wasn't our static init one, then free it. We only do
	 * that before SLAB is available as later on, we don't know whether
	 * to use kfree or free_bootmem_pages(). Shouldn't be a big deal
	 * anyway.
	 */
	if (old_array != memblock_memory_init_regions &&
	    old_array != memblock_reserved_init_regions)
		memblock_free(__pa(old_array), old_size);

	return 0;
}
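
/*
 * For example (assuming INIT_MEMBLOCK_REGIONS is 128, as defined in
 * memblock.h), the first doubling grows an array to 256 entries; before
 * slab is up the new array is carved out of memblock itself and
 * immediately reserved, while after slab is up it simply comes from
 * kmalloc().
 */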

extern int __weak memblock_memory_can_coalesce(phys_addr_t addr1, phys_addr_t size1,
					  phys_addr_t addr2, phys_addr_t size2)
{
	return 1;
}

static long memblock_add_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size)
{
	unsigned long coalesced = 0;
	long adjacent, i;

	if ((type->cnt == 1) && (type->regions[0].size == 0)) {
		type->regions[0].base = base;
		type->regions[0].size = size;
		return 0;
	}

	/* First try to coalesce this MEMBLOCK with another. */
	for (i = 0; i < type->cnt; i++) {
		phys_addr_t rgnbase = type->regions[i].base;
		phys_addr_t rgnsize = type->regions[i].size;

		if ((rgnbase == base) && (rgnsize == size))
			/* Already have this region, so we're done */
			return 0;

		adjacent = memblock_addrs_adjacent(base, size, rgnbase, rgnsize);
		/* Check if arch allows coalescing */
		if (adjacent != 0 && type == &memblock.memory &&
		    !memblock_memory_can_coalesce(base, size, rgnbase, rgnsize))
			break;
		if (adjacent > 0) {
			type->regions[i].base -= size;
			type->regions[i].size += size;
			coalesced++;
			break;
		} else if (adjacent < 0) {
			type->regions[i].size += size;
			coalesced++;
			break;
		}
	}

	/* If we plugged a hole, we may want to also coalesce with the
	 * next region
	 */
	if ((i < type->cnt - 1) && memblock_regions_adjacent(type, i, i+1) &&
	    ((type != &memblock.memory || memblock_memory_can_coalesce(type->regions[i].base,
							     type->regions[i].size,
							     type->regions[i+1].base,
							     type->regions[i+1].size)))) {
		memblock_coalesce_regions(type, i, i+1);
		coalesced++;
	}

	if (coalesced)
		return coalesced;

	/* If we are out of space, we fail. It's too late to resize the array
	 * but then this shouldn't have happened in the first place.
	 */
	if (WARN_ON(type->cnt >= type->max))
		return -1;

	/* Couldn't coalesce the MEMBLOCK, so add it to the sorted table. */
	for (i = type->cnt - 1; i >= 0; i--) {
		if (base < type->regions[i].base) {
			type->regions[i+1].base = type->regions[i].base;
			type->regions[i+1].size = type->regions[i].size;
		} else {
			type->regions[i+1].base = base;
			type->regions[i+1].size = size;
			break;
		}
	}

	if (base < type->regions[0].base) {
		type->regions[0].base = base;
		type->regions[0].size = size;
	}
	type->cnt++;

	/* Is the array full? Try to resize it. If that fails, we undo
	 * our allocation and return an error.
	 */
	if (type->cnt == type->max && memblock_double_array(type)) {
		type->cnt--;
		return -1;
	}

	return 0;
}
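
/*
 * Coalescing example (made-up ranges): after memblock_add_region() of
 * [0x1000..0x1fff] and then [0x2000..0x2fff], the array holds a single
 * region [0x1000..0x2fff] with cnt == 1, because the second range is
 * adjacent to the first and the arch hook allows merging.
 */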

long memblock_add(phys_addr_t base, phys_addr_t size)
{
	return memblock_add_region(&memblock.memory, base, size);
}

static long __memblock_remove(struct memblock_type *type, phys_addr_t base, phys_addr_t size)
{
	phys_addr_t rgnbegin, rgnend;
	phys_addr_t end = base + size;
	int i;

	rgnbegin = rgnend = 0; /* suppress gcc warnings */

	/* Find the region where (base, size) belongs */
	for (i = 0; i < type->cnt; i++) {
		rgnbegin = type->regions[i].base;
		rgnend = rgnbegin + type->regions[i].size;

		if ((rgnbegin <= base) && (end <= rgnend))
			break;
	}

	/* Didn't find the region */
	if (i == type->cnt)
		return -1;

	/* Check to see if we are removing the entire region */
	if ((rgnbegin == base) && (rgnend == end)) {
		memblock_remove_region(type, i);
		return 0;
	}

	/* Check to see if the region is matching at the front */
	if (rgnbegin == base) {
		type->regions[i].base = end;
		type->regions[i].size -= size;
		return 0;
	}

	/* Check to see if the region is matching at the end */
	if (rgnend == end) {
		type->regions[i].size -= size;
		return 0;
	}

	/*
	 * We need to split the entry - adjust the current one to the
	 * beginning of the hole and add the region after the hole.
	 */
	type->regions[i].size = base - type->regions[i].base;
	return memblock_add_region(type, end, rgnend - end);
}
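
/*
 * Split example (made-up ranges): with a region [0x0..0x3fff] registered,
 * removing [0x1000..0x1fff] hits the final case above, shrinking the
 * region to [0x0..0xfff] and re-adding [0x2000..0x3fff] as a new region.
 */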

long memblock_remove(phys_addr_t base, phys_addr_t size)
{
	return __memblock_remove(&memblock.memory, base, size);
}

long __init memblock_free(phys_addr_t base, phys_addr_t size)
{
	return __memblock_remove(&memblock.reserved, base, size);
}

long __init memblock_reserve(phys_addr_t base, phys_addr_t size)
{
	struct memblock_type *_rgn = &memblock.reserved;

	BUG_ON(0 == size);

	return memblock_add_region(_rgn, base, size);
}

phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	phys_addr_t found;

	/* We align the size to limit fragmentation. Without this, a lot of
	 * small allocs quickly eat up the whole reserve array on sparc
	 */
	size = memblock_align_up(size, align);

	found = memblock_find_base(size, align, 0, max_addr);
	if (found != MEMBLOCK_ERROR &&
	    memblock_add_region(&memblock.reserved, found, size) >= 0)
		return found;

	return 0;
}

phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr)
{
	phys_addr_t alloc;

	alloc = __memblock_alloc_base(size, align, max_addr);

	if (alloc == 0)
		panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n",
		      (unsigned long long) size, (unsigned long long) max_addr);

	return alloc;
}

phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align)
{
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}
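
/*
 * A minimal usage sketch (a hypothetical early-boot caller; the symbols
 * ram_size, _stext and _end stand in for arch-specific values):
 *
 *	memblock_init();
 *	memblock_add(0, ram_size);
 *	memblock_reserve(__pa(_stext), _end - _stext);
 *	memblock_analyze();
 *	buf = __va(memblock_alloc(1 << 20, PAGE_SIZE));
 *
 * memblock_alloc() panics on failure via memblock_alloc_base(); callers
 * that can recover should use __memblock_alloc_base() and check for 0.
 */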


/*
 * Additional node-local allocators. Search for node memory is bottom up
 * and walks memblock regions within that node bottom-up as well, but allocation
 * within a memblock region is top-down. XXX I plan to fix that at some stage
 *
 * WARNING: Only available after early_node_map[] has been populated,
 * on some architectures, that is after all the calls to add_active_range()
 * have been done to populate it.
 */

phys_addr_t __weak __init memblock_nid_range(phys_addr_t start, phys_addr_t end, int *nid)
{
#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
	/*
	 * This code originates from sparc which really wants us to walk by addresses
	 * and returns the nid. This is not very convenient for early_pfn_map[] users
	 * as the map isn't sorted yet, and it really wants to be walked by nid.
	 *
	 * For now, I implement the inefficient method below which walks the early
	 * map multiple times. Eventually we may want to use an ARCH config option
	 * to implement a completely different method for both cases.
	 */
	unsigned long start_pfn, end_pfn;
	int i;

	for (i = 0; i < MAX_NUMNODES; i++) {
		get_pfn_range_for_nid(i, &start_pfn, &end_pfn);
		if (start < PFN_PHYS(start_pfn) || start >= PFN_PHYS(end_pfn))
			continue;
		*nid = i;
		return min(end, PFN_PHYS(end_pfn));
	}
#endif
	*nid = 0;

	return end;
}

static phys_addr_t __init memblock_alloc_nid_region(struct memblock_region *mp,
					       phys_addr_t size,
					       phys_addr_t align, int nid)
{
	phys_addr_t start, end;

	start = mp->base;
	end = start + mp->size;

	start = memblock_align_up(start, align);
	while (start < end) {
		phys_addr_t this_end;
		int this_nid;

		this_end = memblock_nid_range(start, end, &this_nid);
		if (this_nid == nid) {
			phys_addr_t ret = memblock_find_region(start, this_end, size, align);
			if (ret != MEMBLOCK_ERROR &&
			    memblock_add_region(&memblock.reserved, ret, size) >= 0)
				return ret;
		}
		start = this_end;
	}

	return MEMBLOCK_ERROR;
}

phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	struct memblock_type *mem = &memblock.memory;
	int i;

	BUG_ON(0 == size);

	/* We align the size to limit fragmentation. Without this, a lot of
	 * small allocs quickly eat up the whole reserve array on sparc
	 */
	size = memblock_align_up(size, align);

	/* We do a bottom-up search for a region with the right
	 * nid since that's easier considering how memblock_nid_range()
	 * works
	 */
	for (i = 0; i < mem->cnt; i++) {
		phys_addr_t ret = memblock_alloc_nid_region(&mem->regions[i],
					       size, align, nid);
		if (ret != MEMBLOCK_ERROR)
			return ret;
	}

	return 0;
}

phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid)
{
	phys_addr_t res = memblock_alloc_nid(size, align, nid);

	if (res)
		return res;
	return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ANYWHERE);
}
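
/*
 * Usage note (illustrative): a NUMA-aware caller would normally prefer
 * memblock_alloc_try_nid(size, align, nid), which falls back to any node
 * when @nid has no suitable memory, over memblock_alloc_nid(), which
 * returns 0 in that case and leaves the fallback to the caller.
 */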

/*
 * Remaining API functions
 */

/* You must call memblock_analyze() before this. */
phys_addr_t __init memblock_phys_mem_size(void)
{
	return memblock.memory_size;
}

phys_addr_t memblock_end_of_DRAM(void)
{
	int idx = memblock.memory.cnt - 1;

	return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size);
}

/* You must call memblock_analyze() after this. */
void __init memblock_enforce_memory_limit(phys_addr_t memory_limit)
{
	unsigned long i;
	phys_addr_t limit;
	struct memblock_region *p;

	if (!memory_limit)
		return;

	/* Truncate the memblock regions to satisfy the memory limit. */
	limit = memory_limit;
	for (i = 0; i < memblock.memory.cnt; i++) {
		if (limit > memblock.memory.regions[i].size) {
			limit -= memblock.memory.regions[i].size;
			continue;
		}

		memblock.memory.regions[i].size = limit;
		memblock.memory.cnt = i + 1;
		break;
	}

	memory_limit = memblock_end_of_DRAM();

	/* And truncate any reserves above the limit also. */
	for (i = 0; i < memblock.reserved.cnt; i++) {
		p = &memblock.reserved.regions[i];

		if (p->base > memory_limit)
			p->size = 0;
		else if ((p->base + p->size) > memory_limit)
			p->size = memory_limit - p->base;

		if (p->size == 0) {
			memblock_remove_region(&memblock.reserved, i);
			i--;
		}
	}
}
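
/*
 * Example (made-up layout): with memory regions [0..2G) and [4G..6G) and
 * memory_limit = 3G, the first loop above keeps [0..2G) intact, truncates
 * the second region to [4G..5G), and reserved regions above the new
 * end-of-DRAM are then clipped or dropped by the second loop.
 */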

static int memblock_search(struct memblock_type *type, phys_addr_t addr)
{
	unsigned int left = 0, right = type->cnt;

	do {
		unsigned int mid = (right + left) / 2;

		if (addr < type->regions[mid].base)
			right = mid;
		else if (addr >= (type->regions[mid].base +
				  type->regions[mid].size))
			left = mid + 1;
		else
			return mid;
	} while (left < right);
	return -1;
}
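
/*
 * The binary search relies on memblock_add_region() keeping each array
 * sorted by base and non-overlapping. E.g. (made-up table) for bases
 * {0x0, 0x4000, 0x8000}, a lookup of 0x5000 probes the middle entry
 * first and returns its index if 0x5000 falls inside that region.
 */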

int __init memblock_is_reserved(phys_addr_t addr)
{
	return memblock_search(&memblock.reserved, addr) != -1;
}

int memblock_is_memory(phys_addr_t addr)
{
	return memblock_search(&memblock.memory, addr) != -1;
}

int memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
{
	int idx = memblock_search(&memblock.memory, base);

	if (idx == -1)
		return 0;
	return memblock.memory.regions[idx].base <= base &&
		(memblock.memory.regions[idx].base +
		 memblock.memory.regions[idx].size) >= (base + size);
}

int memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
{
	return memblock_overlaps_region(&memblock.reserved, base, size) >= 0;
}


void __init memblock_set_current_limit(phys_addr_t limit)
{
	memblock.current_limit = limit;
}

static void memblock_dump(struct memblock_type *region, char *name)
{
	unsigned long long base, size;
	int i;

	pr_info(" %s.cnt  = 0x%lx\n", name, region->cnt);

	for (i = 0; i < region->cnt; i++) {
		base = region->regions[i].base;
		size = region->regions[i].size;

		pr_info(" %s[%#x]\t[%#016llx-%#016llx], %#llx bytes\n",
		    name, i, base, base + size - 1, size);
	}
}

void memblock_dump_all(void)
{
	if (!memblock_debug)
		return;

	pr_info("MEMBLOCK configuration:\n");
	pr_info(" memory size = 0x%llx\n", (unsigned long long)memblock.memory_size);

	memblock_dump(&memblock.memory, "memory");
	memblock_dump(&memblock.reserved, "reserved");
}

void __init memblock_analyze(void)
{
	int i;

	/* Check marker in the unused last array entry */
	WARN_ON(memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS].base
		!= (phys_addr_t)RED_INACTIVE);
	WARN_ON(memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS].base
		!= (phys_addr_t)RED_INACTIVE);

	memblock.memory_size = 0;

	for (i = 0; i < memblock.memory.cnt; i++)
		memblock.memory_size += memblock.memory.regions[i].size;

	/* We allow resizing from there */
	memblock_can_resize = 1;
}

void __init memblock_init(void)
{
	/* Hookup the initial arrays */
	memblock.memory.regions	= memblock_memory_init_regions;
	memblock.memory.max		= INIT_MEMBLOCK_REGIONS;
	memblock.reserved.regions	= memblock_reserved_init_regions;
	memblock.reserved.max	= INIT_MEMBLOCK_REGIONS;

	/* Write a marker in the unused last array entry */
	memblock.memory.regions[INIT_MEMBLOCK_REGIONS].base = (phys_addr_t)RED_INACTIVE;
	memblock.reserved.regions[INIT_MEMBLOCK_REGIONS].base = (phys_addr_t)RED_INACTIVE;

	/* Create a dummy zero-sized MEMBLOCK which will get coalesced away
	 * later. This simplifies the memblock_add() code.
	 */
	memblock.memory.regions[0].base = 0;
	memblock.memory.regions[0].size = 0;
	memblock.memory.cnt = 1;

	/* Ditto. */
	memblock.reserved.regions[0].base = 0;
	memblock.reserved.regions[0].size = 0;
	memblock.reserved.cnt = 1;

	memblock.current_limit = MEMBLOCK_ALLOC_ANYWHERE;
}
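
/*
 * Boot-time lifecycle sketch (illustrative; the actual calls live in each
 * architecture's setup code):
 *
 *	memblock_init();		// hook up the static arrays
 *	memblock_add(base, size);	// once per RAM bank
 *	memblock_reserve(base, size);	// firmware tables, kernel image, ...
 *	memblock_analyze();		// compute memory_size, allow resize
 *	// memblock_alloc() and friends may be used from here on
 */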

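/* Booting with "memblock=debug" on the kernel command line sets
 * memblock_debug, which turns on the reporting done by
 * memblock_dump_all() (and by the memblock_dbg() macro in memblock.h).
 */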
static int __init early_memblock(char *p)
{
	if (p && strstr(p, "debug"))
		memblock_debug = 1;
	return 0;
}
early_param("memblock", early_memblock);

#ifdef CONFIG_DEBUG_FS

static int memblock_debug_show(struct seq_file *m, void *private)
{
	struct memblock_type *type = m->private;
	struct memblock_region *reg;
	int i;

	for (i = 0; i < type->cnt; i++) {
		reg = &type->regions[i];
		seq_printf(m, "%4d: ", i);
		if (sizeof(phys_addr_t) == 4)
			seq_printf(m, "0x%08lx..0x%08lx\n",
				   (unsigned long)reg->base,
				   (unsigned long)(reg->base + reg->size - 1));
		else
			seq_printf(m, "0x%016llx..0x%016llx\n",
				   (unsigned long long)reg->base,
				   (unsigned long long)(reg->base + reg->size - 1));
	}
	return 0;
}

static int memblock_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, memblock_debug_show, inode->i_private);
}

static const struct file_operations memblock_debug_fops = {
	.open = memblock_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __init memblock_init_debugfs(void)
{
	struct dentry *root = debugfs_create_dir("memblock", NULL);
	if (!root)
		return -ENXIO;
	debugfs_create_file("memory", S_IRUGO, root, &memblock.memory, &memblock_debug_fops);
	debugfs_create_file("reserved", S_IRUGO, root, &memblock.reserved, &memblock_debug_fops);

	return 0;
}
__initcall(memblock_init_debugfs);

#endif /* CONFIG_DEBUG_FS */