/*
 *  bootmem - A boot-time physical memory allocator and configurator
 *
 *  Copyright (C) 1999 Ingo Molnar
 *                1999 Kanoj Sarcar, SGI
 *                2008 Johannes Weiner
 *
 * Access to this subsystem has to be serialized externally (which is true
 * for the boot process anyway).
 */
#include <linux/init.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/bootmem.h>
#include <linux/export.h>
#include <linux/kmemleak.h>
#include <linux/range.h>
#include <linux/memblock.h>

#include <asm/bug.h>
#include <asm/io.h>
#include <asm/processor.h>

#include "internal.h"

#ifndef CONFIG_NEED_MULTIPLE_NODES
struct pglist_data __refdata contig_page_data = {
	.bdata = &bootmem_node_data[0]
};
EXPORT_SYMBOL(contig_page_data);
#endif

unsigned long max_low_pfn;
unsigned long min_low_pfn;
unsigned long max_pfn;

bootmem_data_t bootmem_node_data[MAX_NUMNODES] __initdata;

static struct list_head bdata_list __initdata = LIST_HEAD_INIT(bdata_list);

static int bootmem_debug;

static int __init bootmem_debug_setup(char *buf)
{
	bootmem_debug = 1;
	return 0;
}
early_param("bootmem_debug", bootmem_debug_setup);

#define bdebug(fmt, args...) ({				\
	if (unlikely(bootmem_debug))			\
		printk(KERN_INFO			\
			"bootmem::%s " fmt,		\
			__func__, ## args);		\
})

static unsigned long __init bootmap_bytes(unsigned long pages)
{
	unsigned long bytes = DIV_ROUND_UP(pages, 8);

	return ALIGN(bytes, sizeof(long));
}

/**
 * bootmem_bootmap_pages - calculate bitmap size in pages
 * @pages: number of pages the bitmap has to represent
 */
unsigned long __init bootmem_bootmap_pages(unsigned long pages)
{
	unsigned long bytes = bootmap_bytes(pages);

	return PAGE_ALIGN(bytes) >> PAGE_SHIFT;
}

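/*
 * Example (illustrative numbers only, assuming 4 KiB pages and 64-bit
 * longs): a node spanning 131072 pages (512 MiB) needs
 * DIV_ROUND_UP(131072, 8) == 16384 bitmap bytes, which the helpers above
 * round up to 4 whole pages.
 */
#if 0	/* sketch, never compiled */
	unsigned long span = 131072;
	unsigned long bytes = bootmap_bytes(span);		/* 16384 */
	unsigned long map_pages = bootmem_bootmap_pages(span);	/* 4 */
#endif
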
/*
 * link bdata in order
 */
static void __init link_bootmem(bootmem_data_t *bdata)
{
	bootmem_data_t *ent;

	list_for_each_entry(ent, &bdata_list, list) {
		if (bdata->node_min_pfn < ent->node_min_pfn) {
			list_add_tail(&bdata->list, &ent->list);
			return;
		}
	}

	list_add_tail(&bdata->list, &bdata_list);
}

/*
 * Called once to set up the allocator itself.
 */
static unsigned long __init init_bootmem_core(bootmem_data_t *bdata,
	unsigned long mapstart, unsigned long start, unsigned long end)
{
	unsigned long mapsize;

	mminit_validate_memmodel_limits(&start, &end);
	bdata->node_bootmem_map = phys_to_virt(PFN_PHYS(mapstart));
	bdata->node_min_pfn = start;
	bdata->node_low_pfn = end;
	link_bootmem(bdata);

	/*
	 * Initially all pages are reserved - setup_arch() has to
	 * register free RAM areas explicitly.
	 */
	mapsize = bootmap_bytes(end - start);
	memset(bdata->node_bootmem_map, 0xff, mapsize);

	bdebug("nid=%td start=%lx map=%lx end=%lx mapsize=%lx\n",
		bdata - bootmem_node_data, start, mapstart, end, mapsize);

	return mapsize;
}

/**
 * init_bootmem_node - register a node as boot memory
 * @pgdat: node to register
 * @freepfn: pfn where the bitmap for this node is to be placed
 * @startpfn: first pfn on the node
 * @endpfn: first pfn after the node
 *
 * Returns the number of bytes needed to hold the bitmap for this node.
 */
unsigned long __init init_bootmem_node(pg_data_t *pgdat, unsigned long freepfn,
				unsigned long startpfn, unsigned long endpfn)
{
	return init_bootmem_core(pgdat->bdata, freepfn, startpfn, endpfn);
}

/**
 * init_bootmem - register boot memory
 * @start: pfn where the bitmap is to be placed
 * @pages: number of available physical pages
 *
 * Returns the number of bytes needed to hold the bitmap.
 */
unsigned long __init init_bootmem(unsigned long start, unsigned long pages)
{
	max_low_pfn = pages;
	min_low_pfn = start;
	return init_bootmem_core(NODE_DATA(0)->bdata, start, 0, pages);
}
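
/*
 * How the pieces above fit together: a sketch of a flat-memory bring-up.
 * ram_pfns (the highest usable page frame, found by the arch's memory
 * scan) is hypothetical; _stext/_end are the usual kernel image markers.
 */
#if 0	/* sketch, never compiled */
	unsigned long bitmap_pfn = PFN_UP(__pa(_end));	/* bitmap after the kernel */
	unsigned long bitmap_size;

	/* every page starts out reserved ... */
	bitmap_size = init_bootmem(bitmap_pfn, ram_pfns);

	/* ... so declare the usable RAM first ... */
	free_bootmem(0, PFN_PHYS(ram_pfns));

	/* ... then re-reserve the kernel image and the bitmap itself */
	reserve_bootmem(__pa(_stext), _end - _stext, BOOTMEM_DEFAULT);
	reserve_bootmem(PFN_PHYS(bitmap_pfn), bitmap_size, BOOTMEM_DEFAULT);
#endif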

/**
 * free_bootmem_late - free bootmem pages directly to the page allocator
 * @physaddr: starting physical address of the range
 * @size: size of the range in bytes
 *
 * This is only useful when the bootmem allocator has already been torn
 * down, but we are still initializing the system.  Pages are given directly
 * to the page allocator, no bootmem metadata is updated because it is gone.
 */
void __init free_bootmem_late(unsigned long physaddr, unsigned long size)
{
	unsigned long cursor, end;

	kmemleak_free_part(__va(physaddr), size);

	cursor = PFN_UP(physaddr);
	end = PFN_DOWN(physaddr + size);

	for (; cursor < end; cursor++) {
		__free_pages_bootmem(pfn_to_page(cursor), 0);
		totalram_pages++;
	}
}
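
/*
 * Usage sketch: handing back a region that is only discovered to be
 * unneeded after free_all_bootmem() has run.  buf_phys and buf_size are
 * hypothetical; note that the PFN_UP()/PFN_DOWN() rounding above leaves
 * partial pages at either end of the range alone.
 */
#if 0	/* sketch, never compiled */
	free_bootmem_late(buf_phys, buf_size);	/* pages go straight to the buddy allocator */
#endif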

static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
{
	struct page *page;
	unsigned long start, end, pages, count = 0;

	if (!bdata->node_bootmem_map)
		return 0;

	start = bdata->node_min_pfn;
	end = bdata->node_low_pfn;

	bdebug("nid=%td start=%lx end=%lx\n",
		bdata - bootmem_node_data, start, end);

	while (start < end) {
		unsigned long *map, idx, vec;
		unsigned shift;

		map = bdata->node_bootmem_map;
		idx = start - bdata->node_min_pfn;
		shift = idx & (BITS_PER_LONG - 1);
		/*
		 * vec holds at most BITS_PER_LONG map bits,
		 * bit 0 corresponds to start.
		 */
		vec = ~map[idx / BITS_PER_LONG];

		if (shift) {
			vec >>= shift;
			if (end - start >= BITS_PER_LONG)
				vec |= ~map[idx / BITS_PER_LONG + 1] <<
					(BITS_PER_LONG - shift);
		}
		/*
		 * If we have a properly aligned and fully unreserved
		 * BITS_PER_LONG block of pages in front of us, free
		 * it in one go.
		 */
		if (IS_ALIGNED(start, BITS_PER_LONG) && vec == ~0UL) {
			int order = ilog2(BITS_PER_LONG);

			__free_pages_bootmem(pfn_to_page(start), order);
			count += BITS_PER_LONG;
			start += BITS_PER_LONG;
		} else {
			unsigned long cur = start;

			start = ALIGN(start + 1, BITS_PER_LONG);
			while (vec && cur != start) {
				if (vec & 1) {
					page = pfn_to_page(cur);
					__free_pages_bootmem(page, 0);
					count++;
				}
				vec >>= 1;
				++cur;
			}
		}
	}

	page = virt_to_page(bdata->node_bootmem_map);
	pages = bdata->node_low_pfn - bdata->node_min_pfn;
	pages = bootmem_bootmap_pages(pages);
	count += pages;
	while (pages--)
		__free_pages_bootmem(page++, 0);

	bdebug("nid=%td released=%lx\n", bdata - bootmem_node_data, count);

	return count;
}
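
/*
 * Worked example of the scan above (illustrative, assuming
 * BITS_PER_LONG == 64): with node_min_pfn == 0, pages 0-3 still reserved
 * (map[0] == 0xf), map[1] == 0 and start == 4:
 *
 *   - idx == 4, shift == 4; vec is ~map[0] shifted down by 4 and topped
 *     up with bits from ~map[1], i.e. all ones with bit 0 == page 4;
 *
 *   - start is not 64-aligned, so pages 4..63 are freed one at a time
 *     and start advances to ALIGN(5, 64) == 64;
 *
 *   - on the next pass start is aligned and vec == ~0UL, so pages
 *     64..127 go to the buddy allocator as one order-6 (ilog2(64)) block.
 */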

static void reset_node_lowmem_managed_pages(pg_data_t *pgdat)
{
	struct zone *z;

	/*
	 * In free_area_init_core(), highmem zone's managed_pages is set to
	 * present_pages, and bootmem allocator doesn't allocate from highmem
	 * zones. So there's no need to recalculate managed_pages because all
	 * highmem pages will be managed by the buddy system. Here highmem
	 * zone also includes highmem movable zone.
	 */
	for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
		if (!is_highmem(z))
			z->managed_pages = 0;
}

/**
 * free_all_bootmem_node - release a node's free pages to the buddy allocator
 * @pgdat: node to be released
 *
 * Returns the number of pages actually released.
 */
unsigned long __init free_all_bootmem_node(pg_data_t *pgdat)
{
	register_page_bootmem_info_node(pgdat);
	reset_node_lowmem_managed_pages(pgdat);
	return free_all_bootmem_core(pgdat->bdata);
}

/**
 * free_all_bootmem - release free pages to the buddy allocator
 *
 * Returns the number of pages actually released.
 */
unsigned long __init free_all_bootmem(void)
{
	unsigned long total_pages = 0;
	bootmem_data_t *bdata;
	struct pglist_data *pgdat;

	for_each_online_pgdat(pgdat)
		reset_node_lowmem_managed_pages(pgdat);

	list_for_each_entry(bdata, &bdata_list, list)
		total_pages += free_all_bootmem_core(bdata);

	return total_pages;
}
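
/*
 * This is the usual hand-over point: an arch's mem_init() calls it once
 * and credits the returned pages, roughly as below (sketch only).
 */
#if 0	/* sketch, never compiled */
	totalram_pages += free_all_bootmem();	/* bootmem is dead after this */
#endif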

static void __init __free(bootmem_data_t *bdata,
			unsigned long sidx, unsigned long eidx)
{
	unsigned long idx;

	bdebug("nid=%td start=%lx end=%lx\n", bdata - bootmem_node_data,
		sidx + bdata->node_min_pfn,
		eidx + bdata->node_min_pfn);

	if (bdata->hint_idx > sidx)
		bdata->hint_idx = sidx;

	for (idx = sidx; idx < eidx; idx++)
		if (!test_and_clear_bit(idx, bdata->node_bootmem_map))
			BUG();
}

static int __init __reserve(bootmem_data_t *bdata, unsigned long sidx,
			unsigned long eidx, int flags)
{
	unsigned long idx;
	int exclusive = flags & BOOTMEM_EXCLUSIVE;

	bdebug("nid=%td start=%lx end=%lx flags=%x\n",
		bdata - bootmem_node_data,
		sidx + bdata->node_min_pfn,
		eidx + bdata->node_min_pfn,
		flags);

	for (idx = sidx; idx < eidx; idx++)
		if (test_and_set_bit(idx, bdata->node_bootmem_map)) {
			if (exclusive) {
				__free(bdata, sidx, idx);
				return -EBUSY;
			}
			bdebug("silent double reserve of PFN %lx\n",
				idx + bdata->node_min_pfn);
		}
	return 0;
}

static int __init mark_bootmem_node(bootmem_data_t *bdata,
				unsigned long start, unsigned long end,
				int reserve, int flags)
{
	unsigned long sidx, eidx;

	bdebug("nid=%td start=%lx end=%lx reserve=%d flags=%x\n",
		bdata - bootmem_node_data, start, end, reserve, flags);

	BUG_ON(start < bdata->node_min_pfn);
	BUG_ON(end > bdata->node_low_pfn);

	sidx = start - bdata->node_min_pfn;
	eidx = end - bdata->node_min_pfn;

	if (reserve)
		return __reserve(bdata, sidx, eidx, flags);
	else
		__free(bdata, sidx, eidx);
	return 0;
}

static int __init mark_bootmem(unsigned long start, unsigned long end,
				int reserve, int flags)
{
	unsigned long pos;
	bootmem_data_t *bdata;

	pos = start;
	list_for_each_entry(bdata, &bdata_list, list) {
		int err;
		unsigned long max;

		if (pos < bdata->node_min_pfn ||
		    pos >= bdata->node_low_pfn) {
			BUG_ON(pos != start);
			continue;
		}

		max = min(bdata->node_low_pfn, end);

		err = mark_bootmem_node(bdata, pos, max, reserve, flags);
		if (reserve && err) {
			mark_bootmem(start, pos, 0, 0);
			return err;
		}

		if (max == end)
			return 0;
		pos = bdata->node_low_pfn;
	}
	BUG();
}

/**
 * free_bootmem_node - mark a page range as usable
 * @pgdat: node the range resides on
 * @physaddr: starting address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * The range must reside completely on the specified node.
 */
void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
			      unsigned long size)
{
	unsigned long start, end;

	kmemleak_free_part(__va(physaddr), size);

	start = PFN_UP(physaddr);
	end = PFN_DOWN(physaddr + size);

	mark_bootmem_node(pgdat->bdata, start, end, 0, 0);
}

/**
 * free_bootmem - mark a page range as usable
 * @physaddr: starting physical address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * The range must be contiguous but may span node boundaries.
 */
void __init free_bootmem(unsigned long physaddr, unsigned long size)
{
	unsigned long start, end;

	kmemleak_free_part(__va(physaddr), size);

	start = PFN_UP(physaddr);
	end = PFN_DOWN(physaddr + size);

	mark_bootmem(start, end, 0, 0);
}

/**
 * reserve_bootmem_node - mark a page range as reserved
 * @pgdat: node the range resides on
 * @physaddr: starting address of the range
 * @size: size of the range in bytes
 * @flags: reservation flags (see linux/bootmem.h)
 *
 * Partial pages will be reserved.
 *
 * The range must reside completely on the specified node.
 */
int __init reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
				 unsigned long size, int flags)
{
	unsigned long start, end;

	start = PFN_DOWN(physaddr);
	end = PFN_UP(physaddr + size);

	return mark_bootmem_node(pgdat->bdata, start, end, 1, flags);
}

/**
 * reserve_bootmem - mark a page range as reserved
 * @addr: starting address of the range
 * @size: size of the range in bytes
 * @flags: reservation flags (see linux/bootmem.h)
 *
 * Partial pages will be reserved.
 *
 * The range must be contiguous but may span node boundaries.
 */
int __init reserve_bootmem(unsigned long addr, unsigned long size,
			    int flags)
{
	unsigned long start, end;

	start = PFN_DOWN(addr);
	end = PFN_UP(addr + size);

	return mark_bootmem(start, end, 1, flags);
}
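
/*
 * BOOTMEM_EXCLUSIVE turns an overlap into -EBUSY instead of the silent
 * double reserve logged by __reserve().  A crash-kernel style reservation
 * might use it as below (sketch; crash_base and crash_size are
 * hypothetical).
 */
#if 0	/* sketch, never compiled */
	if (reserve_bootmem(crash_base, crash_size, BOOTMEM_EXCLUSIVE) < 0) {
		pr_warn("crashkernel region is already in use\n");
		crash_size = 0;
	}
#endif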

static unsigned long __init align_idx(struct bootmem_data *bdata,
				      unsigned long idx, unsigned long step)
{
	unsigned long base = bdata->node_min_pfn;

	/*
	 * Align the index with respect to the node start so that the
	 * combination of both satisfies the requested alignment.
	 */

	return ALIGN(base + idx, step) - base;
}

static unsigned long __init align_off(struct bootmem_data *bdata,
				      unsigned long off, unsigned long align)
{
	unsigned long base = PFN_PHYS(bdata->node_min_pfn);

	/* Same as align_idx for byte offsets */

	return ALIGN(base + off, align) - base;
}
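
/*
 * Example (arbitrary numbers): with node_min_pfn == 0x1003 and step == 4,
 * align_idx(bdata, 6, 4) == ALIGN(0x1009, 4) - 0x1003 == 9, so the
 * resulting pfn 0x1003 + 9 == 0x100c is 4-page aligned; aligning the bare
 * index (ALIGN(6, 4) == 8, pfn 0x100b) would not be.
 */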

static void * __init alloc_bootmem_bdata(struct bootmem_data *bdata,
					unsigned long size, unsigned long align,
					unsigned long goal, unsigned long limit)
{
	unsigned long fallback = 0;
	unsigned long min, max, start, sidx, midx, step;

	bdebug("nid=%td size=%lx [%lu pages] align=%lx goal=%lx limit=%lx\n",
		bdata - bootmem_node_data, size, PAGE_ALIGN(size) >> PAGE_SHIFT,
		align, goal, limit);

	BUG_ON(!size);
	BUG_ON(align & (align - 1));
	BUG_ON(limit && goal + size > limit);

	if (!bdata->node_bootmem_map)
		return NULL;

	min = bdata->node_min_pfn;
	max = bdata->node_low_pfn;

	goal >>= PAGE_SHIFT;
	limit >>= PAGE_SHIFT;

	if (limit && max > limit)
		max = limit;
	if (max <= min)
		return NULL;

	step = max(align >> PAGE_SHIFT, 1UL);

	if (goal && min < goal && goal < max)
		start = ALIGN(goal, step);
	else
		start = ALIGN(min, step);

	sidx = start - bdata->node_min_pfn;
	midx = max - bdata->node_min_pfn;

	if (bdata->hint_idx > sidx) {
		/*
		 * Handle the valid case of sidx being zero and still
		 * catch the fallback below.
		 */
		fallback = sidx + 1;
		sidx = align_idx(bdata, bdata->hint_idx, step);
	}

	while (1) {
		int merge;
		void *region;
		unsigned long eidx, i, start_off, end_off;
find_block:
		sidx = find_next_zero_bit(bdata->node_bootmem_map, midx, sidx);
		sidx = align_idx(bdata, sidx, step);
		eidx = sidx + PFN_UP(size);

		if (sidx >= midx || eidx > midx)
			break;

		for (i = sidx; i < eidx; i++)
			if (test_bit(i, bdata->node_bootmem_map)) {
				sidx = align_idx(bdata, i, step);
				if (sidx == i)
					sidx += step;
				goto find_block;
			}

		if (bdata->last_end_off & (PAGE_SIZE - 1) &&
				PFN_DOWN(bdata->last_end_off) + 1 == sidx)
			start_off = align_off(bdata, bdata->last_end_off, align);
		else
			start_off = PFN_PHYS(sidx);

		merge = PFN_DOWN(start_off) < sidx;
		end_off = start_off + size;

		bdata->last_end_off = end_off;
		bdata->hint_idx = PFN_UP(end_off);

		/*
		 * Reserve the area now:
		 */
		if (__reserve(bdata, PFN_DOWN(start_off) + merge,
				PFN_UP(end_off), BOOTMEM_EXCLUSIVE))
			BUG();

		region = phys_to_virt(PFN_PHYS(bdata->node_min_pfn) +
				start_off);
		memset(region, 0, size);
		/*
		 * The min_count is set to 0 so that bootmem allocated blocks
		 * are never reported as leaks.
		 */
		kmemleak_alloc(region, size, 0, 0);
		return region;
	}

	if (fallback) {
		sidx = align_idx(bdata, fallback - 1, step);
		fallback = 0;
		goto find_block;
	}

	return NULL;
}
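
/*
 * Example of the last_end_off/merge bookkeeping (illustrative numbers,
 * PAGE_SIZE == 4096): a 100-byte allocation placed at offset 0x1000 ends
 * at 0x1064, so last_end_off == 0x1064 and hint_idx == 2.  A following
 * small request with compatible alignment starts its search at index 2,
 * sees PFN_DOWN(0x1064) + 1 == 2 and is packed at offset 0x1064 of the
 * same page; merge == 1 then keeps __reserve() from re-reserving the
 * already-reserved page 1.
 */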

static void * __init alloc_bootmem_core(unsigned long size,
					unsigned long align,
					unsigned long goal,
					unsigned long limit)
{
	bootmem_data_t *bdata;
	void *region;

	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc(size, GFP_NOWAIT);

	list_for_each_entry(bdata, &bdata_list, list) {
		if (goal && bdata->node_low_pfn <= PFN_DOWN(goal))
			continue;
		if (limit && bdata->node_min_pfn >= PFN_DOWN(limit))
			break;

		region = alloc_bootmem_bdata(bdata, size, align, goal, limit);
		if (region)
			return region;
	}

	return NULL;
}

static void * __init ___alloc_bootmem_nopanic(unsigned long size,
					      unsigned long align,
					      unsigned long goal,
					      unsigned long limit)
{
	void *ptr;

restart:
	ptr = alloc_bootmem_core(size, align, goal, limit);
	if (ptr)
		return ptr;
	if (goal) {
		goal = 0;
		goto restart;
	}

	return NULL;
}

/**
 * __alloc_bootmem_nopanic - allocate boot memory without panicking
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * Returns NULL on failure.
 */
void * __init __alloc_bootmem_nopanic(unsigned long size, unsigned long align,
					unsigned long goal)
{
	unsigned long limit = 0;

	return ___alloc_bootmem_nopanic(size, align, goal, limit);
}

static void * __init ___alloc_bootmem(unsigned long size, unsigned long align,
					unsigned long goal, unsigned long limit)
{
	void *mem = ___alloc_bootmem_nopanic(size, align, goal, limit);

	if (mem)
		return mem;
	/*
	 * Whoops, we cannot satisfy the allocation request.
	 */
	printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size);
	panic("Out of memory");
	return NULL;
}

/**
 * __alloc_bootmem - allocate boot memory
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem(unsigned long size, unsigned long align,
			      unsigned long goal)
{
	unsigned long limit = 0;

	return ___alloc_bootmem(size, align, goal, limit);
}
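
/*
 * Most callers reach this through the alloc_bootmem*() wrappers in
 * <linux/bootmem.h>; spelled out, an early table allocation looks roughly
 * like this (sketch; table_size is hypothetical).
 */
#if 0	/* sketch, never compiled */
	void *table;

	/* cache-aligned, preferably above the DMA zone; panics on failure */
	table = __alloc_bootmem(table_size, SMP_CACHE_BYTES,
				__pa(MAX_DMA_ADDRESS));
#endif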

void * __init ___alloc_bootmem_node_nopanic(pg_data_t *pgdat,
				unsigned long size, unsigned long align,
				unsigned long goal, unsigned long limit)
{
	void *ptr;

	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc(size, GFP_NOWAIT);
again:

	/* do not panic in alloc_bootmem_bdata() */
	if (limit && goal + size > limit)
		limit = 0;

	ptr = alloc_bootmem_bdata(pgdat->bdata, size, align, goal, limit);
	if (ptr)
		return ptr;

	ptr = alloc_bootmem_core(size, align, goal, limit);
	if (ptr)
		return ptr;

	if (goal) {
		goal = 0;
		goto again;
	}

	return NULL;
}

void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
				   unsigned long align, unsigned long goal)
{
	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

	return ___alloc_bootmem_node_nopanic(pgdat, size, align, goal, 0);
}

void * __init ___alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
				    unsigned long align, unsigned long goal,
				    unsigned long limit)
{
	void *ptr;

	ptr = ___alloc_bootmem_node_nopanic(pgdat, size, align, goal, 0);
	if (ptr)
		return ptr;

	printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size);
	panic("Out of memory");
	return NULL;
}

/**
 * __alloc_bootmem_node - allocate boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may fall back to any node in the system if the specified node
 * can not hold the requested memory.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
				   unsigned long align, unsigned long goal)
{
	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

	return ___alloc_bootmem_node(pgdat, size, align, goal, 0);
}
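
/*
 * NUMA callers typically size per-node structures against the node's own
 * pgdat, e.g. (sketch; nid and map_size are hypothetical):
 */
#if 0	/* sketch, never compiled */
	void *map = __alloc_bootmem_node(NODE_DATA(nid), map_size,
					 SMP_CACHE_BYTES,
					 __pa(MAX_DMA_ADDRESS));
#endif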

void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
				   unsigned long align, unsigned long goal)
{
#ifdef MAX_DMA32_PFN
	unsigned long end_pfn;

	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

	/* update goal according ...MAX_DMA32_PFN */
	end_pfn = pgdat->node_start_pfn + pgdat->node_spanned_pages;

	if (end_pfn > MAX_DMA32_PFN + (128 >> (20 - PAGE_SHIFT)) &&
	    (goal >> PAGE_SHIFT) < MAX_DMA32_PFN) {
		void *ptr;
		unsigned long new_goal;

		new_goal = MAX_DMA32_PFN << PAGE_SHIFT;
		ptr = alloc_bootmem_bdata(pgdat->bdata, size, align,
						 new_goal, 0);
		if (ptr)
			return ptr;
	}
#endif

	return __alloc_bootmem_node(pgdat, size, align, goal);

}

#ifndef ARCH_LOW_ADDRESS_LIMIT
#define ARCH_LOW_ADDRESS_LIMIT	0xffffffffUL
#endif

/**
 * __alloc_bootmem_low - allocate low boot memory
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_low(unsigned long size, unsigned long align,
				  unsigned long goal)
{
	return ___alloc_bootmem(size, align, goal, ARCH_LOW_ADDRESS_LIMIT);
}
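
/*
 * The _low variants cap the search at ARCH_LOW_ADDRESS_LIMIT (below 4 GiB
 * unless the architecture overrides it), which is what early users with
 * 32-bit addressing constraints rely on; a software IO-TLB style buffer,
 * for instance (sketch; nslabs and IO_TLB_SHIFT as in <linux/swiotlb.h>
 * are assumptions here).
 */
#if 0	/* sketch, never compiled */
	void *io_tlb = __alloc_bootmem_low(nslabs << IO_TLB_SHIFT,
					   PAGE_SIZE, 0);
#endif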

void * __init __alloc_bootmem_low_nopanic(unsigned long size,
					  unsigned long align,
					  unsigned long goal)
{
	return ___alloc_bootmem_nopanic(size, align, goal,
					ARCH_LOW_ADDRESS_LIMIT);
}

/**
 * __alloc_bootmem_low_node - allocate low boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may fall back to any node in the system if the specified node
 * can not hold the requested memory.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
				       unsigned long align, unsigned long goal)
{
	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

	return ___alloc_bootmem_node(pgdat, size, align,
				     goal, ARCH_LOW_ADDRESS_LIMIT);
}