/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1995 Linus Torvalds
 * Copyright (C) 1995 Waldorf Electronics
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 01, 02, 03  Ralf Baechle
 * Copyright (C) 1996 Stoned Elipot
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2002, 2007	 Maciej W. Rozycki
 */
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/export.h>
#include <linux/screen_info.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>
#include <linux/initrd.h>
#include <linux/root_dev.h>
#include <linux/highmem.h>
#include <linux/console.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/kexec.h>
#include <linux/sizes.h>
#include <linux/device.h>
#include <linux/dma-contiguous.h>
#include <linux/decompress/generic.h>
#include <linux/of_fdt.h>

#include <asm/addrspace.h>
#include <asm/bootinfo.h>
#include <asm/bugs.h>
#include <asm/cache.h>
#include <asm/cdmm.h>
#include <asm/cpu.h>
#include <asm/debug.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp-ops.h>
#include <asm/prom.h>

#ifdef CONFIG_MIPS_ELF_APPENDED_DTB
const char __section(.appended_dtb) __appended_dtb[0x100000];
#endif /* CONFIG_MIPS_ELF_APPENDED_DTB */

struct cpuinfo_mips cpu_data[NR_CPUS] __read_mostly;

EXPORT_SYMBOL(cpu_data);

#ifdef CONFIG_VT
struct screen_info screen_info;
#endif

/*
 * Setup information
 *
 * These are initialized so they are in the .data section
 */
unsigned long mips_machtype __read_mostly = MACH_UNKNOWN;

EXPORT_SYMBOL(mips_machtype);

struct boot_mem_map boot_mem_map;

static char __initdata command_line[COMMAND_LINE_SIZE];
char __initdata arcs_cmdline[COMMAND_LINE_SIZE];

#ifdef CONFIG_CMDLINE_BOOL
static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
#endif

/*
 * mips_io_port_base is the beginning of the address space to which x86 style
 * I/O ports are mapped.
 */
const unsigned long mips_io_port_base = -1;
EXPORT_SYMBOL(mips_io_port_base);

static struct resource code_resource = { .name = "Kernel code", };
static struct resource data_resource = { .name = "Kernel data", };

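/*
 * Magic value used by detect_memory_region(): the contents of this
 * variable are compared against (&detect_magic + size) for growing
 * sizes; a match means RAM has wrapped around at that size.
 */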
static void *detect_magic __initdata = detect_memory_region;

void __init add_memory_region(phys_addr_t start, phys_addr_t size, long type)
{
	int x = boot_mem_map.nr_map;
	int i;

	/*
	 * If the region reaches the top of the physical address space, adjust
	 * the size slightly so that (start + size) doesn't overflow
	 */
	if (start + size - 1 == (phys_addr_t)ULLONG_MAX)
		--size;

	/* Sanity check */
	if (start + size < start) {
		pr_warn("Trying to add an invalid memory region, skipped\n");
		return;
	}

	/*
	 * Try to merge with existing entry, if any.
	 */
	for (i = 0; i < boot_mem_map.nr_map; i++) {
		struct boot_mem_map_entry *entry = boot_mem_map.map + i;
		unsigned long top;

		if (entry->type != type)
			continue;

		if (start + size < entry->addr)
			continue;			/* no overlap */

		if (entry->addr + entry->size < start)
			continue;			/* no overlap */

		top = max(entry->addr + entry->size, start + size);
		entry->addr = min(entry->addr, start);
		entry->size = top - entry->addr;

		return;
	}

	if (boot_mem_map.nr_map == BOOT_MEM_MAP_MAX) {
		pr_err("Ooops! Too many entries in the memory map!\n");
		return;
	}

	boot_mem_map.map[x].addr = start;
	boot_mem_map.map[x].size = size;
	boot_mem_map.map[x].type = type;
	boot_mem_map.nr_map++;
}

void __init detect_memory_region(phys_addr_t start, phys_addr_t sz_min, phys_addr_t sz_max)
{
	void *dm = &detect_magic;
	phys_addr_t size;

	for (size = sz_min; size < sz_max; size <<= 1) {
		if (!memcmp(dm, dm + size, sizeof(detect_magic)))
			break;
	}

	pr_debug("Memory: %lluMB of RAM detected at 0x%llx (min: %lluMB, max: %lluMB)\n",
		((unsigned long long) size) / SZ_1M,
		(unsigned long long) start,
		((unsigned long long) sz_min) / SZ_1M,
		((unsigned long long) sz_max) / SZ_1M);

	add_memory_region(start, size, BOOT_MEM_RAM);
}

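/*
 * Check whether [start, start + size) lies entirely inside a RAM entry
 * of the boot memory map and does not overlap any reserved entry.
 */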
bool __init memory_region_available(phys_addr_t start, phys_addr_t size)
{
	int i;
	bool in_ram = false, free = true;

	for (i = 0; i < boot_mem_map.nr_map; i++) {
		phys_addr_t start_, end_;

		start_ = boot_mem_map.map[i].addr;
		end_ = boot_mem_map.map[i].addr + boot_mem_map.map[i].size;

		switch (boot_mem_map.map[i].type) {
		case BOOT_MEM_RAM:
			if (start >= start_ && start + size <= end_)
				in_ram = true;
			break;
		case BOOT_MEM_RESERVED:
			if ((start >= start_ && start < end_) ||
			    (start < start_ && start + size >= start_))
				free = false;
			break;
		default:
			continue;
		}
	}

	return in_ram && free;
}

static void __init print_memory_map(void)
{
	int i;
	const int field = 2 * sizeof(unsigned long);

	for (i = 0; i < boot_mem_map.nr_map; i++) {
		printk(KERN_INFO " memory: %0*Lx @ %0*Lx ",
		       field, (unsigned long long) boot_mem_map.map[i].size,
		       field, (unsigned long long) boot_mem_map.map[i].addr);

		switch (boot_mem_map.map[i].type) {
		case BOOT_MEM_RAM:
			printk(KERN_CONT "(usable)\n");
			break;
		case BOOT_MEM_INIT_RAM:
			printk(KERN_CONT "(usable after init)\n");
			break;
		case BOOT_MEM_ROM_DATA:
			printk(KERN_CONT "(ROM data)\n");
			break;
		case BOOT_MEM_RESERVED:
			printk(KERN_CONT "(reserved)\n");
			break;
		default:
			printk(KERN_CONT "type %lu\n", boot_mem_map.map[i].type);
			break;
		}
	}
}

/*
 * Manage initrd
 */
#ifdef CONFIG_BLK_DEV_INITRD

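/*
 * The initrd location and size may be passed on the kernel command line
 * via the "rd_start" and "rd_size" early parameters parsed below, for
 * example (illustrative values): rd_start=0x85000000 rd_size=8M
 */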
static int __init rd_start_early(char *p)
{
	unsigned long start = memparse(p, &p);

#ifdef CONFIG_64BIT
	/* Guess if the sign extension was forgotten by bootloader */
	if (start < XKPHYS)
		start = (int)start;
#endif
	initrd_start = start;
	initrd_end += start;
	return 0;
}
early_param("rd_start", rd_start_early);

static int __init rd_size_early(char *p)
{
	initrd_end += memparse(p, &p);
	return 0;
}
early_param("rd_size", rd_size_early);

/* Returns the next free pfn after the initrd. */
static unsigned long __init init_initrd(void)
{
	unsigned long end;

	/*
	 * Board specific code or command line parser should have
	 * already set up initrd_start and initrd_end. In these cases
	 * perform sanity checks and use them if all looks good.
	 */
	if (!initrd_start || initrd_end <= initrd_start)
		goto disable;

	if (initrd_start & ~PAGE_MASK) {
		pr_err("initrd start must be page aligned\n");
		goto disable;
	}
	if (initrd_start < PAGE_OFFSET) {
		pr_err("initrd start < PAGE_OFFSET\n");
		goto disable;
	}

	/*
	 * Sanitize initrd addresses. For example, firmware can't
	 * guess whether it needs to pass them as 64-bit values if
	 * the kernel has been built pure 32-bit. We also need to
	 * switch from KSEG0 to XKPHYS addresses now, so the code
	 * can safely use __pa().
	 */
	end = __pa(initrd_end);
	initrd_end = (unsigned long)__va(end);
	initrd_start = (unsigned long)__va(__pa(initrd_start));

	ROOT_DEV = Root_RAM0;
	return PFN_UP(end);
disable:
	initrd_start = 0;
	initrd_end = 0;
	return 0;
}

/* In some conditions (e.g. big endian bootloader with a little endian
   kernel), the initrd might appear byte swapped.  Try to detect this and
   byte swap it if needed.  */
static void __init maybe_bswap_initrd(void)
{
#if defined(CONFIG_CPU_CAVIUM_OCTEON)
	u64 buf;

	/* Check for CPIO signature */
	if (!memcmp((void *)initrd_start, "070701", 6))
		return;

	/* Check for compressed initrd */
	if (decompress_method((unsigned char *)initrd_start, 8, NULL))
		return;

	/* Try again with a byte swapped header */
	buf = swab64p((u64 *)initrd_start);
	if (!memcmp(&buf, "070701", 6) ||
	    decompress_method((unsigned char *)(&buf), 8, NULL)) {
		unsigned long i;

		pr_info("Byteswapped initrd detected\n");
		for (i = initrd_start; i < ALIGN(initrd_end, 8); i += 8)
			swab64s((u64 *)i);
	}
#endif
}

static void __init finalize_initrd(void)
{
	unsigned long size = initrd_end - initrd_start;

	if (size == 0) {
		printk(KERN_INFO "Initrd not found or empty");
		goto disable;
	}
	if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) {
		printk(KERN_ERR "Initrd extends beyond end of memory");
		goto disable;
	}

	maybe_bswap_initrd();

	reserve_bootmem(__pa(initrd_start), size, BOOTMEM_DEFAULT);
	initrd_below_start_ok = 1;

	pr_info("Initial ramdisk at: 0x%lx (%lu bytes)\n",
		initrd_start, size);
	return;
disable:
	printk(KERN_CONT " - disabling initrd\n");
	initrd_start = 0;
	initrd_end = 0;
}

#else  /* !CONFIG_BLK_DEV_INITRD */

static unsigned long __init init_initrd(void)
{
	return 0;
}

#define finalize_initrd()	do {} while (0)

#endif

/*
 * Initialize the bootmem allocator. It also sets up initrd-related data
 * if needed.
 */
#if defined(CONFIG_SGI_IP27) || (defined(CONFIG_CPU_LOONGSON3) && defined(CONFIG_NUMA))

static void __init bootmem_init(void)
{
	init_initrd();
	finalize_initrd();
}

#else  /* !CONFIG_SGI_IP27 */

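/*
 * Size in bytes of the bootmem bitmap needed to cover @pages page
 * frames (one bit per page), rounded up to a whole number of longs.
 */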
static unsigned long __init bootmap_bytes(unsigned long pages)
{
	unsigned long bytes = DIV_ROUND_UP(pages, 8);

	return ALIGN(bytes, sizeof(long));
}

static void __init bootmem_init(void)
{
	unsigned long reserved_end;
	unsigned long mapstart = ~0UL;
	unsigned long bootmap_size;
	bool bootmap_valid = false;
	int i;

	/*
	 * Sanity check any INITRD first. We don't take it into account
	 * for bootmem setup initially, rely on the end-of-kernel-code
	 * as our memory range starting point. Once bootmem is inited we
	 * will reserve the area used for the initrd.
	 */
	init_initrd();
	reserved_end = (unsigned long) PFN_UP(__pa_symbol(&_end));

	/*
	 * max_low_pfn is not a number of pages. The number of pages
	 * of the system is given by 'max_low_pfn - min_low_pfn'.
	 */
	min_low_pfn = ~0UL;
	max_low_pfn = 0;

	/*
	 * Find the highest page frame number we have available.
	 */
	for (i = 0; i < boot_mem_map.nr_map; i++) {
		unsigned long start, end;

		if (boot_mem_map.map[i].type != BOOT_MEM_RAM)
			continue;

		start = PFN_UP(boot_mem_map.map[i].addr);
		end = PFN_DOWN(boot_mem_map.map[i].addr
				+ boot_mem_map.map[i].size);

#ifndef CONFIG_HIGHMEM
		/*
		 * Skip highmem here so we get an accurate max_low_pfn if low
		 * memory stops short of high memory.
		 * If the region overlaps HIGHMEM_START, end is clipped so
		 * max_pfn excludes the highmem portion.
		 */
		if (start >= PFN_DOWN(HIGHMEM_START))
			continue;
		if (end > PFN_DOWN(HIGHMEM_START))
			end = PFN_DOWN(HIGHMEM_START);
#endif

		if (end > max_low_pfn)
			max_low_pfn = end;
		if (start < min_low_pfn)
			min_low_pfn = start;
		if (end <= reserved_end)
			continue;
#ifdef CONFIG_BLK_DEV_INITRD
		/* Skip zones before initrd and initrd itself */
		if (initrd_end && end <= (unsigned long)PFN_UP(__pa(initrd_end)))
			continue;
#endif
		if (start >= mapstart)
			continue;
		mapstart = max(reserved_end, start);
	}

	if (min_low_pfn >= max_low_pfn)
		panic("Incorrect memory mapping !!!");
	if (min_low_pfn > ARCH_PFN_OFFSET) {
		pr_info("Wasting %lu bytes for tracking %lu unused pages\n",
			(min_low_pfn - ARCH_PFN_OFFSET) * sizeof(struct page),
			min_low_pfn - ARCH_PFN_OFFSET);
	} else if (min_low_pfn < ARCH_PFN_OFFSET) {
		pr_info("%lu free pages won't be used\n",
			ARCH_PFN_OFFSET - min_low_pfn);
	}
	min_low_pfn = ARCH_PFN_OFFSET;

	/*
	 * Determine low and high memory ranges
	 */
	max_pfn = max_low_pfn;
	if (max_low_pfn > PFN_DOWN(HIGHMEM_START)) {
#ifdef CONFIG_HIGHMEM
		highstart_pfn = PFN_DOWN(HIGHMEM_START);
		highend_pfn = max_low_pfn;
#endif
		max_low_pfn = PFN_DOWN(HIGHMEM_START);
	}

#ifdef CONFIG_BLK_DEV_INITRD
	/*
	 * mapstart should be after initrd_end
	 */
	if (initrd_end)
		mapstart = max(mapstart, (unsigned long)PFN_UP(__pa(initrd_end)));
#endif

	/*
	 * Check that mapstart doesn't overlap with any of the
	 * memory regions that have been reserved through e.g. the DTB.
	 */
	bootmap_size = bootmap_bytes(max_low_pfn - min_low_pfn);

	bootmap_valid = memory_region_available(PFN_PHYS(mapstart),
						bootmap_size);
	for (i = 0; i < boot_mem_map.nr_map && !bootmap_valid; i++) {
		unsigned long mapstart_addr;

		switch (boot_mem_map.map[i].type) {
		case BOOT_MEM_RESERVED:
			mapstart_addr = PFN_ALIGN(boot_mem_map.map[i].addr +
						boot_mem_map.map[i].size);
			if (PHYS_PFN(mapstart_addr) < mapstart)
				break;

			bootmap_valid = memory_region_available(mapstart_addr,
								bootmap_size);
			if (bootmap_valid)
				mapstart = PHYS_PFN(mapstart_addr);
			break;
		default:
			break;
		}
	}

	if (!bootmap_valid)
		panic("No memory area to place a bootmap bitmap");

	/*
	 * Initialize the boot-time allocator with low memory only.
	 */
	if (bootmap_size != init_bootmem_node(NODE_DATA(0), mapstart,
					 min_low_pfn, max_low_pfn))
		panic("Unexpected memory size required for bootmap");

	for (i = 0; i < boot_mem_map.nr_map; i++) {
		unsigned long start, end;

		start = PFN_UP(boot_mem_map.map[i].addr);
		end = PFN_DOWN(boot_mem_map.map[i].addr
				+ boot_mem_map.map[i].size);

		if (start <= min_low_pfn)
			start = min_low_pfn;
		if (start >= end)
			continue;

#ifndef CONFIG_HIGHMEM
		if (end > max_low_pfn)
			end = max_low_pfn;

		/*
		 * ... finally, is the area going away?
		 */
		if (end <= start)
			continue;
#endif

		memblock_add_node(PFN_PHYS(start), PFN_PHYS(end - start), 0);
	}

	/*
	 * Register fully available low RAM pages with the bootmem allocator.
	 */
	for (i = 0; i < boot_mem_map.nr_map; i++) {
		unsigned long start, end, size;

		start = PFN_UP(boot_mem_map.map[i].addr);
		end   = PFN_DOWN(boot_mem_map.map[i].addr
				    + boot_mem_map.map[i].size);

		/*
		 * Reserve usable memory.
		 */
		switch (boot_mem_map.map[i].type) {
		case BOOT_MEM_RAM:
			break;
		case BOOT_MEM_INIT_RAM:
			memory_present(0, start, end);
			continue;
		default:
			/* Not usable memory */
			if (start > min_low_pfn && end < max_low_pfn)
				reserve_bootmem(boot_mem_map.map[i].addr,
						boot_mem_map.map[i].size,
						BOOTMEM_DEFAULT);
			continue;
		}

		/*
		 * We round the start address of usable memory up
		 * and the end of the usable range down.
		 */
		if (start >= max_low_pfn)
			continue;
		if (start < reserved_end)
			start = reserved_end;
		if (end > max_low_pfn)
			end = max_low_pfn;

		/*
		 * ... finally, is the area going away?
		 */
		if (end <= start)
			continue;
		size = end - start;

		/* Register lowmem ranges */
		free_bootmem(PFN_PHYS(start), size << PAGE_SHIFT);
		memory_present(0, start, end);
	}

	/*
	 * Reserve the bootmap memory.
	 */
	reserve_bootmem(PFN_PHYS(mapstart), bootmap_size, BOOTMEM_DEFAULT);

#ifdef CONFIG_RELOCATABLE
	/*
	 * The kernel reserves all memory below its _end symbol as bootmem,
	 * but the kernel may now be at a much higher address. The memory
	 * between the original and new locations may be returned to the system.
	 */
	if (__pa_symbol(_text) > __pa_symbol(VMLINUX_LOAD_ADDRESS)) {
		unsigned long offset;
		extern void show_kernel_relocation(const char *level);

		offset = __pa_symbol(_text) - __pa_symbol(VMLINUX_LOAD_ADDRESS);
		free_bootmem(__pa_symbol(VMLINUX_LOAD_ADDRESS), offset);

#if defined(CONFIG_DEBUG_KERNEL) && defined(CONFIG_DEBUG_INFO)
		/*
		 * This information is necessary when debugging the kernel,
		 * but it is a security vulnerability otherwise!
		 */
		show_kernel_relocation(KERN_INFO);
#endif
	}
#endif

	/*
	 * Reserve initrd memory if needed.
	 */
	finalize_initrd();
}

#endif	/* CONFIG_SGI_IP27 */

/*
 * arch_mem_init - initialize memory management subsystem
 *
 *  o plat_mem_setup() detects the memory configuration and will record detected
 *    memory areas using add_memory_region.
 *
 * At this stage the memory configuration of the system is known to the
 * kernel, but the generic memory management system is still entirely
 * uninitialized.
 *
 *  o bootmem_init()
 *  o sparse_init()
 *  o paging_init()
 *  o dma_contiguous_reserve()
 *
 * At this stage the bootmem allocator is ready to use.
 *
 * NOTE: historically plat_mem_setup did the entire platform initialization.
 *	 This was rather impractical because it meant plat_mem_setup had to
 * get away without any kind of memory allocator.  To keep old code from
 * breaking, plat_setup was just renamed to plat_mem_setup and a second platform
 * initialization hook for anything else was introduced.
 */

static int usermem __initdata;

static int __init early_parse_mem(char *p)
{
	phys_addr_t start, size;

	/*
	 * If a user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		boot_mem_map.nr_map = 0;
		usermem = 1;
	}
	start = 0;
	size = memparse(p, &p);
	if (*p == '@')
		start = memparse(p + 1, &p);

	add_memory_region(start, size, BOOT_MEM_RAM);

	if (start && start > PHYS_OFFSET)
		add_memory_region(PHYS_OFFSET, start - PHYS_OFFSET,
				BOOT_MEM_RESERVED);
	return 0;
}
early_param("mem", early_parse_mem);

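/*
 * "memmap=nn@ss" adds nn bytes of RAM at address ss, while
 * "memmap=nn$ss" reserves that range.  The x86-style "exactmap" and
 * "nn#ss" (ACPI data) variants are rejected on MIPS.
 */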
static int __init early_parse_memmap(char *p)
{
	char *oldp;
	u64 start_at, mem_size;

	if (!p)
		return -EINVAL;

	if (!strncmp(p, "exactmap", 8)) {
		pr_err("\"memmap=exactmap\" invalid on MIPS\n");
		return 0;
	}

	oldp = p;
	mem_size = memparse(p, &p);
	if (p == oldp)
		return -EINVAL;

	if (*p == '@') {
		start_at = memparse(p+1, &p);
		add_memory_region(start_at, mem_size, BOOT_MEM_RAM);
	} else if (*p == '#') {
		pr_err("\"memmap=nn#ss\" (force ACPI data) invalid on MIPS\n");
		return -EINVAL;
	} else if (*p == '$') {
		start_at = memparse(p+1, &p);
		add_memory_region(start_at, mem_size, BOOT_MEM_RESERVED);
	} else {
		pr_err("\"memmap\" invalid format!\n");
		return -EINVAL;
	}

	if (*p == '\0') {
		usermem = 1;
		return 0;
	} else
		return -EINVAL;
}
early_param("memmap", early_parse_memmap);

#ifdef CONFIG_PROC_VMCORE
unsigned long setup_elfcorehdr, setup_elfcorehdr_size;
static int __init early_parse_elfcorehdr(char *p)
{
	int i;

	setup_elfcorehdr = memparse(p, &p);

	for (i = 0; i < boot_mem_map.nr_map; i++) {
		unsigned long start = boot_mem_map.map[i].addr;
		unsigned long end = (boot_mem_map.map[i].addr +
				     boot_mem_map.map[i].size);
		if (setup_elfcorehdr >= start && setup_elfcorehdr < end) {
			/*
			 * Reserve from the elf core header to the end of
			 * the memory segment; that should all be kdump
			 * reserved memory.
			 */
			setup_elfcorehdr_size = end - setup_elfcorehdr;
			break;
		}
	}
	/*
	 * If we don't find it in the memory map, then we shouldn't
	 * have to worry about it, as the new kernel won't use it.
	 */
	return 0;
}
early_param("elfcorehdr", early_parse_elfcorehdr);
#endif

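/*
 * Add the range [mem, end) to the boot memory map unless its start
 * address already falls inside an existing entry.
 */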
static void __init arch_mem_addpart(phys_addr_t mem, phys_addr_t end, int type)
{
	phys_addr_t size;
	int i;

	size = end - mem;
	if (!size)
		return;

	/* Make sure it is in the boot_mem_map */
	for (i = 0; i < boot_mem_map.nr_map; i++) {
		if (mem >= boot_mem_map.map[i].addr &&
		    mem < (boot_mem_map.map[i].addr +
			   boot_mem_map.map[i].size))
			return;
	}
	add_memory_region(mem, size, type);
}

#ifdef CONFIG_KEXEC
static inline unsigned long long get_total_mem(void)
{
	unsigned long long total;

	total = max_pfn - min_low_pfn;
	return total << PAGE_SHIFT;
}

static void __init mips_parse_crashkernel(void)
{
	unsigned long long total_mem;
	unsigned long long crash_size, crash_base;
	int ret;

	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	if (ret != 0 || crash_size <= 0)
		return;

	if (!memory_region_available(crash_base, crash_size)) {
		pr_warn("Invalid memory region reserved for crash kernel\n");
		return;
	}

	crashk_res.start = crash_base;
	crashk_res.end	 = crash_base + crash_size - 1;
}

static void __init request_crashkernel(struct resource *res)
{
	int ret;

	if (crashk_res.start == crashk_res.end)
		return;

	ret = request_resource(res, &crashk_res);
	if (!ret)
		pr_info("Reserving %ldMB of memory at %ldMB for crashkernel\n",
			(unsigned long)((crashk_res.end -
					 crashk_res.start + 1) >> 20),
			(unsigned long)(crashk_res.start  >> 20));
}
#else /* !defined(CONFIG_KEXEC)		*/
static void __init mips_parse_crashkernel(void)
{
}

static void __init request_crashkernel(struct resource *res)
{
}
#endif /* !defined(CONFIG_KEXEC)  */

#define USE_PROM_CMDLINE	IS_ENABLED(CONFIG_MIPS_CMDLINE_FROM_BOOTLOADER)
#define USE_DTB_CMDLINE		IS_ENABLED(CONFIG_MIPS_CMDLINE_FROM_DTB)
#define EXTEND_WITH_PROM	IS_ENABLED(CONFIG_MIPS_CMDLINE_DTB_EXTEND)
#define BUILTIN_EXTEND_WITH_PROM	\
	IS_ENABLED(CONFIG_MIPS_CMDLINE_BUILTIN_EXTEND)

static void __init arch_mem_init(char **cmdline_p)
{
	struct memblock_region *reg;
	extern void plat_mem_setup(void);

	/* call board setup routine */
	plat_mem_setup();

	/*
	 * Make sure all kernel memory is in the maps.  The "UP" and
	 * "DOWN" are opposite for initdata since if it crosses over
	 * into another memory section you don't want that to be
	 * freed when the initdata is freed.
	 */
	arch_mem_addpart(PFN_DOWN(__pa_symbol(&_text)) << PAGE_SHIFT,
			 PFN_UP(__pa_symbol(&_edata)) << PAGE_SHIFT,
			 BOOT_MEM_RAM);
	arch_mem_addpart(PFN_UP(__pa_symbol(&__init_begin)) << PAGE_SHIFT,
			 PFN_DOWN(__pa_symbol(&__init_end)) << PAGE_SHIFT,
			 BOOT_MEM_INIT_RAM);

	pr_info("Determined physical RAM map:\n");
	print_memory_map();

#if defined(CONFIG_CMDLINE_BOOL) && defined(CONFIG_CMDLINE_OVERRIDE)
	strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
#else
	if ((USE_PROM_CMDLINE && arcs_cmdline[0]) ||
	    (USE_DTB_CMDLINE && !boot_command_line[0]))
		strlcpy(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE);

	if (EXTEND_WITH_PROM && arcs_cmdline[0]) {
		if (boot_command_line[0])
			strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
		strlcat(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE);
	}

#if defined(CONFIG_CMDLINE_BOOL)
	if (builtin_cmdline[0]) {
		if (boot_command_line[0])
			strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
		strlcat(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
	}

	if (BUILTIN_EXTEND_WITH_PROM && arcs_cmdline[0]) {
		if (boot_command_line[0])
			strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
		strlcat(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE);
	}
#endif
#endif
	strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);

	*cmdline_p = command_line;

	parse_early_param();

	if (usermem) {
		pr_info("User-defined physical RAM map:\n");
		print_memory_map();
	}

	early_init_fdt_reserve_self();
	early_init_fdt_scan_reserved_mem();

	bootmem_init();
#ifdef CONFIG_PROC_VMCORE
	if (setup_elfcorehdr && setup_elfcorehdr_size) {
		printk(KERN_INFO "kdump reserved memory at %lx-%lx\n",
		       setup_elfcorehdr, setup_elfcorehdr_size);
		reserve_bootmem(setup_elfcorehdr, setup_elfcorehdr_size,
				BOOTMEM_DEFAULT);
	}
#endif

	mips_parse_crashkernel();
#ifdef CONFIG_KEXEC
	if (crashk_res.start != crashk_res.end)
		reserve_bootmem(crashk_res.start,
				crashk_res.end - crashk_res.start + 1,
				BOOTMEM_DEFAULT);
#endif
	device_tree_init();
	sparse_init();
	plat_swiotlb_setup();

	dma_contiguous_reserve(PFN_PHYS(max_low_pfn));
	/* Tell bootmem about cma reserved memblock section */
	for_each_memblock(reserved, reg)
		if (reg->size != 0)
			reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);

	reserve_bootmem_region(__pa_symbol(&__nosave_begin),
			__pa_symbol(&__nosave_end)); /* Reserve for hibernation */
}

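/*
 * Register each memory map entry below HIGHMEM_START with the iomem
 * resource tree (visible via /proc/iomem), along with the kernel
 * code/data and crashkernel sub-resources.
 */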
static void __init resource_init(void)
{
	int i;

	if (UNCAC_BASE != IO_BASE)
		return;

	code_resource.start = __pa_symbol(&_text);
	code_resource.end = __pa_symbol(&_etext) - 1;
	data_resource.start = __pa_symbol(&_etext);
	data_resource.end = __pa_symbol(&_edata) - 1;

	for (i = 0; i < boot_mem_map.nr_map; i++) {
		struct resource *res;
		unsigned long start, end;

		start = boot_mem_map.map[i].addr;
		end = boot_mem_map.map[i].addr + boot_mem_map.map[i].size - 1;
		if (start >= HIGHMEM_START)
			continue;
		if (end >= HIGHMEM_START)
			end = HIGHMEM_START - 1;

		res = alloc_bootmem(sizeof(struct resource));

		res->start = start;
		res->end = end;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		switch (boot_mem_map.map[i].type) {
		case BOOT_MEM_RAM:
		case BOOT_MEM_INIT_RAM:
		case BOOT_MEM_ROM_DATA:
			res->name = "System RAM";
			res->flags |= IORESOURCE_SYSRAM;
			break;
		case BOOT_MEM_RESERVED:
		default:
			res->name = "reserved";
		}

		request_resource(&iomem_resource, res);

		/*
		 *  We don't know which RAM region contains kernel data,
		 *  so we try it repeatedly and let the resource manager
		 *  test it.
		 */
		request_resource(res, &code_resource);
		request_resource(res, &data_resource);
		request_crashkernel(res);
	}
}

#ifdef CONFIG_SMP
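/*
 * Mark the first min(num_possible_cpus(), nr_cpu_ids) CPUs as possible,
 * clear the remainder up to NR_CPUS, and clamp nr_cpu_ids accordingly.
 */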
static void __init prefill_possible_map(void)
{
	int i, possible = num_possible_cpus();

	if (possible > nr_cpu_ids)
		possible = nr_cpu_ids;

	for (i = 0; i < possible; i++)
		set_cpu_possible(i, true);
	for (; i < NR_CPUS; i++)
		set_cpu_possible(i, false);

	nr_cpu_ids = possible;
}
#else
static inline void prefill_possible_map(void) {}
#endif

void __init setup_arch(char **cmdline_p)
{
	cpu_probe();
	mips_cm_probe();
	prom_init();

	setup_early_fdc_console();
#ifdef CONFIG_EARLY_PRINTK
	setup_early_printk();
#endif
	cpu_report();
	check_bugs_early();

#if defined(CONFIG_VT)
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif

	arch_mem_init(cmdline_p);

	resource_init();
	plat_smp_setup();
	prefill_possible_map();

	cpu_cache_init();
	paging_init();
}

unsigned long kernelsp[NR_CPUS];
unsigned long fw_arg0, fw_arg1, fw_arg2, fw_arg3;

#ifdef CONFIG_USE_OF
unsigned long fw_passed_dtb;
#endif

#ifdef CONFIG_DEBUG_FS
struct dentry *mips_debugfs_dir;
static int __init debugfs_mips(void)
{
	struct dentry *d;

	d = debugfs_create_dir("mips", NULL);
	if (!d)
		return -ENOMEM;
	mips_debugfs_dir = d;
	return 0;
}
arch_initcall(debugfs_mips);
#endif