/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1995 Linus Torvalds
 * Copyright (C) 1995 Waldorf Electronics
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 01, 02, 03  Ralf Baechle
 * Copyright (C) 1996 Stoned Elipot
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2002, 2007	 Maciej W. Rozycki
 */
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/export.h>
#include <linux/screen_info.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>
#include <linux/initrd.h>
#include <linux/root_dev.h>
#include <linux/highmem.h>
#include <linux/console.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/kexec.h>
#include <linux/sizes.h>
#include <linux/device.h>
#include <linux/dma-contiguous.h>
#include <linux/decompress/generic.h>

#include <asm/addrspace.h>
#include <asm/bootinfo.h>
#include <asm/bugs.h>
#include <asm/cache.h>
#include <asm/cdmm.h>
#include <asm/cpu.h>
#include <asm/debug.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp-ops.h>
#include <asm/prom.h>

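/*
 * Reserve 1MiB of space inside the kernel image so that a device tree
 * blob can be appended to the vmlinux ELF when
 * CONFIG_MIPS_ELF_APPENDED_DTB is enabled.
 */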
#ifdef CONFIG_MIPS_ELF_APPENDED_DTB
const char __section(.appended_dtb) __appended_dtb[0x100000];
#endif /* CONFIG_MIPS_ELF_APPENDED_DTB */

struct cpuinfo_mips cpu_data[NR_CPUS] __read_mostly;

EXPORT_SYMBOL(cpu_data);

#ifdef CONFIG_VT
struct screen_info screen_info;
#endif

/*
 * Setup information
 *
 * These are initialized so they are in the .data section
 */
unsigned long mips_machtype __read_mostly = MACH_UNKNOWN;

EXPORT_SYMBOL(mips_machtype);

struct boot_mem_map boot_mem_map;

static char __initdata command_line[COMMAND_LINE_SIZE];
char __initdata arcs_cmdline[COMMAND_LINE_SIZE];

#ifdef CONFIG_CMDLINE_BOOL
static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
#endif

/*
 * mips_io_port_base is the begin of the address space to which x86 style
 * I/O ports are mapped.
 */
const unsigned long mips_io_port_base = -1;
EXPORT_SYMBOL(mips_io_port_base);

static struct resource code_resource = { .name = "Kernel code", };
static struct resource data_resource = { .name = "Kernel data", };

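/*
 * detect_memory_region() probes the RAM size by checking at which
 * power-of-two offset the contents of detect_magic start to alias
 * back onto this variable, i.e. where the address space wraps around.
 */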
static void *detect_magic __initdata = detect_memory_region;

void __init add_memory_region(phys_addr_t start, phys_addr_t size, long type)
{
	int x = boot_mem_map.nr_map;
	int i;

	/*
	 * If the region reaches the top of the physical address space, adjust
	 * the size slightly so that (start + size) doesn't overflow
	 */
	if (start + size - 1 == (phys_addr_t)ULLONG_MAX)
		--size;

	/* Sanity check */
	if (start + size < start) {
		pr_warn("Trying to add an invalid memory region, skipped\n");
		return;
	}

	/*
	 * Try to merge with existing entry, if any.
	 */
	for (i = 0; i < boot_mem_map.nr_map; i++) {
		struct boot_mem_map_entry *entry = boot_mem_map.map + i;
		unsigned long top;

		if (entry->type != type)
			continue;

		if (start + size < entry->addr)
			continue;			/* no overlap */

		if (entry->addr + entry->size < start)
			continue;			/* no overlap */

		top = max(entry->addr + entry->size, start + size);
		entry->addr = min(entry->addr, start);
		entry->size = top - entry->addr;

		return;
	}

	if (boot_mem_map.nr_map == BOOT_MEM_MAP_MAX) {
		pr_err("Ooops! Too many entries in the memory map!\n");
		return;
	}

	boot_mem_map.map[x].addr = start;
	boot_mem_map.map[x].size = size;
	boot_mem_map.map[x].type = type;
	boot_mem_map.nr_map++;
}

void __init detect_memory_region(phys_addr_t start, phys_addr_t sz_min, phys_addr_t sz_max)
{
	void *dm = &detect_magic;
	phys_addr_t size;

	for (size = sz_min; size < sz_max; size <<= 1) {
		if (!memcmp(dm, dm + size, sizeof(detect_magic)))
			break;
	}

	pr_debug("Memory: %lluMB of RAM detected at 0x%llx (min: %lluMB, max: %lluMB)\n",
		((unsigned long long) size) / SZ_1M,
		(unsigned long long) start,
		((unsigned long long) sz_min) / SZ_1M,
		((unsigned long long) sz_max) / SZ_1M);

	add_memory_region(start, size, BOOT_MEM_RAM);
}

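/*
 * Check whether the range [start, start + size) lies inside a RAM
 * entry of the boot memory map and does not overlap any entry that
 * has been marked as reserved.
 */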
bool __init memory_region_available(phys_addr_t start, phys_addr_t size)
{
	int i;
	bool in_ram = false, free = true;

	for (i = 0; i < boot_mem_map.nr_map; i++) {
		phys_addr_t start_, end_;

		start_ = boot_mem_map.map[i].addr;
		end_ = boot_mem_map.map[i].addr + boot_mem_map.map[i].size;

		switch (boot_mem_map.map[i].type) {
		case BOOT_MEM_RAM:
			if (start >= start_ && start + size <= end_)
				in_ram = true;
			break;
		case BOOT_MEM_RESERVED:
			if ((start >= start_ && start < end_) ||
			    (start < start_ && start + size >= start_))
				free = false;
			break;
		default:
			continue;
		}
	}

	return in_ram && free;
}

static void __init print_memory_map(void)
{
	int i;
	const int field = 2 * sizeof(unsigned long);

	for (i = 0; i < boot_mem_map.nr_map; i++) {
		printk(KERN_INFO " memory: %0*Lx @ %0*Lx ",
		       field, (unsigned long long) boot_mem_map.map[i].size,
		       field, (unsigned long long) boot_mem_map.map[i].addr);

		switch (boot_mem_map.map[i].type) {
		case BOOT_MEM_RAM:
			printk(KERN_CONT "(usable)\n");
			break;
		case BOOT_MEM_INIT_RAM:
			printk(KERN_CONT "(usable after init)\n");
			break;
		case BOOT_MEM_ROM_DATA:
			printk(KERN_CONT "(ROM data)\n");
			break;
		case BOOT_MEM_RESERVED:
			printk(KERN_CONT "(reserved)\n");
			break;
		default:
			printk(KERN_CONT "type %lu\n", boot_mem_map.map[i].type);
			break;
		}
	}
}

/*
 * Manage initrd
 */
#ifdef CONFIG_BLK_DEV_INITRD

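/*
 * The "rd_start=" and "rd_size=" kernel parameters tell us where the
 * bootloader placed the initial ramdisk and how large it is.
 */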
static int __init rd_start_early(char *p)
{
	unsigned long start = memparse(p, &p);

#ifdef CONFIG_64BIT
	/* Guess if the sign extension was forgotten by bootloader */
	if (start < XKPHYS)
		start = (int)start;
#endif
	initrd_start = start;
	initrd_end += start;
	return 0;
}
early_param("rd_start", rd_start_early);

static int __init rd_size_early(char *p)
{
	initrd_end += memparse(p, &p);
	return 0;
}
early_param("rd_size", rd_size_early);

/* it returns the next free pfn after initrd */
static unsigned long __init init_initrd(void)
{
	unsigned long end;

	/*
	 * Board specific code or command line parser should have
	 * already set up initrd_start and initrd_end. In these cases
	 * perform sanity checks and use them if all looks good.
	 */
	if (!initrd_start || initrd_end <= initrd_start)
		goto disable;

	if (initrd_start & ~PAGE_MASK) {
		pr_err("initrd start must be page aligned\n");
		goto disable;
	}
	if (initrd_start < PAGE_OFFSET) {
		pr_err("initrd start < PAGE_OFFSET\n");
		goto disable;
	}

	/*
	 * Sanitize initrd addresses. For example firmware
	 * can't guess if they need to pass them through
	 * 64-bits values if the kernel has been built in pure
	 * 32-bit. We need also to switch from KSEG0 to XKPHYS
	 * addresses now, so the code can now safely use __pa().
	 */
	end = __pa(initrd_end);
	initrd_end = (unsigned long)__va(end);
	initrd_start = (unsigned long)__va(__pa(initrd_start));

	ROOT_DEV = Root_RAM0;
	return PFN_UP(end);
disable:
	initrd_start = 0;
	initrd_end = 0;
	return 0;
}

/* In some conditions (e.g. big endian bootloader with a little endian
   kernel), the initrd might appear byte swapped.  Try to detect this and
   byte swap it if needed.  */
static void __init maybe_bswap_initrd(void)
{
#if defined(CONFIG_CPU_CAVIUM_OCTEON)
	u64 buf;

	/* Check for CPIO signature */
	if (!memcmp((void *)initrd_start, "070701", 6))
		return;

	/* Check for compressed initrd */
	if (decompress_method((unsigned char *)initrd_start, 8, NULL))
		return;

	/* Try again with a byte swapped header */
	buf = swab64p((u64 *)initrd_start);
	if (!memcmp(&buf, "070701", 6) ||
	    decompress_method((unsigned char *)(&buf), 8, NULL)) {
		unsigned long i;

		pr_info("Byteswapped initrd detected\n");
		for (i = initrd_start; i < ALIGN(initrd_end, 8); i += 8)
			swab64s((u64 *)i);
	}
#endif
}

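/*
 * Check the initrd against the final memory layout, byte swap it if
 * required and reserve its pages so the bootmem allocator leaves it
 * alone.
 */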
static void __init finalize_initrd(void)
{
	unsigned long size = initrd_end - initrd_start;

	if (size == 0) {
		printk(KERN_INFO "Initrd not found or empty");
		goto disable;
	}
	if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) {
		printk(KERN_ERR "Initrd extends beyond end of memory");
		goto disable;
	}

	maybe_bswap_initrd();

	reserve_bootmem(__pa(initrd_start), size, BOOTMEM_DEFAULT);
	initrd_below_start_ok = 1;

	pr_info("Initial ramdisk at: 0x%lx (%lu bytes)\n",
		initrd_start, size);
	return;
disable:
	printk(KERN_CONT " - disabling initrd\n");
	initrd_start = 0;
	initrd_end = 0;
}

#else  /* !CONFIG_BLK_DEV_INITRD */

static unsigned long __init init_initrd(void)
{
	return 0;
}

#define finalize_initrd()	do {} while (0)

#endif

/*
 * Initialize the bootmem allocator. It also sets up initrd related data
 * if needed.
 */
#if defined(CONFIG_SGI_IP27) || (defined(CONFIG_CPU_LOONGSON3) && defined(CONFIG_NUMA))

static void __init bootmem_init(void)
{
	init_initrd();
	finalize_initrd();
}

#else  /* !CONFIG_SGI_IP27 */

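/*
 * Size, in bytes, of the bootmem bitmap needed to track the given
 * number of pages (one bit per page, rounded up to a whole long).
 */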
static unsigned long __init bootmap_bytes(unsigned long pages)
{
	unsigned long bytes = DIV_ROUND_UP(pages, 8);

	return ALIGN(bytes, sizeof(long));
}

static void __init bootmem_init(void)
{
	unsigned long reserved_end;
	unsigned long mapstart = ~0UL;
	unsigned long bootmap_size;
	bool bootmap_valid = false;
	int i;

	/*
	 * Sanity check any INITRD first. We don't take it into account
	 * for bootmem setup initially, rely on the end-of-kernel-code
	 * as our memory range starting point. Once bootmem is inited we
	 * will reserve the area used for the initrd.
	 */
	init_initrd();
	reserved_end = (unsigned long) PFN_UP(__pa_symbol(&_end));

	/*
	 * max_low_pfn is not a number of pages. The number of pages
	 * of the system is given by 'max_low_pfn - min_low_pfn'.
	 */
	min_low_pfn = ~0UL;
	max_low_pfn = 0;

	/*
	 * Find the highest page frame number we have available.
	 */
	for (i = 0; i < boot_mem_map.nr_map; i++) {
		unsigned long start, end;

		if (boot_mem_map.map[i].type != BOOT_MEM_RAM)
			continue;

		start = PFN_UP(boot_mem_map.map[i].addr);
		end = PFN_DOWN(boot_mem_map.map[i].addr
				+ boot_mem_map.map[i].size);

#ifndef CONFIG_HIGHMEM
		/*
		 * Skip highmem here so we get an accurate max_low_pfn if low
		 * memory stops short of high memory.
		 * If the region overlaps HIGHMEM_START, end is clipped so
		 * max_pfn excludes the highmem portion.
		 */
		if (start >= PFN_DOWN(HIGHMEM_START))
			continue;
		if (end > PFN_DOWN(HIGHMEM_START))
			end = PFN_DOWN(HIGHMEM_START);
#endif

		if (end > max_low_pfn)
			max_low_pfn = end;
		if (start < min_low_pfn)
			min_low_pfn = start;
		if (end <= reserved_end)
			continue;
#ifdef CONFIG_BLK_DEV_INITRD
		/* Skip zones before initrd and initrd itself */
		if (initrd_end && end <= (unsigned long)PFN_UP(__pa(initrd_end)))
			continue;
#endif
		if (start >= mapstart)
			continue;
		mapstart = max(reserved_end, start);
	}

	if (min_low_pfn >= max_low_pfn)
		panic("Incorrect memory mapping !!!");
	if (min_low_pfn > ARCH_PFN_OFFSET) {
		pr_info("Wasting %lu bytes for tracking %lu unused pages\n",
			(min_low_pfn - ARCH_PFN_OFFSET) * sizeof(struct page),
			min_low_pfn - ARCH_PFN_OFFSET);
	} else if (min_low_pfn < ARCH_PFN_OFFSET) {
		pr_info("%lu free pages won't be used\n",
			ARCH_PFN_OFFSET - min_low_pfn);
	}
	min_low_pfn = ARCH_PFN_OFFSET;

	/*
	 * Determine low and high memory ranges
	 */
	max_pfn = max_low_pfn;
	if (max_low_pfn > PFN_DOWN(HIGHMEM_START)) {
#ifdef CONFIG_HIGHMEM
		highstart_pfn = PFN_DOWN(HIGHMEM_START);
		highend_pfn = max_low_pfn;
#endif
		max_low_pfn = PFN_DOWN(HIGHMEM_START);
	}

#ifdef CONFIG_BLK_DEV_INITRD
	/*
	 * mapstart should be after initrd_end
	 */
	if (initrd_end)
		mapstart = max(mapstart, (unsigned long)PFN_UP(__pa(initrd_end)));
#endif

	/*
	 * check that mapstart doesn't overlap with any of
	 * memory regions that have been reserved through eg. DTB
	 */
	bootmap_size = bootmap_bytes(max_low_pfn - min_low_pfn);

	bootmap_valid = memory_region_available(PFN_PHYS(mapstart),
						bootmap_size);
	for (i = 0; i < boot_mem_map.nr_map && !bootmap_valid; i++) {
		unsigned long mapstart_addr;

		switch (boot_mem_map.map[i].type) {
		case BOOT_MEM_RESERVED:
			mapstart_addr = PFN_ALIGN(boot_mem_map.map[i].addr +
						boot_mem_map.map[i].size);
			if (PHYS_PFN(mapstart_addr) < mapstart)
				break;

			bootmap_valid = memory_region_available(mapstart_addr,
								bootmap_size);
			if (bootmap_valid)
				mapstart = PHYS_PFN(mapstart_addr);
			break;
		default:
			break;
		}
	}

	if (!bootmap_valid)
		panic("No memory area to place a bootmap bitmap");

	/*
	 * Initialize the boot-time allocator with low memory only.
	 */
	if (bootmap_size != init_bootmem_node(NODE_DATA(0), mapstart,
					 min_low_pfn, max_low_pfn))
		panic("Unexpected memory size required for bootmap");

	for (i = 0; i < boot_mem_map.nr_map; i++) {
		unsigned long start, end;

		start = PFN_UP(boot_mem_map.map[i].addr);
		end = PFN_DOWN(boot_mem_map.map[i].addr
				+ boot_mem_map.map[i].size);

		if (start <= min_low_pfn)
			start = min_low_pfn;
		if (start >= end)
			continue;

#ifndef CONFIG_HIGHMEM
		if (end > max_low_pfn)
			end = max_low_pfn;

		/*
		 * ... finally, is the area going away?
		 */
		if (end <= start)
			continue;
#endif

		memblock_add_node(PFN_PHYS(start), PFN_PHYS(end - start), 0);
	}

	/*
	 * Register fully available low RAM pages with the bootmem allocator.
	 */
	for (i = 0; i < boot_mem_map.nr_map; i++) {
		unsigned long start, end, size;

		start = PFN_UP(boot_mem_map.map[i].addr);
		end   = PFN_DOWN(boot_mem_map.map[i].addr
				    + boot_mem_map.map[i].size);

		/*
		 * Reserve usable memory.
		 */
		switch (boot_mem_map.map[i].type) {
		case BOOT_MEM_RAM:
			break;
		case BOOT_MEM_INIT_RAM:
			memory_present(0, start, end);
			continue;
		default:
			/* Not usable memory */
			if (start > min_low_pfn && end < max_low_pfn)
				reserve_bootmem(boot_mem_map.map[i].addr,
						boot_mem_map.map[i].size,
						BOOTMEM_DEFAULT);
			continue;
		}

		/*
		 * We round the start address of usable memory up and the
		 * end of the usable range down.
		 */
		if (start >= max_low_pfn)
			continue;
		if (start < reserved_end)
			start = reserved_end;
		if (end > max_low_pfn)
			end = max_low_pfn;

		/*
		 * ... finally, is the area going away?
		 */
		if (end <= start)
			continue;
		size = end - start;

		/* Register lowmem ranges */
		free_bootmem(PFN_PHYS(start), size << PAGE_SHIFT);
		memory_present(0, start, end);
	}

	/*
	 * Reserve the bootmap memory.
	 */
	reserve_bootmem(PFN_PHYS(mapstart), bootmap_size, BOOTMEM_DEFAULT);

#ifdef CONFIG_RELOCATABLE
	/*
	 * The kernel reserves all memory below its _end symbol as bootmem,
	 * but the kernel may now be at a much higher address. The memory
	 * between the original and new locations may be returned to the system.
	 */
	if (__pa_symbol(_text) > __pa_symbol(VMLINUX_LOAD_ADDRESS)) {
		unsigned long offset;
		extern void show_kernel_relocation(const char *level);

		offset = __pa_symbol(_text) - __pa_symbol(VMLINUX_LOAD_ADDRESS);
		free_bootmem(__pa_symbol(VMLINUX_LOAD_ADDRESS), offset);

#if defined(CONFIG_DEBUG_KERNEL) && defined(CONFIG_DEBUG_INFO)
		/*
		 * This information is necessary when debugging the kernel,
		 * but it is a security vulnerability otherwise!
		 */
		show_kernel_relocation(KERN_INFO);
#endif
	}
#endif

	/*
	 * Reserve initrd memory if needed.
	 */
	finalize_initrd();
}

#endif	/* CONFIG_SGI_IP27 */

/*
 * arch_mem_init - initialize memory management subsystem
 *
 *  o plat_mem_setup() detects the memory configuration and will record detected
 *    memory areas using add_memory_region.
 *
 * At this stage the memory configuration of the system is known to the
 * kernel, but the generic memory management system is still entirely
 * uninitialized.
 *
 *  o bootmem_init()
 *  o sparse_init()
 *  o paging_init()
 *  o dma_contiguous_reserve()
 *
 * At this stage the bootmem allocator is ready to use.
 *
 * NOTE: historically plat_mem_setup did the entire platform initialization.
 *	 This was rather impractical because it meant plat_mem_setup had to
 * get away without any kind of memory allocator.  To keep old code from
 * breaking, plat_setup was just renamed to plat_mem_setup and a second platform
 * initialization hook for anything else was introduced.
 */

static int usermem __initdata;

static int __init early_parse_mem(char *p)
{
	phys_addr_t start, size;

	/*
	 * If a user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		boot_mem_map.nr_map = 0;
		usermem = 1;
	}
	start = 0;
	size = memparse(p, &p);
	if (*p == '@')
		start = memparse(p + 1, &p);

	add_memory_region(start, size, BOOT_MEM_RAM);
	return 0;
}
early_param("mem", early_parse_mem);

#ifdef CONFIG_PROC_VMCORE
unsigned long setup_elfcorehdr, setup_elfcorehdr_size;
static int __init early_parse_elfcorehdr(char *p)
{
	int i;

	setup_elfcorehdr = memparse(p, &p);

	for (i = 0; i < boot_mem_map.nr_map; i++) {
		unsigned long start = boot_mem_map.map[i].addr;
		unsigned long end = (boot_mem_map.map[i].addr +
				     boot_mem_map.map[i].size);
		if (setup_elfcorehdr >= start && setup_elfcorehdr < end) {
			/*
			 * Reserve from the elf core header to the end of
			 * the memory segment; that should all be kdump
			 * reserved memory.
			 */
			setup_elfcorehdr_size = end - setup_elfcorehdr;
			break;
		}
	}
	/*
	 * If we don't find it in the memory map, then we shouldn't
	 * have to worry about it, as the new kernel won't use it.
	 */
	return 0;
}
early_param("elfcorehdr", early_parse_elfcorehdr);
#endif

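/*
 * Add the range [mem, end) to the boot memory map, unless its start
 * already falls inside an existing entry (as it does when the firmware
 * reported that memory itself).
 */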
static void __init arch_mem_addpart(phys_addr_t mem, phys_addr_t end, int type)
{
	phys_addr_t size;
	int i;

	size = end - mem;
	if (!size)
		return;

	/* Make sure it is in the boot_mem_map */
	for (i = 0; i < boot_mem_map.nr_map; i++) {
		if (mem >= boot_mem_map.map[i].addr &&
		    mem < (boot_mem_map.map[i].addr +
			   boot_mem_map.map[i].size))
			return;
	}
	add_memory_region(mem, size, type);
}

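/*
 * kexec crash-dump support: parse the "crashkernel=" parameter and
 * record the requested region in crashk_res so it can be reserved and
 * registered as a resource later on.
 */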
#ifdef CONFIG_KEXEC
static inline unsigned long long get_total_mem(void)
{
	unsigned long long total;

	total = max_pfn - min_low_pfn;
	return total << PAGE_SHIFT;
}

static void __init mips_parse_crashkernel(void)
{
	unsigned long long total_mem;
	unsigned long long crash_size, crash_base;
	int ret;

	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	if (ret != 0 || crash_size <= 0)
		return;

	crashk_res.start = crash_base;
	crashk_res.end	 = crash_base + crash_size - 1;
}

static void __init request_crashkernel(struct resource *res)
{
	int ret;

	if (crashk_res.start == crashk_res.end)
		return;

	ret = request_resource(res, &crashk_res);
	if (!ret)
		pr_info("Reserving %ldMB of memory at %ldMB for crashkernel\n",
			(unsigned long)((crashk_res.end -
					 crashk_res.start + 1) >> 20),
			(unsigned long)(crashk_res.start  >> 20));
}
#else /* !defined(CONFIG_KEXEC)		*/
static void __init mips_parse_crashkernel(void)
{
}

static void __init request_crashkernel(struct resource *res)
{
}
#endif /* !defined(CONFIG_KEXEC)  */

#define USE_PROM_CMDLINE	IS_ENABLED(CONFIG_MIPS_CMDLINE_FROM_BOOTLOADER)
#define USE_DTB_CMDLINE		IS_ENABLED(CONFIG_MIPS_CMDLINE_FROM_DTB)
#define EXTEND_WITH_PROM	IS_ENABLED(CONFIG_MIPS_CMDLINE_DTB_EXTEND)
#define BUILTIN_EXTEND_WITH_PROM	\
	IS_ENABLED(CONFIG_MIPS_CMDLINE_BUILTIN_EXTEND)

static void __init arch_mem_init(char **cmdline_p)
{
	struct memblock_region *reg;
	extern void plat_mem_setup(void);

	/* call board setup routine */
	plat_mem_setup();

	/*
	 * Make sure all kernel memory is in the maps.  The "UP" and
	 * "DOWN" are opposite for initdata since if it crosses over
	 * into another memory section you don't want that to be
	 * freed when the initdata is freed.
	 */
	arch_mem_addpart(PFN_DOWN(__pa_symbol(&_text)) << PAGE_SHIFT,
			 PFN_UP(__pa_symbol(&_edata)) << PAGE_SHIFT,
			 BOOT_MEM_RAM);
	arch_mem_addpart(PFN_UP(__pa_symbol(&__init_begin)) << PAGE_SHIFT,
			 PFN_DOWN(__pa_symbol(&__init_end)) << PAGE_SHIFT,
			 BOOT_MEM_INIT_RAM);

	pr_info("Determined physical RAM map:\n");
	print_memory_map();

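	/*
	 * Assemble the kernel command line: with CMDLINE_OVERRIDE the
	 * built-in string replaces everything else, otherwise the
	 * bootloader (arcs_cmdline), device tree and built-in strings
	 * are combined as selected by the CONFIG_MIPS_CMDLINE_* options.
	 */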
#if defined(CONFIG_CMDLINE_BOOL) && defined(CONFIG_CMDLINE_OVERRIDE)
	strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
#else
	if ((USE_PROM_CMDLINE && arcs_cmdline[0]) ||
	    (USE_DTB_CMDLINE && !boot_command_line[0]))
		strlcpy(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE);

	if (EXTEND_WITH_PROM && arcs_cmdline[0]) {
		if (boot_command_line[0])
			strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
		strlcat(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE);
	}

#if defined(CONFIG_CMDLINE_BOOL)
	if (builtin_cmdline[0]) {
		if (boot_command_line[0])
			strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
		strlcat(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
	}

	if (BUILTIN_EXTEND_WITH_PROM && arcs_cmdline[0]) {
		if (boot_command_line[0])
			strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
		strlcat(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE);
	}
#endif
#endif
	strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);

	*cmdline_p = command_line;

	parse_early_param();

	if (usermem) {
		pr_info("User-defined physical RAM map:\n");
		print_memory_map();
	}

	bootmem_init();
#ifdef CONFIG_PROC_VMCORE
	if (setup_elfcorehdr && setup_elfcorehdr_size) {
		printk(KERN_INFO "kdump reserved memory at %lx-%lx\n",
		       setup_elfcorehdr, setup_elfcorehdr_size);
		reserve_bootmem(setup_elfcorehdr, setup_elfcorehdr_size,
				BOOTMEM_DEFAULT);
	}
#endif

	mips_parse_crashkernel();
#ifdef CONFIG_KEXEC
	if (crashk_res.start != crashk_res.end)
		reserve_bootmem(crashk_res.start,
				crashk_res.end - crashk_res.start + 1,
				BOOTMEM_DEFAULT);
#endif
	device_tree_init();
	sparse_init();
	plat_swiotlb_setup();

	dma_contiguous_reserve(PFN_PHYS(max_low_pfn));
	/* Tell bootmem about cma reserved memblock section */
	for_each_memblock(reserved, reg)
		if (reg->size != 0)
			reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);

	reserve_bootmem_region(__pa_symbol(&__nosave_begin),
			__pa_symbol(&__nosave_end)); /* Reserve for hibernation */
}

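/*
 * Register the kernel code and data plus every entry of the boot
 * memory map with the resource tree, so the layout shows up in
 * /proc/iomem and the crash kernel region can be claimed.
 */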
static void __init resource_init(void)
{
	int i;

	if (UNCAC_BASE != IO_BASE)
		return;

	code_resource.start = __pa_symbol(&_text);
	code_resource.end = __pa_symbol(&_etext) - 1;
	data_resource.start = __pa_symbol(&_etext);
	data_resource.end = __pa_symbol(&_edata) - 1;

	for (i = 0; i < boot_mem_map.nr_map; i++) {
		struct resource *res;
		unsigned long start, end;

		start = boot_mem_map.map[i].addr;
		end = boot_mem_map.map[i].addr + boot_mem_map.map[i].size - 1;
		if (start >= HIGHMEM_START)
			continue;
		if (end >= HIGHMEM_START)
			end = HIGHMEM_START - 1;

		res = alloc_bootmem(sizeof(struct resource));

		res->start = start;
		res->end = end;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		switch (boot_mem_map.map[i].type) {
		case BOOT_MEM_RAM:
		case BOOT_MEM_INIT_RAM:
		case BOOT_MEM_ROM_DATA:
			res->name = "System RAM";
			res->flags |= IORESOURCE_SYSRAM;
			break;
		case BOOT_MEM_RESERVED:
		default:
			res->name = "reserved";
		}

		request_resource(&iomem_resource, res);

		/*
		 *  We don't know which RAM region contains kernel data,
		 *  so we try it repeatedly and let the resource manager
		 *  test it.
		 */
		request_resource(res, &code_resource);
		request_resource(res, &data_resource);
		request_crashkernel(res);
	}
}

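/*
 * Clamp the number of possible CPUs to nr_cpu_ids and make sure the
 * possible map is populated for exactly that many CPUs.
 */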
#ifdef CONFIG_SMP
static void __init prefill_possible_map(void)
{
	int i, possible = num_possible_cpus();

	if (possible > nr_cpu_ids)
		possible = nr_cpu_ids;

	for (i = 0; i < possible; i++)
		set_cpu_possible(i, true);
	for (; i < NR_CPUS; i++)
		set_cpu_possible(i, false);

	nr_cpu_ids = possible;
}
#else
static inline void prefill_possible_map(void) {}
#endif

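/*
 * setup_arch() is the MIPS architecture entry point called from
 * start_kernel(): probe the CPU, run the platform setup code, set up
 * the memory and console configuration and prepare SMP and paging.
 */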
void __init setup_arch(char **cmdline_p)
{
	cpu_probe();
	mips_cm_probe();
	prom_init();

	setup_early_fdc_console();
#ifdef CONFIG_EARLY_PRINTK
	setup_early_printk();
#endif
	cpu_report();
	check_bugs_early();

#if defined(CONFIG_VT)
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif

	arch_mem_init(cmdline_p);

	resource_init();
	plat_smp_setup();
	prefill_possible_map();

	cpu_cache_init();
	paging_init();
}

unsigned long kernelsp[NR_CPUS];
unsigned long fw_arg0, fw_arg1, fw_arg2, fw_arg3;

#ifdef CONFIG_USE_OF
unsigned long fw_passed_dtb;
#endif

#ifdef CONFIG_DEBUG_FS
struct dentry *mips_debugfs_dir;
static int __init debugfs_mips(void)
{
	struct dentry *d;

	d = debugfs_create_dir("mips", NULL);
	if (!d)
		return -ENOMEM;
	mips_debugfs_dir = d;
	return 0;
}
arch_initcall(debugfs_mips);
#endif