/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1995 Linus Torvalds
 * Copyright (C) 1995 Waldorf Electronics
 * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 01, 02, 03  Ralf Baechle
 * Copyright (C) 1996 Stoned Elipot
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2002, 2007	 Maciej W. Rozycki
 */
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/export.h>
#include <linux/screen_info.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>
#include <linux/initrd.h>
#include <linux/root_dev.h>
#include <linux/highmem.h>
#include <linux/console.h>
#include <linux/pfn.h>
#include <linux/debugfs.h>
#include <linux/kexec.h>
#include <linux/sizes.h>
#include <linux/device.h>
#include <linux/dma-contiguous.h>
#include <linux/decompress/generic.h>
#include <linux/of_fdt.h>

#include <asm/addrspace.h>
#include <asm/bootinfo.h>
#include <asm/bugs.h>
#include <asm/cache.h>
#include <asm/cdmm.h>
#include <asm/cpu.h>
#include <asm/debug.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp-ops.h>
#include <asm/prom.h>

#ifdef CONFIG_MIPS_ELF_APPENDED_DTB
const char __section(.appended_dtb) __appended_dtb[0x100000];
#endif /* CONFIG_MIPS_ELF_APPENDED_DTB */

struct cpuinfo_mips cpu_data[NR_CPUS] __read_mostly;

EXPORT_SYMBOL(cpu_data);

#ifdef CONFIG_VT
struct screen_info screen_info;
#endif

/*
 * Setup information
 *
 * These are initialized so they are in the .data section
 */
unsigned long mips_machtype __read_mostly = MACH_UNKNOWN;

EXPORT_SYMBOL(mips_machtype);

struct boot_mem_map boot_mem_map;

static char __initdata command_line[COMMAND_LINE_SIZE];
char __initdata arcs_cmdline[COMMAND_LINE_SIZE];

#ifdef CONFIG_CMDLINE_BOOL
static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
#endif

/*
 * mips_io_port_base is the beginning of the address space to which x86 style
 * I/O ports are mapped.
 */
const unsigned long mips_io_port_base = -1;
EXPORT_SYMBOL(mips_io_port_base);

static struct resource code_resource = { .name = "Kernel code", };
static struct resource data_resource = { .name = "Kernel data", };
static struct resource bss_resource = { .name = "Kernel bss", };

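/*
 * Magic marker used by detect_memory_region() below: the probe doubles an
 * offset until this value is mirrored at &detect_magic + size, i.e. until
 * reads wrap around past the end of the installed RAM.
 */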
static void *detect_magic __initdata = detect_memory_region;

void __init add_memory_region(phys_addr_t start, phys_addr_t size, long type)
{
	int x = boot_mem_map.nr_map;
	int i;

	/*
	 * If the region reaches the top of the physical address space, adjust
	 * the size slightly so that (start + size) doesn't overflow
	 */
	if (start + size - 1 == (phys_addr_t)ULLONG_MAX)
		--size;

	/* Sanity check */
	if (start + size < start) {
		pr_warn("Trying to add an invalid memory region, skipped\n");
		return;
	}

	/*
	 * Try to merge with existing entry, if any.
	 */
	for (i = 0; i < boot_mem_map.nr_map; i++) {
		struct boot_mem_map_entry *entry = boot_mem_map.map + i;
		unsigned long top;

		if (entry->type != type)
			continue;

		if (start + size < entry->addr)
			continue;			/* no overlap */

		if (entry->addr + entry->size < start)
			continue;			/* no overlap */

		top = max(entry->addr + entry->size, start + size);
		entry->addr = min(entry->addr, start);
		entry->size = top - entry->addr;

		return;
	}

	if (boot_mem_map.nr_map == BOOT_MEM_MAP_MAX) {
		pr_err("Ooops! Too many entries in the memory map!\n");
		return;
	}

	boot_mem_map.map[x].addr = start;
	boot_mem_map.map[x].size = size;
	boot_mem_map.map[x].type = type;
	boot_mem_map.nr_map++;
}

void __init detect_memory_region(phys_addr_t start, phys_addr_t sz_min, phys_addr_t sz_max)
{
	void *dm = &detect_magic;
	phys_addr_t size;

	for (size = sz_min; size < sz_max; size <<= 1) {
		if (!memcmp(dm, dm + size, sizeof(detect_magic)))
			break;
	}

	pr_debug("Memory: %lluMB of RAM detected at 0x%llx (min: %lluMB, max: %lluMB)\n",
		((unsigned long long) size) / SZ_1M,
		(unsigned long long) start,
		((unsigned long long) sz_min) / SZ_1M,
		((unsigned long long) sz_max) / SZ_1M);

	add_memory_region(start, size, BOOT_MEM_RAM);
}

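/*
 * Check whether [start, start + size) lies entirely within a BOOT_MEM_RAM
 * entry of boot_mem_map and does not overlap any BOOT_MEM_RESERVED entry.
 */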
bool __init memory_region_available(phys_addr_t start, phys_addr_t size)
{
	int i;
	bool in_ram = false, free = true;

	for (i = 0; i < boot_mem_map.nr_map; i++) {
		phys_addr_t start_, end_;

		start_ = boot_mem_map.map[i].addr;
		end_ = boot_mem_map.map[i].addr + boot_mem_map.map[i].size;

		switch (boot_mem_map.map[i].type) {
		case BOOT_MEM_RAM:
			if (start >= start_ && start + size <= end_)
				in_ram = true;
			break;
		case BOOT_MEM_RESERVED:
			if ((start >= start_ && start < end_) ||
			    (start < start_ && start + size >= start_))
				free = false;
			break;
		default:
			continue;
		}
	}

	return in_ram && free;
}

static void __init print_memory_map(void)
{
	int i;
	const int field = 2 * sizeof(unsigned long);

	for (i = 0; i < boot_mem_map.nr_map; i++) {
		printk(KERN_INFO " memory: %0*Lx @ %0*Lx ",
		       field, (unsigned long long) boot_mem_map.map[i].size,
		       field, (unsigned long long) boot_mem_map.map[i].addr);

		switch (boot_mem_map.map[i].type) {
		case BOOT_MEM_RAM:
			printk(KERN_CONT "(usable)\n");
			break;
		case BOOT_MEM_INIT_RAM:
			printk(KERN_CONT "(usable after init)\n");
			break;
		case BOOT_MEM_ROM_DATA:
			printk(KERN_CONT "(ROM data)\n");
			break;
		case BOOT_MEM_RESERVED:
			printk(KERN_CONT "(reserved)\n");
			break;
		default:
			printk(KERN_CONT "type %lu\n", boot_mem_map.map[i].type);
			break;
		}
	}
}

/*
 * Manage initrd
 */
#ifdef CONFIG_BLK_DEV_INITRD

static int __init rd_start_early(char *p)
{
	unsigned long start = memparse(p, &p);

#ifdef CONFIG_64BIT
	/* Guess if the sign extension was forgotten by bootloader */
	if (start < XKPHYS)
		start = (int)start;
#endif
	initrd_start = start;
	initrd_end += start;
	return 0;
}
early_param("rd_start", rd_start_early);

static int __init rd_size_early(char *p)
{
	initrd_end += memparse(p, &p);
	return 0;
}
early_param("rd_size", rd_size_early);

/* Returns the next free pfn after the initrd, or 0 if there is no usable initrd. */
static unsigned long __init init_initrd(void)
{
	unsigned long end;

	/*
	 * Board specific code or command line parser should have
	 * already set up initrd_start and initrd_end. In these cases
	 * perform sanity checks and use them if all looks good.
	 */
	if (!initrd_start || initrd_end <= initrd_start)
		goto disable;

	if (initrd_start & ~PAGE_MASK) {
		pr_err("initrd start must be page aligned\n");
		goto disable;
	}
	if (initrd_start < PAGE_OFFSET) {
		pr_err("initrd start < PAGE_OFFSET\n");
		goto disable;
	}

	/*
	 * Sanitize initrd addresses. For example firmware
	 * can't guess if they need to pass them through
	 * 64-bits values if the kernel has been built in pure
	 * 32-bit. We need also to switch from KSEG0 to XKPHYS
	 * addresses now, so the code can now safely use __pa().
	 */
	end = __pa(initrd_end);
	initrd_end = (unsigned long)__va(end);
	initrd_start = (unsigned long)__va(__pa(initrd_start));

	ROOT_DEV = Root_RAM0;
	return PFN_UP(end);
disable:
	initrd_start = 0;
	initrd_end = 0;
	return 0;
}

/* In some conditions (e.g. big endian bootloader with a little endian
   kernel), the initrd might appear byte swapped.  Try to detect this and
   byte swap it if needed.  */
static void __init maybe_bswap_initrd(void)
{
#if defined(CONFIG_CPU_CAVIUM_OCTEON)
	u64 buf;

	/* Check for CPIO signature */
	if (!memcmp((void *)initrd_start, "070701", 6))
		return;

	/* Check for compressed initrd */
	if (decompress_method((unsigned char *)initrd_start, 8, NULL))
		return;

	/* Try again with a byte swapped header */
	buf = swab64p((u64 *)initrd_start);
	if (!memcmp(&buf, "070701", 6) ||
	    decompress_method((unsigned char *)(&buf), 8, NULL)) {
		unsigned long i;

		pr_info("Byteswapped initrd detected\n");
		for (i = initrd_start; i < ALIGN(initrd_end, 8); i += 8)
			swab64s((u64 *)i);
	}
#endif
}

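/*
 * Reserve the initrd with the bootmem allocator (byte swapping the image
 * first if necessary), or disable it if it is empty or extends beyond the
 * end of low memory.
 */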
static void __init finalize_initrd(void)
{
	unsigned long size = initrd_end - initrd_start;

	if (size == 0) {
		printk(KERN_INFO "Initrd not found or empty");
		goto disable;
	}
	if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) {
		printk(KERN_ERR "Initrd extends beyond end of memory");
		goto disable;
	}

	maybe_bswap_initrd();

	reserve_bootmem(__pa(initrd_start), size, BOOTMEM_DEFAULT);
	initrd_below_start_ok = 1;

	pr_info("Initial ramdisk at: 0x%lx (%lu bytes)\n",
		initrd_start, size);
	return;
disable:
	printk(KERN_CONT " - disabling initrd\n");
	initrd_start = 0;
	initrd_end = 0;
}

#else  /* !CONFIG_BLK_DEV_INITRD */

static unsigned long __init init_initrd(void)
{
	return 0;
}

#define finalize_initrd()	do {} while (0)

#endif

/*
 * Initialize the bootmem allocator. It also sets up initrd related data
 * if needed.
 */
#if defined(CONFIG_SGI_IP27) || (defined(CONFIG_CPU_LOONGSON3) && defined(CONFIG_NUMA))

static void __init bootmem_init(void)
{
	init_initrd();
	finalize_initrd();
}

#else  /* !CONFIG_SGI_IP27 */

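/*
 * Size in bytes of the bootmem bitmap needed to cover @pages pages
 * (one bit per page), rounded up to a multiple of sizeof(long).
 */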
static unsigned long __init bootmap_bytes(unsigned long pages)
{
	unsigned long bytes = DIV_ROUND_UP(pages, 8);

	return ALIGN(bytes, sizeof(long));
}

static void __init bootmem_init(void)
{
	unsigned long reserved_end;
	unsigned long mapstart = ~0UL;
	unsigned long bootmap_size;
	bool bootmap_valid = false;
	int i;

	/*
	 * Sanity check any INITRD first. We don't take it into account
	 * for bootmem setup initially, rely on the end-of-kernel-code
	 * as our memory range starting point. Once bootmem is inited we
	 * will reserve the area used for the initrd.
	 */
	init_initrd();
	reserved_end = (unsigned long) PFN_UP(__pa_symbol(&_end));

	/*
	 * max_low_pfn is not a number of pages. The number of pages
	 * of the system is given by 'max_low_pfn - min_low_pfn'.
	 */
	min_low_pfn = ~0UL;
	max_low_pfn = 0;

	/*
	 * Find the highest page frame number we have available.
	 */
	for (i = 0; i < boot_mem_map.nr_map; i++) {
		unsigned long start, end;

		if (boot_mem_map.map[i].type != BOOT_MEM_RAM)
			continue;

		start = PFN_UP(boot_mem_map.map[i].addr);
		end = PFN_DOWN(boot_mem_map.map[i].addr
				+ boot_mem_map.map[i].size);

#ifndef CONFIG_HIGHMEM
		/*
		 * Skip highmem here so we get an accurate max_low_pfn if low
		 * memory stops short of high memory.
		 * If the region overlaps HIGHMEM_START, end is clipped so
		 * max_pfn excludes the highmem portion.
		 */
		if (start >= PFN_DOWN(HIGHMEM_START))
			continue;
		if (end > PFN_DOWN(HIGHMEM_START))
			end = PFN_DOWN(HIGHMEM_START);
#endif

		if (end > max_low_pfn)
			max_low_pfn = end;
		if (start < min_low_pfn)
			min_low_pfn = start;
		if (end <= reserved_end)
			continue;
#ifdef CONFIG_BLK_DEV_INITRD
		/* Skip zones before initrd and initrd itself */
		if (initrd_end && end <= (unsigned long)PFN_UP(__pa(initrd_end)))
			continue;
#endif
		if (start >= mapstart)
			continue;
		mapstart = max(reserved_end, start);
	}

	if (min_low_pfn >= max_low_pfn)
		panic("Incorrect memory mapping !!!");
	if (min_low_pfn > ARCH_PFN_OFFSET) {
		pr_info("Wasting %lu bytes for tracking %lu unused pages\n",
			(min_low_pfn - ARCH_PFN_OFFSET) * sizeof(struct page),
			min_low_pfn - ARCH_PFN_OFFSET);
	} else if (min_low_pfn < ARCH_PFN_OFFSET) {
		pr_info("%lu free pages won't be used\n",
			ARCH_PFN_OFFSET - min_low_pfn);
	}
	min_low_pfn = ARCH_PFN_OFFSET;

	/*
	 * Determine low and high memory ranges
	 */
	max_pfn = max_low_pfn;
	if (max_low_pfn > PFN_DOWN(HIGHMEM_START)) {
#ifdef CONFIG_HIGHMEM
		highstart_pfn = PFN_DOWN(HIGHMEM_START);
		highend_pfn = max_low_pfn;
#endif
		max_low_pfn = PFN_DOWN(HIGHMEM_START);
	}

#ifdef CONFIG_BLK_DEV_INITRD
	/*
	 * mapstart should be after initrd_end
	 */
	if (initrd_end)
		mapstart = max(mapstart, (unsigned long)PFN_UP(__pa(initrd_end)));
#endif

	/*
	 * Check that mapstart doesn't overlap with any of the memory
	 * regions that have been reserved through e.g. the DTB.
	 */
	bootmap_size = bootmap_bytes(max_low_pfn - min_low_pfn);

	bootmap_valid = memory_region_available(PFN_PHYS(mapstart),
						bootmap_size);
	for (i = 0; i < boot_mem_map.nr_map && !bootmap_valid; i++) {
		unsigned long mapstart_addr;

		switch (boot_mem_map.map[i].type) {
		case BOOT_MEM_RESERVED:
			mapstart_addr = PFN_ALIGN(boot_mem_map.map[i].addr +
						boot_mem_map.map[i].size);
			if (PHYS_PFN(mapstart_addr) < mapstart)
				break;

			bootmap_valid = memory_region_available(mapstart_addr,
								bootmap_size);
			if (bootmap_valid)
				mapstart = PHYS_PFN(mapstart_addr);
			break;
		default:
			break;
		}
	}

	if (!bootmap_valid)
		panic("No memory area to place a bootmap bitmap");

	/*
	 * Initialize the boot-time allocator with low memory only.
	 */
	if (bootmap_size != init_bootmem_node(NODE_DATA(0), mapstart,
					 min_low_pfn, max_low_pfn))
		panic("Unexpected memory size required for bootmap");

	for (i = 0; i < boot_mem_map.nr_map; i++) {
		unsigned long start, end;

		start = PFN_UP(boot_mem_map.map[i].addr);
		end = PFN_DOWN(boot_mem_map.map[i].addr
				+ boot_mem_map.map[i].size);

		if (start <= min_low_pfn)
			start = min_low_pfn;
		if (start >= end)
			continue;

#ifndef CONFIG_HIGHMEM
		if (end > max_low_pfn)
			end = max_low_pfn;

		/*
		 * ... finally, is the area going away?
		 */
		if (end <= start)
			continue;
#endif

		memblock_add_node(PFN_PHYS(start), PFN_PHYS(end - start), 0);
	}

	/*
	 * Register fully available low RAM pages with the bootmem allocator.
	 */
	for (i = 0; i < boot_mem_map.nr_map; i++) {
		unsigned long start, end, size;

		start = PFN_UP(boot_mem_map.map[i].addr);
		end   = PFN_DOWN(boot_mem_map.map[i].addr
				    + boot_mem_map.map[i].size);

		/*
		 * Reserve usable memory.
		 */
		switch (boot_mem_map.map[i].type) {
		case BOOT_MEM_RAM:
			break;
		case BOOT_MEM_INIT_RAM:
			memory_present(0, start, end);
			continue;
		default:
			/* Not usable memory */
			if (start > min_low_pfn && end < max_low_pfn)
				reserve_bootmem(boot_mem_map.map[i].addr,
						boot_mem_map.map[i].size,
						BOOTMEM_DEFAULT);
			continue;
		}

		/*
		 * We round the start address of usable memory up and the
		 * end of the usable range down.
		 */
		if (start >= max_low_pfn)
			continue;
		if (start < reserved_end)
			start = reserved_end;
		if (end > max_low_pfn)
			end = max_low_pfn;

		/*
		 * ... finally, is the area going away?
		 */
		if (end <= start)
			continue;
		size = end - start;

		/* Register lowmem ranges */
		free_bootmem(PFN_PHYS(start), size << PAGE_SHIFT);
		memory_present(0, start, end);
	}

	/*
	 * Reserve the bootmap memory.
	 */
	reserve_bootmem(PFN_PHYS(mapstart), bootmap_size, BOOTMEM_DEFAULT);

#ifdef CONFIG_RELOCATABLE
	/*
	 * The kernel reserves all memory below its _end symbol as bootmem,
	 * but the kernel may now be at a much higher address. The memory
	 * between the original and new locations may be returned to the system.
	 */
	if (__pa_symbol(_text) > __pa_symbol(VMLINUX_LOAD_ADDRESS)) {
		unsigned long offset;
		extern void show_kernel_relocation(const char *level);

		offset = __pa_symbol(_text) - __pa_symbol(VMLINUX_LOAD_ADDRESS);
		free_bootmem(__pa_symbol(VMLINUX_LOAD_ADDRESS), offset);

#if defined(CONFIG_DEBUG_KERNEL) && defined(CONFIG_DEBUG_INFO)
		/*
		 * This information is necessary when debugging the kernel,
		 * but it is a security vulnerability otherwise!
		 */
		show_kernel_relocation(KERN_INFO);
#endif
	}
#endif

	/*
	 * Reserve initrd memory if needed.
	 */
	finalize_initrd();
}

#endif	/* CONFIG_SGI_IP27 */

/*
 * arch_mem_init - initialize memory management subsystem
 *
 *  o plat_mem_setup() detects the memory configuration and will record detected
 *    memory areas using add_memory_region.
 *
 * At this stage the memory configuration of the system is known to the
 * kernel, but the generic memory management system is still entirely uninitialized.
 *
 *  o bootmem_init()
 *  o sparse_init()
 *  o paging_init()
 *  o dma_contiguous_reserve()
 *
 * At this stage the bootmem allocator is ready to use.
 *
 * NOTE: historically plat_mem_setup did the entire platform initialization.
 *	 This was rather impractical because it meant plat_mem_setup had to
 * get away without any kind of memory allocator.  To keep old code from
 * breaking, plat_setup was just renamed to plat_mem_setup and a second platform
 * initialization hook for anything else was introduced.
 */

static int usermem __initdata;

static int __init early_parse_mem(char *p)
{
	phys_addr_t start, size;

	/*
	 * If a user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		boot_mem_map.nr_map = 0;
		usermem = 1;
	}
	start = 0;
	size = memparse(p, &p);
	if (*p == '@')
		start = memparse(p + 1, &p);

	add_memory_region(start, size, BOOT_MEM_RAM);

	if (start && start > PHYS_OFFSET)
		add_memory_region(PHYS_OFFSET, start - PHYS_OFFSET,
				BOOT_MEM_RESERVED);
	return 0;
}
early_param("mem", early_parse_mem);

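/*
 * "memmap=nn[KMG]@ss[KMG]" adds a RAM region and "memmap=nn[KMG]$ss[KMG]"
 * reserves one; the exactmap and ACPI data forms are rejected on MIPS.
 */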
static int __init early_parse_memmap(char *p)
{
	char *oldp;
	u64 start_at, mem_size;

	if (!p)
		return -EINVAL;

	if (!strncmp(p, "exactmap", 8)) {
		pr_err("\"memmap=exactmap\" invalid on MIPS\n");
		return 0;
	}

	oldp = p;
	mem_size = memparse(p, &p);
	if (p == oldp)
		return -EINVAL;

	if (*p == '@') {
		start_at = memparse(p+1, &p);
		add_memory_region(start_at, mem_size, BOOT_MEM_RAM);
	} else if (*p == '#') {
		pr_err("\"memmap=nn#ss\" (force ACPI data) invalid on MIPS\n");
		return -EINVAL;
	} else if (*p == '$') {
		start_at = memparse(p+1, &p);
		add_memory_region(start_at, mem_size, BOOT_MEM_RESERVED);
	} else {
		pr_err("\"memmap\" invalid format!\n");
		return -EINVAL;
	}

	if (*p == '\0') {
		usermem = 1;
		return 0;
	} else
		return -EINVAL;
}
early_param("memmap", early_parse_memmap);

#ifdef CONFIG_PROC_VMCORE
unsigned long setup_elfcorehdr, setup_elfcorehdr_size;
static int __init early_parse_elfcorehdr(char *p)
{
	int i;

	setup_elfcorehdr = memparse(p, &p);

	for (i = 0; i < boot_mem_map.nr_map; i++) {
		unsigned long start = boot_mem_map.map[i].addr;
		unsigned long end = (boot_mem_map.map[i].addr +
				     boot_mem_map.map[i].size);
		if (setup_elfcorehdr >= start && setup_elfcorehdr < end) {
			/*
			 * Reserve from the elf core header to the end of
			 * the memory segment, that should all be kdump
			 * reserved memory.
			 */
			setup_elfcorehdr_size = end - setup_elfcorehdr;
			break;
		}
	}
	/*
	 * If we don't find it in the memory map, then we shouldn't
	 * have to worry about it, as the new kernel won't use it.
	 */
	return 0;
}
early_param("elfcorehdr", early_parse_elfcorehdr);
#endif

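/*
 * Add the range [mem, end) to boot_mem_map as @type, unless its start
 * already falls inside an existing entry.
 */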
static void __init arch_mem_addpart(phys_addr_t mem, phys_addr_t end, int type)
{
	phys_addr_t size;
	int i;

	size = end - mem;
	if (!size)
		return;

	/* Make sure it is in the boot_mem_map */
	for (i = 0; i < boot_mem_map.nr_map; i++) {
		if (mem >= boot_mem_map.map[i].addr &&
		    mem < (boot_mem_map.map[i].addr +
			   boot_mem_map.map[i].size))
			return;
	}
	add_memory_region(mem, size, type);
}

#ifdef CONFIG_KEXEC
static inline unsigned long long get_total_mem(void)
{
	unsigned long long total;

	total = max_pfn - min_low_pfn;
	return total << PAGE_SHIFT;
}

static void __init mips_parse_crashkernel(void)
{
	unsigned long long total_mem;
	unsigned long long crash_size, crash_base;
	int ret;

	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	if (ret != 0 || crash_size <= 0)
		return;

	if (!memory_region_available(crash_base, crash_size)) {
		pr_warn("Invalid memory region reserved for crash kernel\n");
		return;
	}

	crashk_res.start = crash_base;
	crashk_res.end	 = crash_base + crash_size - 1;
}

static void __init request_crashkernel(struct resource *res)
{
	int ret;

	if (crashk_res.start == crashk_res.end)
		return;

	ret = request_resource(res, &crashk_res);
	if (!ret)
		pr_info("Reserving %ldMB of memory at %ldMB for crashkernel\n",
			(unsigned long)((crashk_res.end -
					 crashk_res.start + 1) >> 20),
			(unsigned long)(crashk_res.start  >> 20));
}
#else /* !defined(CONFIG_KEXEC)		*/
static void __init mips_parse_crashkernel(void)
{
}

static void __init request_crashkernel(struct resource *res)
{
}
#endif /* !defined(CONFIG_KEXEC)  */

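
/*
 * Kconfig-selected policies for combining the bootloader (arcs_cmdline),
 * DTB and built-in command lines.
 */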
#define USE_PROM_CMDLINE	IS_ENABLED(CONFIG_MIPS_CMDLINE_FROM_BOOTLOADER)
#define USE_DTB_CMDLINE		IS_ENABLED(CONFIG_MIPS_CMDLINE_FROM_DTB)
#define EXTEND_WITH_PROM	IS_ENABLED(CONFIG_MIPS_CMDLINE_DTB_EXTEND)
#define BUILTIN_EXTEND_WITH_PROM	\
	IS_ENABLED(CONFIG_MIPS_CMDLINE_BUILTIN_EXTEND)

static void __init arch_mem_init(char **cmdline_p)
{
	struct memblock_region *reg;
	extern void plat_mem_setup(void);

#if defined(CONFIG_CMDLINE_BOOL) && defined(CONFIG_CMDLINE_OVERRIDE)
	strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
#else
	if ((USE_PROM_CMDLINE && arcs_cmdline[0]) ||
	    (USE_DTB_CMDLINE && !boot_command_line[0]))
		strlcpy(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE);

	if (EXTEND_WITH_PROM && arcs_cmdline[0]) {
		if (boot_command_line[0])
			strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
		strlcat(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE);
	}

#if defined(CONFIG_CMDLINE_BOOL)
	if (builtin_cmdline[0]) {
		if (boot_command_line[0])
			strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
		strlcat(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
	}

	if (BUILTIN_EXTEND_WITH_PROM && arcs_cmdline[0]) {
		if (boot_command_line[0])
			strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
		strlcat(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE);
	}
#endif
#endif

	/* call board setup routine */
	plat_mem_setup();

	/*
	 * Make sure all kernel memory is in the maps.  The "UP" and
	 * "DOWN" are opposite for initdata since if it crosses over
	 * into another memory section, you don't want that section to
	 * be freed when the initdata is freed.
	 */
	arch_mem_addpart(PFN_DOWN(__pa_symbol(&_text)) << PAGE_SHIFT,
			 PFN_UP(__pa_symbol(&_edata)) << PAGE_SHIFT,
			 BOOT_MEM_RAM);
	arch_mem_addpart(PFN_UP(__pa_symbol(&__init_begin)) << PAGE_SHIFT,
			 PFN_DOWN(__pa_symbol(&__init_end)) << PAGE_SHIFT,
			 BOOT_MEM_INIT_RAM);

	pr_info("Determined physical RAM map:\n");
	print_memory_map();

	strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);

	*cmdline_p = command_line;

	parse_early_param();

	if (usermem) {
		pr_info("User-defined physical RAM map:\n");
		print_memory_map();
	}

	early_init_fdt_reserve_self();
	early_init_fdt_scan_reserved_mem();

	bootmem_init();
#ifdef CONFIG_PROC_VMCORE
	if (setup_elfcorehdr && setup_elfcorehdr_size) {
		printk(KERN_INFO "kdump reserved memory at %lx-%lx\n",
		       setup_elfcorehdr, setup_elfcorehdr_size);
		reserve_bootmem(setup_elfcorehdr, setup_elfcorehdr_size,
				BOOTMEM_DEFAULT);
	}
#endif

	mips_parse_crashkernel();
#ifdef CONFIG_KEXEC
	if (crashk_res.start != crashk_res.end)
		reserve_bootmem(crashk_res.start,
				crashk_res.end - crashk_res.start + 1,
				BOOTMEM_DEFAULT);
#endif
	device_tree_init();
	sparse_init();
	plat_swiotlb_setup();

	dma_contiguous_reserve(PFN_PHYS(max_low_pfn));
	/* Tell bootmem about cma reserved memblock section */
	for_each_memblock(reserved, reg)
		if (reg->size != 0)
			reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);

	reserve_bootmem_region(__pa_symbol(&__nosave_begin),
			__pa_symbol(&__nosave_end)); /* Reserve for hibernation */
}

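/*
 * Register the boot_mem_map entries below HIGHMEM_START as iomem resources
 * and request the kernel code/data/bss and crashkernel resources against
 * each RAM region.
 */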
static void __init resource_init(void)
{
	int i;

	if (UNCAC_BASE != IO_BASE)
		return;

	code_resource.start = __pa_symbol(&_text);
	code_resource.end = __pa_symbol(&_etext) - 1;
	data_resource.start = __pa_symbol(&_etext);
	data_resource.end = __pa_symbol(&_edata) - 1;
	bss_resource.start = __pa_symbol(&__bss_start);
	bss_resource.end = __pa_symbol(&__bss_stop) - 1;

	for (i = 0; i < boot_mem_map.nr_map; i++) {
		struct resource *res;
		unsigned long start, end;

		start = boot_mem_map.map[i].addr;
		end = boot_mem_map.map[i].addr + boot_mem_map.map[i].size - 1;
		if (start >= HIGHMEM_START)
			continue;
		if (end >= HIGHMEM_START)
			end = HIGHMEM_START - 1;

		res = alloc_bootmem(sizeof(struct resource));

		res->start = start;
		res->end = end;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		switch (boot_mem_map.map[i].type) {
		case BOOT_MEM_RAM:
		case BOOT_MEM_INIT_RAM:
		case BOOT_MEM_ROM_DATA:
			res->name = "System RAM";
			res->flags |= IORESOURCE_SYSRAM;
			break;
		case BOOT_MEM_RESERVED:
		default:
			res->name = "reserved";
		}

		request_resource(&iomem_resource, res);

		/*
		 *  We don't know which RAM region contains kernel data,
		 *  so we try it repeatedly and let the resource manager
		 *  test it.
		 */
		request_resource(res, &code_resource);
		request_resource(res, &data_resource);
		request_resource(res, &bss_resource);
		request_crashkernel(res);
	}
}

#ifdef CONFIG_SMP
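/*
 * Mark the first num_possible_cpus() CPUs (capped at nr_cpu_ids) as
 * possible, clear the remainder and shrink nr_cpu_ids accordingly.
 */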
static void __init prefill_possible_map(void)
{
	int i, possible = num_possible_cpus();

	if (possible > nr_cpu_ids)
		possible = nr_cpu_ids;

	for (i = 0; i < possible; i++)
		set_cpu_possible(i, true);
	for (; i < NR_CPUS; i++)
		set_cpu_possible(i, false);

	nr_cpu_ids = possible;
}
#else
static inline void prefill_possible_map(void) {}
#endif

void __init setup_arch(char **cmdline_p)
{
	cpu_probe();
	mips_cm_probe();
	prom_init();

	setup_early_fdc_console();
#ifdef CONFIG_EARLY_PRINTK
	setup_early_printk();
#endif
	cpu_report();
	check_bugs_early();

#if defined(CONFIG_VT)
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif

	arch_mem_init(cmdline_p);

	resource_init();
	plat_smp_setup();
	prefill_possible_map();

	cpu_cache_init();
	paging_init();
}

unsigned long kernelsp[NR_CPUS];
unsigned long fw_arg0, fw_arg1, fw_arg2, fw_arg3;

#ifdef CONFIG_USE_OF
unsigned long fw_passed_dtb;
#endif

#ifdef CONFIG_DEBUG_FS
struct dentry *mips_debugfs_dir;
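/*
 * Create the top-level "mips" directory in debugfs and record it in
 * mips_debugfs_dir.
 */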
static int __init debugfs_mips(void)
{
	struct dentry *d;

	d = debugfs_create_dir("mips", NULL);
	if (!d)
		return -ENOMEM;
	mips_debugfs_dir = d;
	return 0;
}
arch_initcall(debugfs_mips);
#endif