/*
 *  linux/arch/arm/kernel/setup.c
 *
 *  Copyright (C) 1995-2001 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/of_platform.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/of_fdt.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/sort.h>

#include <asm/unified.h>
#include <asm/cp15.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/elf.h>
#include <asm/procinfo.h>
#include <asm/psci.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/mach-types.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
#include <asm/tlbflush.h>

#include <asm/prom.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <asm/system_info.h>
#include <asm/system_misc.h>
#include <asm/traps.h>
#include <asm/unwind.h>
#include <asm/memblock.h>
#include <asm/virt.h>

#include "atags.h"


#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
char fpe_type[8];

static int __init fpe_setup(char *line)
{
	memcpy(fpe_type, line, 8);
	return 1;
}

__setup("fpe=", fpe_setup);
#endif

extern void init_default_cache_policy(unsigned long);
extern void paging_init(const struct machine_desc *desc);
extern void early_paging_init(const struct machine_desc *,
			      struct proc_info_list *);
extern void sanity_check_meminfo(void);
extern enum reboot_mode reboot_mode;
extern void setup_dma_zone(const struct machine_desc *desc);

unsigned int processor_id;
EXPORT_SYMBOL(processor_id);
unsigned int __machine_arch_type __read_mostly;
EXPORT_SYMBOL(__machine_arch_type);
unsigned int cacheid __read_mostly;
EXPORT_SYMBOL(cacheid);

unsigned int __atags_pointer __initdata;

unsigned int system_rev;
EXPORT_SYMBOL(system_rev);

unsigned int system_serial_low;
EXPORT_SYMBOL(system_serial_low);

unsigned int system_serial_high;
EXPORT_SYMBOL(system_serial_high);

unsigned int elf_hwcap __read_mostly;
EXPORT_SYMBOL(elf_hwcap);

unsigned int elf_hwcap2 __read_mostly;
EXPORT_SYMBOL(elf_hwcap2);


#ifdef MULTI_CPU
struct processor processor __read_mostly;
#endif
#ifdef MULTI_TLB
struct cpu_tlb_fns cpu_tlb __read_mostly;
#endif
#ifdef MULTI_USER
struct cpu_user_fns cpu_user __read_mostly;
#endif
#ifdef MULTI_CACHE
struct cpu_cache_fns cpu_cache __read_mostly;
#endif
#ifdef CONFIG_OUTER_CACHE
struct outer_cache_fns outer_cache __read_mostly;
EXPORT_SYMBOL(outer_cache);
#endif

/*
 * Cached cpu_architecture() result for use by assembler code.
 * C code should use the cpu_architecture() function instead of accessing this
 * variable directly.
 */
int __cpu_architecture __read_mostly = CPU_ARCH_UNKNOWN;

struct stack {
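	/* three words per mode: enough for the exception vector stubs to stash
	 * r0, lr and spsr before switching to the SVC stack */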
	u32 irq[3];
	u32 abt[3];
	u32 und[3];
} ____cacheline_aligned;

#ifndef CONFIG_CPU_V7M
static struct stack stacks[NR_CPUS];
#endif

char elf_platform[ELF_PLATFORM_SIZE];
EXPORT_SYMBOL(elf_platform);

static const char *cpu_name;
static const char *machine_name;
static char __initdata cmd_line[COMMAND_LINE_SIZE];
const struct machine_desc *machine_desc __initdata;

static union { char c[4]; unsigned long l; } endian_test __initdata = { { 'l', '?', '?', 'b' } };
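/* (char)endian_test.l reads the lowest-addressed byte: 'l' on little-endian,
 * 'b' on big-endian builds; appended to the ELF platform name in setup_processor(). */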
#define ENDIANNESS ((char)endian_test.l)

DEFINE_PER_CPU(struct cpuinfo_arm, cpu_data);

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Video RAM",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel code",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	}
};

#define video_ram   mem_res[0]
#define kernel_code mem_res[1]
#define kernel_data mem_res[2]

static struct resource io_res[] = {
	{
		.name = "reserved",
		.start = 0x3bc,
		.end = 0x3be,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x378,
		.end = 0x37f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	},
	{
		.name = "reserved",
		.start = 0x278,
		.end = 0x27f,
		.flags = IORESOURCE_IO | IORESOURCE_BUSY
	}
};

#define lp0 io_res[0]
#define lp1 io_res[1]
#define lp2 io_res[2]

static const char *proc_arch[] = {
	"undefined/unknown",
	"3",
	"4",
	"4T",
	"5",
	"5T",
	"5TE",
	"5TEJ",
	"6TEJ",
	"7",
	"7M",
	"?(12)",
	"?(13)",
	"?(14)",
	"?(15)",
	"?(16)",
	"?(17)",
};

#ifdef CONFIG_CPU_V7M
static int __get_cpu_architecture(void)
{
	return CPU_ARCH_ARMv7M;
}
#else
static int __get_cpu_architecture(void)
{
	int cpu_arch;

	if ((read_cpuid_id() & 0x0008f000) == 0) {
		cpu_arch = CPU_ARCH_UNKNOWN;
	} else if ((read_cpuid_id() & 0x0008f000) == 0x00007000) {
		cpu_arch = (read_cpuid_id() & (1 << 23)) ? CPU_ARCH_ARMv4T : CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x00080000) == 0x00000000) {
		cpu_arch = (read_cpuid_id() >> 16) & 7;
		if (cpu_arch)
			cpu_arch += CPU_ARCH_ARMv3;
	} else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
		unsigned int mmfr0;

		/* Revised CPUID format. Read the Memory Model Feature
		 * Register 0 and check for VMSAv7 or PMSAv7 */
		asm("mrc	p15, 0, %0, c0, c1, 4"
		    : "=r" (mmfr0));
		if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
		    (mmfr0 & 0x000000f0) >= 0x00000030)
			cpu_arch = CPU_ARCH_ARMv7;
		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
			 (mmfr0 & 0x000000f0) == 0x00000020)
			cpu_arch = CPU_ARCH_ARMv6;
		else
			cpu_arch = CPU_ARCH_UNKNOWN;
	} else
		cpu_arch = CPU_ARCH_UNKNOWN;

	return cpu_arch;
}
#endif

int __pure cpu_architecture(void)
{
	BUG_ON(__cpu_architecture == CPU_ARCH_UNKNOWN);

	return __cpu_architecture;
}

static int cpu_has_aliasing_icache(unsigned int arch)
{
	int aliasing_icache;
	unsigned int id_reg, num_sets, line_size;

	/* PIPT caches never alias. */
	if (icache_is_pipt())
		return 0;

	/* arch specifies the register format */
	switch (arch) {
	case CPU_ARCH_ARMv7:
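		/* CSSELR = 1 selects the L1 instruction cache for the CCSIDR read below */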
		asm("mcr	p15, 2, %0, c0, c0, 0 @ set CSSELR"
		    : /* No output operands */
		    : "r" (1));
		isb();
		asm("mrc	p15, 1, %0, c0, c0, 0 @ read CCSIDR"
		    : "=r" (id_reg));
		line_size = 4 << ((id_reg & 0x7) + 2);
		num_sets = ((id_reg >> 13) & 0x7fff) + 1;
		aliasing_icache = (line_size * num_sets) > PAGE_SIZE;
		break;
	case CPU_ARCH_ARMv6:
		aliasing_icache = read_cpuid_cachetype() & (1 << 11);
		break;
	default:
		/* I-cache aliases will be handled by D-cache aliasing code */
		aliasing_icache = 0;
	}

	return aliasing_icache;
}

static void __init cacheid_init(void)
{
	unsigned int arch = cpu_architecture();

	if (arch == CPU_ARCH_ARMv7M) {
		cacheid = 0;
	} else if (arch >= CPU_ARCH_ARMv6) {
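		/* CTR[31:29] == 0x4 indicates the ARMv7 cache type register format */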
		unsigned int cachetype = read_cpuid_cachetype();
		if ((cachetype & (7 << 29)) == 4 << 29) {
			/* ARMv7 register format */
			arch = CPU_ARCH_ARMv7;
			cacheid = CACHEID_VIPT_NONALIASING;
			switch (cachetype & (3 << 14)) {
			case (1 << 14):
				cacheid |= CACHEID_ASID_TAGGED;
				break;
			case (3 << 14):
				cacheid |= CACHEID_PIPT;
				break;
			}
		} else {
			arch = CPU_ARCH_ARMv6;
			if (cachetype & (1 << 23))
				cacheid = CACHEID_VIPT_ALIASING;
			else
				cacheid = CACHEID_VIPT_NONALIASING;
		}
		if (cpu_has_aliasing_icache(arch))
			cacheid |= CACHEID_VIPT_I_ALIASING;
	} else {
		cacheid = CACHEID_VIVT;
	}

	pr_info("CPU: %s data cache, %s instruction cache\n",
		cache_is_vivt() ? "VIVT" :
		cache_is_vipt_aliasing() ? "VIPT aliasing" :
		cache_is_vipt_nonaliasing() ? "PIPT / VIPT nonaliasing" : "unknown",
		cache_is_vivt() ? "VIVT" :
		icache_is_vivt_asid_tagged() ? "VIVT ASID tagged" :
		icache_is_vipt_aliasing() ? "VIPT aliasing" :
		icache_is_pipt() ? "PIPT" :
		cache_is_vipt_nonaliasing() ? "VIPT nonaliasing" : "unknown");
}

/*
 * These functions re-use the assembly code in head.S, which
 * already provide the required functionality.
 */
extern struct proc_info_list *lookup_processor_type(unsigned int);

void __init early_print(const char *str, ...)
{
	extern void printascii(const char *);
	char buf[256];
	va_list ap;

	va_start(ap, str);
	vsnprintf(buf, sizeof(buf), str, ap);
	va_end(ap);

#ifdef CONFIG_DEBUG_LL
	printascii(buf);
#endif
	printk("%s", buf);
}

static void __init cpuid_init_hwcaps(void)
{
	unsigned int divide_instrs, vmsa;

	if (cpu_architecture() < CPU_ARCH_ARMv7)
		return;

	divide_instrs = (read_cpuid_ext(CPUID_EXT_ISAR0) & 0x0f000000) >> 24;

	switch (divide_instrs) {
	case 2:
		elf_hwcap |= HWCAP_IDIVA;
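		/* fall through: ARM-state SDIV/UDIV implies the Thumb encodings too */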
	case 1:
		elf_hwcap |= HWCAP_IDIVT;
	}

	/* LPAE implies atomic ldrd/strd instructions */
	vmsa = (read_cpuid_ext(CPUID_EXT_MMFR0) & 0xf) >> 0;
	if (vmsa >= 5)
		elf_hwcap |= HWCAP_LPAE;
}

static void __init feat_v6_fixup(void)
{
	int id = read_cpuid_id();

	if ((id & 0xff0f0000) != 0x41070000)
		return;

	/*
	 * HWCAP_TLS is available only on 1136 r1p0 and later,
	 * see also kuser_get_tls_init.
	 */
	if ((((id >> 4) & 0xfff) == 0xb36) && (((id >> 20) & 3) == 0))
		elf_hwcap &= ~HWCAP_TLS;
}

/*
 * cpu_init - initialise one CPU.
 *
 * cpu_init sets up the per-CPU stacks.
 */
void notrace cpu_init(void)
{
#ifndef CONFIG_CPU_V7M
	unsigned int cpu = smp_processor_id();
	struct stack *stk = &stacks[cpu];

	if (cpu >= NR_CPUS) {
		pr_crit("CPU%u: bad primary CPU number\n", cpu);
		BUG();
	}

	/*
	 * This only works on resume and secondary cores. For booting on the
	 * boot cpu, smp_prepare_boot_cpu is called after percpu area setup.
	 */
	set_my_cpu_offset(per_cpu_offset(cpu));

	cpu_proc_init();

	/*
	 * Define the placement constraint for the inline asm directive below.
	 * In Thumb-2, msr with an immediate value is not allowed.
	 */
#ifdef CONFIG_THUMB2_KERNEL
#define PLC	"r"
#else
#define PLC	"I"
#endif

	/*
	 * setup stacks for re-entrant exception handlers
	 */
	__asm__ (
	"msr	cpsr_c, %1\n\t"
	"add	r14, %0, %2\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %3\n\t"
	"add	r14, %0, %4\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %5\n\t"
	"add	r14, %0, %6\n\t"
	"mov	sp, r14\n\t"
	"msr	cpsr_c, %7"
	    :
	    : "r" (stk),
	      PLC (PSR_F_BIT | PSR_I_BIT | IRQ_MODE),
	      "I" (offsetof(struct stack, irq[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | ABT_MODE),
	      "I" (offsetof(struct stack, abt[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | UND_MODE),
	      "I" (offsetof(struct stack, und[0])),
	      PLC (PSR_F_BIT | PSR_I_BIT | SVC_MODE)
	    : "r14");
#endif
}

u32 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = MPIDR_INVALID };

void __init smp_setup_processor_id(void)
{
	int i;
	u32 mpidr = is_smp() ? read_cpuid_mpidr() & MPIDR_HWID_BITMASK : 0;
	u32 cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);


	cpu_logical_map(0) = cpu;
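	/* the boot CPU becomes logical CPU 0; the CPU that would have been 0 takes its slot */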
	for (i = 1; i < nr_cpu_ids; ++i)
		cpu_logical_map(i) = i == cpu ? 0 : i;

	/*
	 * clear __my_cpu_offset on boot CPU to avoid hang caused by
	 * using percpu variable early, for example, lockdep will
	 * access percpu variable inside lock_release
	 */
	set_my_cpu_offset(0);

	pr_info("Booting Linux on physical CPU 0x%x\n", mpidr);
}

struct mpidr_hash mpidr_hash;
#ifdef CONFIG_SMP
/**
 * smp_build_mpidr_hash - Pre-compute shifts required at each affinity
 *			  level in order to build a linear index from an
 *			  MPIDR value. Resulting algorithm is a collision
 *			  free hash carried out through shifting and ORing
 */
static void __init smp_build_mpidr_hash(void)
{
	u32 i, affinity;
	u32 fs[3], bits[3], ls, mask = 0;
	/*
	 * Pre-scan the list of MPIDRS and filter out bits that do
	 * not contribute to affinity levels, ie they never toggle.
	 */
	for_each_possible_cpu(i)
		mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
	pr_debug("mask of set bits 0x%x\n", mask);
	/*
	 * Find and stash the last and first bit set at all affinity levels to
	 * check how many bits are required to represent them.
	 */
	for (i = 0; i < 3; i++) {
		affinity = MPIDR_AFFINITY_LEVEL(mask, i);
		/*
		 * Find the MSB bit and LSB bits position
		 * to determine how many bits are required
		 * to express the affinity level.
		 */
		ls = fls(affinity);
		fs[i] = affinity ? ffs(affinity) - 1 : 0;
		bits[i] = ls - fs[i];
	}
	/*
	 * An index can be created from the MPIDR by isolating the
	 * significant bits at each affinity level and by shifting
	 * them in order to compress the 24 bits values space to a
	 * compressed set of values. This is equivalent to hashing
	 * the MPIDR through shifting and ORing. It is a collision free
	 * hash though not minimal since some levels might contain a number
	 * of CPUs that is not an exact power of 2 and their bit
	 * representation might contain holes, eg MPIDR[7:0] = {0x2, 0x80}.
	 */
	mpidr_hash.shift_aff[0] = fs[0];
	mpidr_hash.shift_aff[1] = MPIDR_LEVEL_BITS + fs[1] - bits[0];
	mpidr_hash.shift_aff[2] = 2*MPIDR_LEVEL_BITS + fs[2] -
						(bits[1] + bits[0]);
	mpidr_hash.mask = mask;
	mpidr_hash.bits = bits[2] + bits[1] + bits[0];
	pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] mask[0x%x] bits[%u]\n",
				mpidr_hash.shift_aff[0],
				mpidr_hash.shift_aff[1],
				mpidr_hash.shift_aff[2],
				mpidr_hash.mask,
				mpidr_hash.bits);
	/*
	 * 4x is an arbitrary value used to warn on a hash table much bigger
	 * than expected on most systems.
	 */
	if (mpidr_hash_size() > 4 * num_possible_cpus())
		pr_warn("Large number of MPIDR hash buckets detected\n");
	sync_cache_w(&mpidr_hash);
}
#endif

static void __init setup_processor(void)
{
	struct proc_info_list *list;

	/*
	 * locate processor in the list of supported processor
	 * types.  The linker builds this table for us from the
	 * entries in arch/arm/mm/proc-*.S
	 */
	list = lookup_processor_type(read_cpuid_id());
	if (!list) {
		pr_err("CPU configuration botched (ID %08x), unable to continue.\n",
		       read_cpuid_id());
		while (1);
	}

	cpu_name = list->cpu_name;
	__cpu_architecture = __get_cpu_architecture();

#ifdef MULTI_CPU
	processor = *list->proc;
#endif
#ifdef MULTI_TLB
	cpu_tlb = *list->tlb;
#endif
#ifdef MULTI_USER
	cpu_user = *list->user;
#endif
#ifdef MULTI_CACHE
	cpu_cache = *list->cache;
#endif

	pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
		cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
		proc_arch[cpu_architecture()], get_cr());

	snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
		 list->arch_name, ENDIANNESS);
	snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
		 list->elf_name, ENDIANNESS);
	elf_hwcap = list->elf_hwcap;

	cpuid_init_hwcaps();

#ifndef CONFIG_ARM_THUMB
	elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
#endif
#ifdef CONFIG_MMU
	init_default_cache_policy(list->__cpu_mm_mmu_flags);
#endif
	erratum_a15_798181_init();

	feat_v6_fixup();

	cacheid_init();
	cpu_init();
}

void __init dump_machine_table(void)
{
	const struct machine_desc *p;

	early_print("Available machine support:\n\nID (hex)\tNAME\n");
	for_each_machine_desc(p)
		early_print("%08x\t%s\n", p->nr, p->name);

	early_print("\nPlease check your kernel config and/or bootloader.\n");

	while (true)
		/* can't use cpu_relax() here as it may require MMU setup */;
}

int __init arm_add_memory(u64 start, u64 size)
{
	u64 aligned_start;

	/*
	 * Ensure that start/size are aligned to a page boundary.
	 * Size is appropriately rounded down, start is rounded up.
	 */
	size -= start & ~PAGE_MASK;
	aligned_start = PAGE_ALIGN(start);

#ifndef CONFIG_ARCH_PHYS_ADDR_T_64BIT
	if (aligned_start > ULONG_MAX) {
		pr_crit("Ignoring memory at 0x%08llx outside 32-bit physical address space\n",
			(long long)start);
		return -EINVAL;
	}

	if (aligned_start + size > ULONG_MAX) {
		pr_crit("Truncating memory at 0x%08llx to fit in 32-bit physical address space\n",
			(long long)start);
		/*
		 * To ensure bank->start + bank->size is representable in
		 * 32 bits, we use ULONG_MAX as the upper limit rather than 4GB.
		 * This means we lose a page after masking.
		 */
		size = ULONG_MAX - aligned_start;
	}
#endif

	if (aligned_start < PHYS_OFFSET) {
		if (aligned_start + size <= PHYS_OFFSET) {
			pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
				aligned_start, aligned_start + size);
			return -EINVAL;
		}

		pr_info("Ignoring memory below PHYS_OFFSET: 0x%08llx-0x%08llx\n",
			aligned_start, (u64)PHYS_OFFSET);

		size -= PHYS_OFFSET - aligned_start;
		aligned_start = PHYS_OFFSET;
	}

	start = aligned_start;
	size = size & ~(phys_addr_t)(PAGE_SIZE - 1);

	/*
	 * Check whether this memory region has non-zero size or
	 * invalid node number.
	 */
	if (size == 0)
		return -EINVAL;

	memblock_add(start, size);
	return 0;
}

/*
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */

static int __init early_mem(char *p)
{
	static int usermem __initdata = 0;
	u64 size;
	u64 start;
	char *endp;

	/*
	 * If the user specifies memory size, we
	 * blow away any automatically generated
	 * size.
	 */
	if (usermem == 0) {
		usermem = 1;
		memblock_remove(memblock_start_of_DRAM(),
			memblock_end_of_DRAM() - memblock_start_of_DRAM());
	}

	start = PHYS_OFFSET;
	size  = memparse(p, &endp);
	if (*endp == '@')
		start = memparse(endp + 1, NULL);

	arm_add_memory(start, size);

	return 0;
}
early_param("mem", early_mem);

static void __init request_standard_resources(const struct machine_desc *mdesc)
{
	struct memblock_region *region;
	struct resource *res;

	kernel_code.start   = virt_to_phys(_text);
	kernel_code.end     = virt_to_phys(_etext - 1);
	kernel_data.start   = virt_to_phys(_sdata);
	kernel_data.end     = virt_to_phys(_end - 1);

	for_each_memblock(memory, region) {
		res = memblock_virt_alloc(sizeof(*res), 0);
		res->name  = "System RAM";
		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}

	if (mdesc->video_start) {
		video_ram.start = mdesc->video_start;
		video_ram.end   = mdesc->video_end;
		request_resource(&iomem_resource, &video_ram);
	}

	/*
	 * Some machines don't have the possibility of ever
	 * possessing lp0, lp1 or lp2
	 */
	if (mdesc->reserve_lp0)
		request_resource(&ioport_resource, &lp0);
	if (mdesc->reserve_lp1)
		request_resource(&ioport_resource, &lp1);
	if (mdesc->reserve_lp2)
		request_resource(&ioport_resource, &lp2);
}

#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
struct screen_info screen_info = {
 .orig_video_lines	= 30,
 .orig_video_cols	= 80,
 .orig_video_mode	= 0,
 .orig_video_ega_bx	= 0,
 .orig_video_isVGA	= 1,
 .orig_video_points	= 8
};
#endif

static int __init customize_machine(void)
{
	/*
	 * customizes platform devices, or adds new ones
	 * On DT based machines, we fall back to populating the
	 * machine from the device tree, if no callback is provided,
	 * otherwise we would always need an init_machine callback.
	 */
	if (machine_desc->init_machine)
		machine_desc->init_machine();
#ifdef CONFIG_OF
	else
		of_platform_populate(NULL, of_default_bus_match_table,
					NULL, NULL);
#endif
	return 0;
}
arch_initcall(customize_machine);

static int __init init_machine_late(void)
{
	if (machine_desc->init_late)
		machine_desc->init_late();
	return 0;
}
late_initcall(init_machine_late);

#ifdef CONFIG_KEXEC
static inline unsigned long long get_total_mem(void)
{
	unsigned long total;

	total = max_low_pfn - min_low_pfn;
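	/* lowmem pages only; highmem is not counted in this total */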
	return total << PAGE_SHIFT;
}

/**
 * reserve_crashkernel() - reserves memory are for crash kernel
 *
 * This function reserves memory area given in "crashkernel=" kernel command
 * line parameter. The memory reserved is used by a dump capture kernel when
 * primary kernel is crashing.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_size, crash_base;
	unsigned long long total_mem;
	int ret;

	total_mem = get_total_mem();
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	if (ret)
		return;

	ret = memblock_reserve(crash_base, crash_size);
	if (ret < 0) {
		pr_warn("crashkernel reservation failed - memory is in use (0x%lx)\n",
			(unsigned long)crash_base);
		return;
	}

	pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n",
		(unsigned long)(crash_size >> 20),
		(unsigned long)(crash_base >> 20),
		(unsigned long)(total_mem >> 20));

	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);
}
#else
static inline void reserve_crashkernel(void) {}
#endif /* CONFIG_KEXEC */

void __init hyp_mode_check(void)
{
#ifdef CONFIG_ARM_VIRT_EXT
	sync_boot_mode();

	if (is_hyp_mode_available()) {
		pr_info("CPU: All CPU(s) started in HYP mode.\n");
		pr_info("CPU: Virtualization extensions available.\n");
	} else if (is_hyp_mode_mismatched()) {
		pr_warn("CPU: WARNING: CPU(s) started in wrong/inconsistent modes (primary CPU mode 0x%x)\n",
			__boot_cpu_mode & MODE_MASK);
		pr_warn("CPU: This may indicate a broken bootloader or firmware.\n");
	} else
		pr_info("CPU: All CPU(s) started in SVC mode.\n");
#endif
}

void __init setup_arch(char **cmdline_p)
{
	const struct machine_desc *mdesc;

	setup_processor();
	mdesc = setup_machine_fdt(__atags_pointer);
	if (!mdesc)
		mdesc = setup_machine_tags(__atags_pointer, __machine_arch_type);
	machine_desc = mdesc;
	machine_name = mdesc->name;

	if (mdesc->reboot_mode != REBOOT_HARD)
		reboot_mode = mdesc->reboot_mode;

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code   = (unsigned long) _etext;
	init_mm.end_data   = (unsigned long) _edata;
	init_mm.brk	   = (unsigned long) _end;

	/* populate cmd_line too for later use, preserving boot_command_line */
	strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = cmd_line;

	parse_early_param();

	early_paging_init(mdesc, lookup_processor_type(read_cpuid_id()));
	setup_dma_zone(mdesc);
	sanity_check_meminfo();
	arm_memblock_init(mdesc);

	paging_init(mdesc);
	request_standard_resources(mdesc);

	if (mdesc->restart)
		arm_pm_restart = mdesc->restart;

	unflatten_device_tree();

	arm_dt_init_cpu_maps();
	psci_init();
#ifdef CONFIG_SMP
	if (is_smp()) {
		if (!mdesc->smp_init || !mdesc->smp_init()) {
			if (psci_smp_available())
				smp_set_ops(&psci_smp_ops);
			else if (mdesc->smp)
				smp_set_ops(mdesc->smp);
		}
		smp_init_cpus();
		smp_build_mpidr_hash();
	}
#endif

	if (!is_smp())
		hyp_mode_check();

	reserve_crashkernel();

#ifdef CONFIG_MULTI_IRQ_HANDLER
	handle_arch_irq = mdesc->handle_irq;
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif

	if (mdesc->init_early)
		mdesc->init_early();
}


static int __init topology_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct cpuinfo_arm *cpuinfo = &per_cpu(cpu_data, cpu);
		cpuinfo->cpu.hotpluggable = 1;
		register_cpu(&cpuinfo->cpu, cpu);
	}

	return 0;
}
subsys_initcall(topology_init);

#ifdef CONFIG_HAVE_PROC_CPU
static int __init proc_cpu_init(void)
{
	struct proc_dir_entry *res;

	res = proc_mkdir("cpu", NULL);
	if (!res)
		return -ENOMEM;
	return 0;
}
fs_initcall(proc_cpu_init);
#endif

static const char *hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
	"vfpd32",
	"lpae",
	"evtstrm",
	NULL
};

static const char *hwcap2_str[] = {
	"aes",
	"pmull",
	"sha1",
	"sha2",
	"crc32",
	NULL
};

static int c_show(struct seq_file *m, void *v)
{
	int i, j;
	u32 cpuid;

	for_each_online_cpu(i) {
		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);
		cpuid = is_smp() ? per_cpu(cpu_data, i).cpuid : read_cpuid_id();
		seq_printf(m, "model name\t: %s rev %d (%s)\n",
			   cpu_name, cpuid & 15, elf_platform);

		/* dump out the processor features */
		seq_puts(m, "Features\t: ");
		for (j = 0; hwcap_str[j]; j++)
			if (elf_hwcap & (1 << j))
				seq_printf(m, "%s ", hwcap_str[j]);
		for (j = 0; hwcap2_str[j]; j++)
			if (elf_hwcap2 & (1 << j))
				seq_printf(m, "%s ", hwcap2_str[j]);

		seq_printf(m, "\nCPU implementer\t: 0x%02x\n", cpuid >> 24);
		seq_printf(m, "CPU architecture: %s\n",
			   proc_arch[cpu_architecture()]);
		if ((cpuid & 0x0008f000) == 0x00000000) {
			/* pre-ARM7 */
			seq_printf(m, "CPU part\t: %07x\n", cpuid >> 4);
		} else {
			if ((cpuid & 0x0008f000) == 0x00007000) {
				/* ARM7 */
				seq_printf(m, "CPU variant\t: 0x%02x\n",
					   (cpuid >> 16) & 127);
			} else {
				/* post-ARM7 */
				seq_printf(m, "CPU variant\t: 0x%x\n",
					   (cpuid >> 20) & 15);
			}
			seq_printf(m, "CPU part\t: 0x%03x\n",
				   (cpuid >> 4) & 0xfff);
		}
		seq_printf(m, "CPU revision\t: %d\n\n", cpuid & 15);
	}

	seq_printf(m, "Hardware\t: %s\n", machine_name);
	seq_printf(m, "Revision\t: %04x\n", system_rev);
	seq_printf(m, "Serial\t\t: %08x%08x\n",
		   system_serial_high, system_serial_low);

	return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};