/*
 * Based on arch/arm/kernel/setup.c
 *
 * Copyright (C) 1995-2001 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/acpi.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/utsname.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/cache.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/screen_info.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/root_dev.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
#include <linux/of_iommu.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <linux/efi.h>
#include <linux/personality.h>
#include <linux/psci.h>

#include <asm/acpi.h>
#include <asm/fixmap.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/elf.h>
#include <asm/cpufeature.h>
#include <asm/cpu_ops.h>
#include <asm/kasan.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/traps.h>
#include <asm/memblock.h>
#include <asm/efi.h>
#include <asm/xen/hypervisor.h>

unsigned long elf_hwcap __read_mostly;
EXPORT_SYMBOL_GPL(elf_hwcap);

#ifdef CONFIG_COMPAT
#define COMPAT_ELF_HWCAP_DEFAULT	\
				(COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
				 COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
				 COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\
				 COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\
				 COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV|\
				 COMPAT_HWCAP_LPAE)
unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
unsigned int compat_elf_hwcap2 __read_mostly;
#endif

DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);

phys_addr_t __fdt_pointer __initdata;

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Kernel code",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_MEM
	}
};

#define kernel_code mem_res[0]
#define kernel_data mem_res[1]

/*
 * The recorded values of x0 .. x3 upon kernel entry.
 */
u64 __cacheline_aligned boot_args[4];

void __init smp_setup_processor_id(void)
{
	u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
	cpu_logical_map(0) = mpidr;

	/*
	 * Clear __my_cpu_offset on the boot CPU to avoid hangs caused by
	 * using per-cpu variables too early; lockdep, for example, accesses
	 * per-cpu variables inside lock_release().
	 */
	set_my_cpu_offset(0);
	pr_info("Booting Linux on physical CPU 0x%lx\n", (unsigned long)mpidr);
}

bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
{
	return phys_id == cpu_logical_map(cpu);
}

struct mpidr_hash mpidr_hash;
/**
 * smp_build_mpidr_hash - Pre-compute the shifts required at each affinity
 *			  level in order to build a linear index from an
 *			  MPIDR value. The resulting algorithm is a
 *			  collision-free hash carried out through shifting
 *			  and ORing.
 */
static void __init smp_build_mpidr_hash(void)
{
	u32 i, affinity, fs[4], bits[4], ls;
	u64 mask = 0;
	/*
	 * Pre-scan the list of MPIDRs and filter out bits that do not
	 * contribute to affinity levels, i.e. bits that never toggle.
	 */
	for_each_possible_cpu(i)
		mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
	pr_debug("mask of set bits %#llx\n", mask);
	/*
	 * Find and stash the last and first bit set at all affinity levels to
	 * check how many bits are required to represent them.
	 */
	for (i = 0; i < 4; i++) {
		affinity = MPIDR_AFFINITY_LEVEL(mask, i);
		/*
		 * Find the MSB and LSB positions to determine how many
		 * bits are required to express this affinity level.
		 */
		ls = fls(affinity);
		fs[i] = affinity ? ffs(affinity) - 1 : 0;
		bits[i] = ls - fs[i];
	}
	/*
	 * An index can be created from the MPIDR_EL1 by isolating the
	 * significant bits at each affinity level and shifting them so as
	 * to compress the 32-bit value space into a smaller set of values.
	 * This is equivalent to hashing the MPIDR_EL1 through shifting and
	 * ORing. It is a collision-free hash, though not a minimal one,
	 * since some levels might contain a number of CPUs that is not an
	 * exact power of 2 and their bit representation might contain
	 * holes, e.g. MPIDR_EL1[7:0] = {0x2, 0x80}.
	 */
	mpidr_hash.shift_aff[0] = MPIDR_LEVEL_SHIFT(0) + fs[0];
	mpidr_hash.shift_aff[1] = MPIDR_LEVEL_SHIFT(1) + fs[1] - bits[0];
	mpidr_hash.shift_aff[2] = MPIDR_LEVEL_SHIFT(2) + fs[2] -
						(bits[1] + bits[0]);
	mpidr_hash.shift_aff[3] = MPIDR_LEVEL_SHIFT(3) +
				  fs[3] - (bits[2] + bits[1] + bits[0]);
	mpidr_hash.mask = mask;
	mpidr_hash.bits = bits[3] + bits[2] + bits[1] + bits[0];
	pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] aff3[%u] mask[%#llx] bits[%u]\n",
		mpidr_hash.shift_aff[0],
		mpidr_hash.shift_aff[1],
		mpidr_hash.shift_aff[2],
		mpidr_hash.shift_aff[3],
		mpidr_hash.mask,
		mpidr_hash.bits);
	/*
	 * 4x is an arbitrary value used to warn on a hash table much bigger
	 * than expected on most systems.
	 */
	if (mpidr_hash_size() > 4 * num_possible_cpus())
		pr_warn("Large number of MPIDR hash buckets detected\n");
	__flush_dcache_area(&mpidr_hash, sizeof(struct mpidr_hash));
}
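
/*
 * Illustrative sketch only: the in-tree consumer of this hash is assembly on
 * the resume path, but turning an MPIDR value into a linear index is roughly
 * equivalent to the following C, given a populated mpidr_hash (the function
 * name is hypothetical):
 *
 *	static u32 mpidr_hash_index(u64 mpidr)
 *	{
 *		u64 m = mpidr & mpidr_hash.mask;
 *		u32 idx = 0;
 *		int i;
 *
 *		for (i = 0; i < 4; i++)
 *			idx |= (m & ((u64)0xff << MPIDR_LEVEL_SHIFT(i))) >>
 *			       mpidr_hash.shift_aff[i];
 *		return idx;
 *	}
 */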

void __init setup_cpu_features(void)
{
	u64 features;
	s64 block;
	u32 cwg;
	int cls;

	/*
	 * Check for sane CTR_EL0.CWG value.
	 */
	cwg = cache_type_cwg();
	cls = cache_line_size();
	if (!cwg)
		pr_warn("No Cache Writeback Granule information, assuming cache line size %d\n",
			cls);
	if (L1_CACHE_BYTES < cls)
		pr_warn("L1_CACHE_BYTES smaller than the Cache Writeback Granule (%d < %d)\n",
			L1_CACHE_BYTES, cls);
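
	/*
	 * For reference, a sketch of where these numbers come from (not the
	 * in-tree definitions): CTR_EL0.CWG occupies bits [27:24] and
	 * encodes the writeback granule as a power of two, so roughly:
	 *
	 *	cwg = (read_cpuid_cachetype() >> 24) & 0xf;
	 *	cls = cwg ? 4 << cwg : L1_CACHE_BYTES;
	 */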

	/*
	 * ID_AA64ISAR0_EL1 contains 4-bit wide signed feature blocks.
	 * The blocks we test below represent incremental functionality
	 * for non-negative values. Negative values are reserved.
	 */
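	/*
	 * As an illustrative sketch (not the in-tree definition), the
	 * extraction helper sign-extends each 4-bit field so that reserved
	 * encodings come back negative and fail the "> 0" tests below:
	 *
	 *	block = (s64)(features << (64 - 4 - shift)) >> (64 - 4);
	 */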
	features = read_cpuid(ID_AA64ISAR0_EL1);
	block = cpuid_feature_extract_field(features, 4);
	if (block > 0) {
		switch (block) {
		default:
		case 2:
			elf_hwcap |= HWCAP_PMULL;
		case 1:
			elf_hwcap |= HWCAP_AES;
		case 0:
			break;
		}
	}

	if (cpuid_feature_extract_field(features, 8) > 0)
		elf_hwcap |= HWCAP_SHA1;

	if (cpuid_feature_extract_field(features, 12) > 0)
		elf_hwcap |= HWCAP_SHA2;

	if (cpuid_feature_extract_field(features, 16) > 0)
		elf_hwcap |= HWCAP_CRC32;

	block = cpuid_feature_extract_field(features, 20);
	if (block > 0) {
		switch (block) {
		default:
		case 2:
			elf_hwcap |= HWCAP_ATOMICS;
		case 1:
			/* RESERVED */
		case 0:
			break;
		}
	}

#ifdef CONFIG_COMPAT
	/*
	 * ID_ISAR5_EL1 carries similar information as above, but pertaining to
	 * the AArch32 32-bit execution state.
	 */
	features = read_cpuid(ID_ISAR5_EL1);
	block = cpuid_feature_extract_field(features, 4);
	if (block > 0) {
		switch (block) {
		default:
		case 2:
			compat_elf_hwcap2 |= COMPAT_HWCAP2_PMULL;
		case 1:
			compat_elf_hwcap2 |= COMPAT_HWCAP2_AES;
		case 0:
			break;
		}
	}

	if (cpuid_feature_extract_field(features, 8) > 0)
		compat_elf_hwcap2 |= COMPAT_HWCAP2_SHA1;

	if (cpuid_feature_extract_field(features, 12) > 0)
		compat_elf_hwcap2 |= COMPAT_HWCAP2_SHA2;

	if (cpuid_feature_extract_field(features, 16) > 0)
		compat_elf_hwcap2 |= COMPAT_HWCAP2_CRC32;
#endif
}

static void __init setup_processor(void)
{
	pr_info("Boot CPU: AArch64 Processor [%08x]\n", read_cpuid_id());
	sprintf(init_utsname()->machine, ELF_PLATFORM);
	cpuinfo_store_boot_cpu();
}

static void __init setup_machine_fdt(phys_addr_t dt_phys)
{
	void *dt_virt = fixmap_remap_fdt(dt_phys);

	if (!dt_virt || !early_init_dt_scan(dt_virt)) {
		pr_crit("\n"
			"Error: invalid device tree blob at physical address %pa (virtual address 0x%p)\n"
			"The dtb must be 8-byte aligned and must not exceed 2 MB in size\n"
			"\nPlease check your bootloader.",
			&dt_phys, dt_virt);

		while (true)
			cpu_relax();
	}

	dump_stack_set_arch_desc("%s (DT)", of_flat_dt_get_machine_name());
}

static void __init request_standard_resources(void)
{
	struct memblock_region *region;
	struct resource *res;

	kernel_code.start   = virt_to_phys(_text);
	kernel_code.end     = virt_to_phys(_etext - 1);
	kernel_data.start   = virt_to_phys(_sdata);
	kernel_data.end     = virt_to_phys(_end - 1);

	for_each_memblock(memory, region) {
		res = alloc_bootmem_low(sizeof(*res));
		res->name  = "System RAM";
		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
	}
}

#ifdef CONFIG_BLK_DEV_INITRD
/*
 * Relocate initrd if it is not completely within the linear mapping.
 * This would be the case if mem= cuts out all or part of it.
 */
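/*
 * A hypothetical example: with RAM spanning [0x80000000, 0xC0000000) and a
 * 16 MB initrd loaded at 0xB8000000, booting with mem=512M leaves
 * memblock_end_of_DRAM() at 0xA0000000, below the whole initrd, so the image
 * is copied down into the clipped linear mapping via copy_from_early_mem().
 * Were the cut to fall inside the initrd, the still-mapped head would be
 * memcpy'd first and the original pages freed.
 */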
static void __init relocate_initrd(void)
{
	phys_addr_t orig_start = __virt_to_phys(initrd_start);
	phys_addr_t orig_end = __virt_to_phys(initrd_end);
	phys_addr_t ram_end = memblock_end_of_DRAM();
	phys_addr_t new_start;
	unsigned long size, to_free = 0;
	void *dest;

	if (orig_end <= ram_end)
		return;

	/*
	 * Any part of the original initrd which overlaps the linear map
	 * should be freed after relocation.
	 */
	if (orig_start < ram_end)
		to_free = ram_end - orig_start;

	size = orig_end - orig_start;

	/* initrd needs to be relocated completely inside linear mapping */
	new_start = memblock_find_in_range(0, PFN_PHYS(max_pfn),
					   size, PAGE_SIZE);
	if (!new_start)
		panic("Cannot relocate initrd of size %ld\n", size);
	memblock_reserve(new_start, size);

	initrd_start = __phys_to_virt(new_start);
	initrd_end   = initrd_start + size;

	pr_info("Moving initrd from [%llx-%llx] to [%llx-%llx]\n",
		orig_start, orig_start + size - 1,
		new_start, new_start + size - 1);

	dest = (void *)initrd_start;

	if (to_free) {
		memcpy(dest, (void *)__phys_to_virt(orig_start), to_free);
		dest += to_free;
	}

	copy_from_early_mem(dest, orig_start + to_free, size - to_free);

	if (to_free) {
		pr_info("Freeing original RAMDISK from [%llx-%llx]\n",
			orig_start, orig_start + to_free - 1);
		memblock_free(orig_start, to_free);
	}
}
#else
static inline void __init relocate_initrd(void)
{
}
#endif

u64 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };

void __init setup_arch(char **cmdline_p)
{
	setup_processor();

	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code   = (unsigned long) _etext;
	init_mm.end_data   = (unsigned long) _edata;
	init_mm.brk	   = (unsigned long) _end;

	*cmdline_p = boot_command_line;

	early_fixmap_init();
	early_ioremap_init();

	setup_machine_fdt(__fdt_pointer);

	parse_early_param();

	/*
	 * Unmask asynchronous aborts after bringing up a possible earlycon,
	 * so that any System Errors that occur can be reported.
	 */
	local_async_enable();

	efi_init();
	arm64_memblock_init();

	/* Parse the ACPI tables for possible boot-time configuration */
	acpi_boot_table_init();

	paging_init();
	relocate_initrd();

	kasan_init();

	request_standard_resources();

	early_ioremap_reset();

	if (acpi_disabled) {
		unflatten_device_tree();
		psci_dt_init();
	} else {
		psci_acpi_init();
	}
	xen_early_init();

	cpu_read_bootcpu_ops();
	smp_init_cpus();
	smp_build_mpidr_hash();

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
	if (boot_args[1] || boot_args[2] || boot_args[3]) {
		pr_err("WARNING: x1-x3 nonzero in violation of boot protocol:\n"
			"\tx1: %016llx\n\tx2: %016llx\n\tx3: %016llx\n"
			"This indicates a broken bootloader or old kernel\n",
			boot_args[1], boot_args[2], boot_args[3]);
	}
}

static int __init arm64_device_init(void)
{
	if (of_have_populated_dt()) {
		of_iommu_init();
		of_platform_populate(NULL, of_default_bus_match_table,
				     NULL, NULL);
	} else if (acpi_disabled) {
		pr_crit("Device tree not populated\n");
	}
	return 0;
}
arch_initcall_sync(arm64_device_init);

static int __init topology_init(void)
{
	int i;

	for_each_possible_cpu(i) {
		struct cpu *cpu = &per_cpu(cpu_data.cpu, i);
		cpu->hotpluggable = 1;
		register_cpu(cpu, i);
	}

	return 0;
}
subsys_initcall(topology_init);

static const char *hwcap_str[] = {
	"fp",
	"asimd",
	"evtstrm",
	"aes",
	"pmull",
	"sha1",
	"sha2",
	"crc32",
	"atomics",
	NULL
};

#ifdef CONFIG_COMPAT
static const char *compat_hwcap_str[] = {
	"swp",
	"half",
	"thumb",
	"26bit",
	"fastmult",
	"fpa",
	"vfp",
	"edsp",
	"java",
	"iwmmxt",
	"crunch",
	"thumbee",
	"neon",
	"vfpv3",
	"vfpv3d16",
	"tls",
	"vfpv4",
	"idiva",
	"idivt",
	"vfpd32",
	"lpae",
	"evtstrm"
};

static const char *compat_hwcap2_str[] = {
	"aes",
	"pmull",
	"sha1",
	"sha2",
	"crc32",
	NULL
};
#endif /* CONFIG_COMPAT */

static int c_show(struct seq_file *m, void *v)
{
	int i, j;

	for_each_online_cpu(i) {
		struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i);
		u32 midr = cpuinfo->reg_midr;

		/*
		 * glibc reads /proc/cpuinfo to determine the number of
		 * online processors, looking for lines beginning with
		 * "processor".  Give glibc what it expects.
		 */
		seq_printf(m, "processor\t: %d\n", i);

		/*
		 * Dump out the common processor features in a single line.
		 * Userspace should read the hwcaps with getauxval(AT_HWCAP)
		 * rather than attempting to parse this, but there's a body of
		 * software which does already (at least for 32-bit).
		 */
		seq_puts(m, "Features\t:");
		if (personality(current->personality) == PER_LINUX32) {
#ifdef CONFIG_COMPAT
			for (j = 0; compat_hwcap_str[j]; j++)
				if (compat_elf_hwcap & (1 << j))
					seq_printf(m, " %s", compat_hwcap_str[j]);

			for (j = 0; compat_hwcap2_str[j]; j++)
				if (compat_elf_hwcap2 & (1 << j))
					seq_printf(m, " %s", compat_hwcap2_str[j]);
#endif /* CONFIG_COMPAT */
		} else {
			for (j = 0; hwcap_str[j]; j++)
				if (elf_hwcap & (1 << j))
					seq_printf(m, " %s", hwcap_str[j]);
		}
		seq_puts(m, "\n");

		seq_printf(m, "CPU implementer\t: 0x%02x\n",
			   MIDR_IMPLEMENTOR(midr));
		seq_printf(m, "CPU architecture: 8\n");
		seq_printf(m, "CPU variant\t: 0x%x\n", MIDR_VARIANT(midr));
		seq_printf(m, "CPU part\t: 0x%03x\n", MIDR_PARTNUM(midr));
		seq_printf(m, "CPU revision\t: %d\n\n", MIDR_REVISION(midr));
	}

	return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

const struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show
};