/*
 * Low-level CPU initialisation
 * Based on arch/arm/kernel/head.S
 *
 * Copyright (C) 1994-2002 Russell King
 * Copyright (C) 2003-2012 ARM Ltd.
 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
 *		Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/irqchip/arm-gic-v3.h>

#include <asm/assembler.h>
#include <asm/boot.h>
#include <asm/ptrace.h>
#include <asm/asm-offsets.h>
#include <asm/cache.h>
#include <asm/cputype.h>
#include <asm/elf.h>
#include <asm/kernel-pgtable.h>
#include <asm/kvm_arm.h>
#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/smp.h>
#include <asm/sysreg.h>
#include <asm/thread_info.h>
#include <asm/virt.h>

#define __PHYS_OFFSET	(KERNEL_START - TEXT_OFFSET)

#if (TEXT_OFFSET & 0xfff) != 0
#error TEXT_OFFSET must be at least 4KB aligned
#elif (PAGE_OFFSET & 0x1fffff) != 0
#error PAGE_OFFSET must be at least 2MB aligned
#elif TEXT_OFFSET > 0x1fffff
#error TEXT_OFFSET must be less than 2MB
#endif

/*
 * Kernel startup entry point.
 * ---------------------------
 *
 * The requirements are:
 *   MMU = off, D-cache = off, I-cache = on or off,
 *   x0 = physical address to the FDT blob.
 *
 * This code is mostly position independent, so you can call it at
 * __pa(PAGE_OFFSET + TEXT_OFFSET).
 *
 * Note that the callee-saved registers are used for storing variables
 * that are useful before the MMU is enabled. The allocations are described
 * in the entry routines.
 */
	__HEAD
_head:
	/*
	 * DO NOT MODIFY. Image header expected by Linux boot-loaders.
	 */
#ifdef CONFIG_EFI
	/*
	 * This add instruction has no meaningful effect except that
	 * its opcode forms the magic "MZ" signature required by UEFI.
	 */
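	/*
	 * (The instruction below assembles to 0x91005a4d; stored
	 * little-endian, its first two bytes are 0x4d 0x5a, ASCII "MZ".)
	 */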
	add	x13, x18, #0x16
	b	stext
#else
	b	stext				// branch to kernel start, magic
	.long	0				// reserved
#endif
	le64sym	_kernel_offset_le		// Image load offset from start of RAM, little-endian
	le64sym	_kernel_size_le			// Effective size of kernel image, little-endian
	le64sym	_kernel_flags_le		// Informative flags, little-endian
	.quad	0				// reserved
	.quad	0				// reserved
	.quad	0				// reserved
	.byte	0x41				// Magic number, "ARM\x64"
	.byte	0x52
	.byte	0x4d
	.byte	0x64
#ifdef CONFIG_EFI
	.long	pe_header - _head		// Offset to the PE header.
#else
	.word	0				// reserved
#endif

#ifdef CONFIG_EFI
	.align 3
pe_header:
	.ascii	"PE"
	.short 	0
coff_header:
	.short	0xaa64				// AArch64
	.short	2				// nr_sections
	.long	0 				// TimeDateStamp
	.long	0				// PointerToSymbolTable
	.long	1				// NumberOfSymbols
	.short	section_table - optional_header	// SizeOfOptionalHeader
	.short	0x206				// Characteristics.
						// IMAGE_FILE_DEBUG_STRIPPED |
						// IMAGE_FILE_EXECUTABLE_IMAGE |
						// IMAGE_FILE_LINE_NUMS_STRIPPED
optional_header:
	.short	0x20b				// PE32+ format
	.byte	0x02				// MajorLinkerVersion
	.byte	0x14				// MinorLinkerVersion
	.long	_end - efi_header_end		// SizeOfCode
	.long	0				// SizeOfInitializedData
	.long	0				// SizeOfUninitializedData
	.long	__efistub_entry - _head		// AddressOfEntryPoint
	.long	efi_header_end - _head		// BaseOfCode

extra_header_fields:
	.quad	0				// ImageBase
	.long	0x1000				// SectionAlignment
	.long	PECOFF_FILE_ALIGNMENT		// FileAlignment
	.short	0				// MajorOperatingSystemVersion
	.short	0				// MinorOperatingSystemVersion
	.short	0				// MajorImageVersion
	.short	0				// MinorImageVersion
	.short	0				// MajorSubsystemVersion
	.short	0				// MinorSubsystemVersion
	.long	0				// Win32VersionValue

	.long	_end - _head			// SizeOfImage

	// Everything before the kernel image is considered part of the header
	.long	efi_header_end - _head		// SizeOfHeaders
	.long	0				// CheckSum
	.short	0xa				// Subsystem (EFI application)
	.short	0				// DllCharacteristics
	.quad	0				// SizeOfStackReserve
	.quad	0				// SizeOfStackCommit
	.quad	0				// SizeOfHeapReserve
	.quad	0				// SizeOfHeapCommit
	.long	0				// LoaderFlags
	.long	0x6				// NumberOfRvaAndSizes

	.quad	0				// ExportTable
	.quad	0				// ImportTable
	.quad	0				// ResourceTable
	.quad	0				// ExceptionTable
	.quad	0				// CertificateTable
	.quad	0				// BaseRelocationTable

	// Section table
section_table:

	/*
	 * The EFI application loader requires a relocation section
	 * because EFI applications must be relocatable.  This is a
	 * dummy section as far as we are concerned.
	 */
	.ascii	".reloc"
	.byte	0
	.byte	0			// end of 0 padding of section name
	.long	0
	.long	0
	.long	0			// SizeOfRawData
	.long	0			// PointerToRawData
	.long	0			// PointerToRelocations
	.long	0			// PointerToLineNumbers
	.short	0			// NumberOfRelocations
	.short	0			// NumberOfLineNumbers
	.long	0x42100040		// Characteristics (section flags)


	.ascii	".text"
	.byte	0
	.byte	0
	.byte	0        		// end of 0 padding of section name
	.long	_end - efi_header_end	// VirtualSize
	.long	efi_header_end - _head	// VirtualAddress
	.long	_edata - efi_header_end	// SizeOfRawData
	.long	efi_header_end - _head	// PointerToRawData

	.long	0		// PointerToRelocations (0 for executables)
	.long	0		// PointerToLineNumbers (0 for executables)
	.short	0		// NumberOfRelocations  (0 for executables)
	.short	0		// NumberOfLineNumbers  (0 for executables)
	.long	0xe0500020	// Characteristics (section flags)

	/*
	 * EFI will load .text onwards at the 4k section alignment
	 * described in the PE/COFF header. To ensure that instruction
	 * sequences using an adrp and a :lo12: immediate will function
	 * correctly at this alignment, we must ensure that .text is
	 * placed at a 4k boundary in the Image to begin with.
	 */
	.align 12
efi_header_end:
#endif

	__INIT

ENTRY(stext)
	bl	preserve_boot_args
	bl	el2_setup			// Drop to EL1, w20=cpu_boot_mode
	adrp	x24, __PHYS_OFFSET
	and	x23, x24, MIN_KIMG_ALIGN - 1	// KASLR offset, defaults to 0
	bl	set_cpu_boot_mode_flag
	bl	__create_page_tables		// x25=TTBR0, x26=TTBR1
	/*
	 * The following calls CPU setup code, see arch/arm64/mm/proc.S for
	 * details.
	 * On return, the CPU will be ready for the MMU to be turned on and
	 * the TCR will have been set.
	 */
	bl	__cpu_setup			// initialise processor
	adr_l	x27, __primary_switch		// address to jump to after
						// MMU has been enabled
	b	__enable_mmu
ENDPROC(stext)

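/*
 * Register allocation along the primary boot path (as established above):
 *   w20 = CPU boot mode, x21 = FDT pointer, x23 = KASLR offset,
 *   x24 = __PHYS_OFFSET, x25 = idmap_pg_dir, x26 = swapper_pg_dir,
 *   x27 = virtual address to branch to once the MMU is enabled,
 *   x28 = temporary lr save while the MMU is off.
 */
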
/*
 * Preserve the arguments passed by the bootloader in x0 .. x3
 */
preserve_boot_args:
	mov	x21, x0				// x21=FDT

	adr_l	x0, boot_args			// record the contents of
	stp	x21, x1, [x0]			// x0 .. x3 at kernel entry
	stp	x2, x3, [x0, #16]

	dmb	sy				// needed before dc ivac with
						// MMU off

	add	x1, x0, #0x20			// 4 x 8 bytes
	b	__inval_cache_range		// tail call
ENDPROC(preserve_boot_args)

/*
 * Macro to create a table entry to the next page.
 *
 *	tbl:	page table address
 *	virt:	virtual address
 *	shift:	#imm page table shift
 *	ptrs:	#imm pointers per table page
 *
 * Preserves:	virt
 * Corrupts:	tmp1, tmp2
 * Returns:	tbl -> next level table page address
 */
	.macro	create_table_entry, tbl, virt, shift, ptrs, tmp1, tmp2
	lsr	\tmp1, \virt, #\shift
	and	\tmp1, \tmp1, #\ptrs - 1	// table index
	add	\tmp2, \tbl, #PAGE_SIZE
	orr	\tmp2, \tmp2, #PMD_TYPE_TABLE	// address of next table and entry type
	str	\tmp2, [\tbl, \tmp1, lsl #3]
	add	\tbl, \tbl, #PAGE_SIZE		// next level table page
	.endm
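
/*
 * For illustration (assuming 4KB pages and 39-bit VAs, so PGDIR_SHIFT == 30
 * and PTRS_PER_PGD == 512): the index is (virt >> 30) & 0x1ff, and the
 * descriptor stored at that slot is the address of the next page,
 * i.e. tbl + PAGE_SIZE, ORred with PMD_TYPE_TABLE.
 */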

/*
 * Macro to populate the PGD (and possibly PUD) for the corresponding
 * block entry in the next level (tbl) for the given virtual address.
 *
 * Preserves:	tbl, next, virt
 * Corrupts:	tmp1, tmp2
 */
	.macro	create_pgd_entry, tbl, virt, tmp1, tmp2
	create_table_entry \tbl, \virt, PGDIR_SHIFT, PTRS_PER_PGD, \tmp1, \tmp2
#if SWAPPER_PGTABLE_LEVELS > 3
	create_table_entry \tbl, \virt, PUD_SHIFT, PTRS_PER_PUD, \tmp1, \tmp2
#endif
#if SWAPPER_PGTABLE_LEVELS > 2
	create_table_entry \tbl, \virt, SWAPPER_TABLE_SHIFT, PTRS_PER_PTE, \tmp1, \tmp2
#endif
	.endm

/*
 * Macro to populate block entries in the page table for the start..end
 * virtual range (inclusive).
 *
 * Preserves:	tbl, flags
 * Corrupts:	phys, start, end, pstate
 */
	.macro	create_block_map, tbl, flags, phys, start, end
	lsr	\phys, \phys, #SWAPPER_BLOCK_SHIFT
	lsr	\start, \start, #SWAPPER_BLOCK_SHIFT
	and	\start, \start, #PTRS_PER_PTE - 1	// table index
	orr	\phys, \flags, \phys, lsl #SWAPPER_BLOCK_SHIFT	// table entry
	lsr	\end, \end, #SWAPPER_BLOCK_SHIFT
	and	\end, \end, #PTRS_PER_PTE - 1		// table end index
9999:	str	\phys, [\tbl, \start, lsl #3]		// store the entry
	add	\start, \start, #1			// next entry
	add	\phys, \phys, #SWAPPER_BLOCK_SIZE		// next block
	cmp	\start, \end
	b.ls	9999b
	.endm
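
/*
 * For illustration: with 4KB pages, SWAPPER_BLOCK_SHIFT == PMD_SHIFT == 21
 * (an assumption; 16KB/64KB configurations map at page granularity), so
 * each loop iteration writes one 2MB block descriptor, covering start..end
 * inclusive in 2MB steps.
 */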

/*
 * Set up the initial page tables. We only set up the bare minimum required
 * to get the kernel running. The following sections are required:
 *   - identity mapping to enable the MMU (low address, TTBR0)
 *   - first few MB of the kernel linear mapping to jump to once the MMU has
 *     been enabled
 */
__create_page_tables:
	adrp	x25, idmap_pg_dir
	adrp	x26, swapper_pg_dir
	mov	x28, lr

	/*
	 * Invalidate the idmap and swapper page tables to avoid potential
	 * dirty cache lines being evicted.
	 */
	mov	x0, x25
	add	x1, x26, #SWAPPER_DIR_SIZE
	bl	__inval_cache_range

	/*
	 * Clear the idmap and swapper page tables.
	 */
	mov	x0, x25
	add	x6, x26, #SWAPPER_DIR_SIZE
1:	stp	xzr, xzr, [x0], #16
	stp	xzr, xzr, [x0], #16
	stp	xzr, xzr, [x0], #16
	stp	xzr, xzr, [x0], #16
	cmp	x0, x6
	b.lo	1b

	mov	x7, SWAPPER_MM_MMUFLAGS

	/*
	 * Create the identity mapping.
	 */
	mov	x0, x25				// idmap_pg_dir
	adrp	x3, __idmap_text_start		// __pa(__idmap_text_start)

#ifndef CONFIG_ARM64_VA_BITS_48
#define EXTRA_SHIFT	(PGDIR_SHIFT + PAGE_SHIFT - 3)
#define EXTRA_PTRS	(1 << (48 - EXTRA_SHIFT))

	/*
	 * If VA_BITS < 48, it may be too small to allow for an ID mapping to be
	 * created that covers system RAM if that is located sufficiently high
	 * in the physical address space. So for the ID map, use an extended
	 * virtual range in that case, by configuring an additional translation
	 * level.
	 * First, we have to verify our assumption that the current value of
	 * VA_BITS was chosen such that all translation levels are fully
	 * utilised, and that lowering T0SZ will always result in an additional
	 * translation level to be configured.
	 */
#if VA_BITS != EXTRA_SHIFT
#error "Mismatch between VA_BITS and page size/number of translation levels"
#endif
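
	/*
	 * For illustration (assuming 4KB pages and VA_BITS == 39):
	 * EXTRA_SHIFT == 30 + 12 - 3 == 39 and EXTRA_PTRS == 1 << 9 == 512,
	 * i.e. a single additional 512-entry table level is enough to
	 * extend the idmap to the full 48-bit physical address space.
	 */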

	/*
	 * Calculate the maximum allowed value for TCR_EL1.T0SZ so that the
	 * entire ID map region can be mapped. As T0SZ == (64 - #bits used),
	 * this number conveniently equals the number of leading zeroes in
	 * the physical address of __idmap_text_end.
	 */
	adrp	x5, __idmap_text_end
	clz	x5, x5
	cmp	x5, TCR_T0SZ(VA_BITS)	// default T0SZ small enough?
	b.ge	1f			// .. then skip additional level

	adr_l	x6, idmap_t0sz
	str	x5, [x6]
	dmb	sy
	dc	ivac, x6		// Invalidate potentially stale cache line

	create_table_entry x0, x3, EXTRA_SHIFT, EXTRA_PTRS, x5, x6
1:
#endif

	create_pgd_entry x0, x3, x5, x6
	mov	x5, x3				// __pa(__idmap_text_start)
	adr_l	x6, __idmap_text_end		// __pa(__idmap_text_end)
	create_block_map x0, x7, x3, x5, x6

	/*
	 * Map the kernel image (starting with PHYS_OFFSET).
	 */
	mov	x0, x26				// swapper_pg_dir
	mov_q	x5, KIMAGE_VADDR + TEXT_OFFSET	// compile time __va(_text)
	add	x5, x5, x23			// add KASLR displacement
	create_pgd_entry x0, x5, x3, x6
	adrp	x6, _end			// runtime __pa(_end)
	adrp	x3, _text			// runtime __pa(_text)
	sub	x6, x6, x3			// _end - _text
	add	x6, x6, x5			// runtime __va(_end)
	create_block_map x0, x7, x3, x5, x6

	/*
	 * Since the page tables have been populated with non-cacheable
	 * accesses (MMU disabled), invalidate the idmap and swapper page
	 * tables again to remove any speculatively loaded cache lines.
	 */
	mov	x0, x25
	add	x1, x26, #SWAPPER_DIR_SIZE
	dmb	sy
	bl	__inval_cache_range

	ret	x28
ENDPROC(__create_page_tables)
	.ltorg

/*
 * The following fragment of code is executed with the MMU enabled.
 */
	.set	initial_sp, init_thread_union + THREAD_START_SP
__primary_switched:
	mov	x28, lr				// preserve LR
	adr_l	x8, vectors			// load VBAR_EL1 with virtual
	msr	vbar_el1, x8			// vector table address
	isb

	// Clear BSS
	adr_l	x0, __bss_start
	mov	x1, xzr
	adr_l	x2, __bss_stop
	sub	x2, x2, x0
	bl	__pi_memset
	dsb	ishst				// Make zero page visible to PTW

	adr_l	sp, initial_sp, x4
	mov	x4, sp
	and	x4, x4, #~(THREAD_SIZE - 1)
	msr	sp_el0, x4			// Save thread_info
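	/*
	 * (thread_info lives at the base of the THREAD_SIZE-aligned kernel
	 * stack here, so masking the low bits of sp yields its address;
	 * sp_el0 is repurposed to cache it while running in the kernel.)
	 */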
	str_l	x21, __fdt_pointer, x5		// Save FDT pointer

	ldr_l	x4, kimage_vaddr		// Save the offset between
	sub	x4, x4, x24			// the kernel virtual and
	str_l	x4, kimage_voffset, x5		// physical mappings

	mov	x29, #0
#ifdef CONFIG_KASAN
	bl	kasan_early_init
#endif
#ifdef CONFIG_RANDOMIZE_BASE
	tst	x23, ~(MIN_KIMG_ALIGN - 1)	// already running randomized?
	b.ne	0f
	mov	x0, x21				// pass FDT address in x0
	mov	x1, x23				// pass modulo offset in x1
	bl	kaslr_early_init		// parse FDT for KASLR options
	cbz	x0, 0f				// KASLR disabled? just proceed
	orr	x23, x23, x0			// record KASLR offset
	ret	x28				// we must enable KASLR, return
						// to __enable_mmu()
0:
#endif
	b	start_kernel
ENDPROC(__primary_switched)

/*
 * end early head section, begin head code that is also used for
 * hotplug and needs to have the same protections as the text region
 */
	.section ".text","ax"

ENTRY(kimage_vaddr)
	.quad		_text - TEXT_OFFSET

/*
 * If we're fortunate enough to boot at EL2, ensure that the world is
 * sane before dropping to EL1.
 *
 * Returns either BOOT_CPU_MODE_EL1 or BOOT_CPU_MODE_EL2 in x20 if
 * booted in EL1 or EL2 respectively.
 */
ENTRY(el2_setup)
	mrs	x0, CurrentEL
	cmp	x0, #CurrentEL_EL2
	b.ne	1f
	mrs	x0, sctlr_el2
CPU_BE(	orr	x0, x0, #(1 << 25)	)	// Set the EE bit for EL2
CPU_LE(	bic	x0, x0, #(1 << 25)	)	// Clear the EE bit for EL2
	msr	sctlr_el2, x0
	b	2f
1:	mrs	x0, sctlr_el1
CPU_BE(	orr	x0, x0, #(3 << 24)	)	// Set the EE and E0E bits for EL1
CPU_LE(	bic	x0, x0, #(3 << 24)	)	// Clear the EE and E0E bits for EL1
	msr	sctlr_el1, x0
	mov	w20, #BOOT_CPU_MODE_EL1		// This cpu booted in EL1
	isb
	ret

2:
#ifdef CONFIG_ARM64_VHE
	/*
	 * Check for VHE being present. For the rest of the EL2 setup,
	 * x2 being non-zero indicates that we do have VHE, and that the
	 * kernel is intended to run at EL2.
	 */
	mrs	x2, id_aa64mmfr1_el1
	ubfx	x2, x2, #8, #4
#else
	mov	x2, xzr
#endif

	/* Hyp configuration. */
	mov	x0, #HCR_RW			// 64-bit EL1
	cbz	x2, set_hcr
	orr	x0, x0, #HCR_TGE		// Enable Host Extensions
	orr	x0, x0, #HCR_E2H
set_hcr:
	msr	hcr_el2, x0
	isb

	/* Generic timers. */
	mrs	x0, cnthctl_el2
	orr	x0, x0, #3			// Enable EL1 physical timers
	msr	cnthctl_el2, x0
	msr	cntvoff_el2, xzr		// Clear virtual offset

#ifdef CONFIG_ARM_GIC_V3
	/* GICv3 system register access */
	mrs	x0, id_aa64pfr0_el1
	ubfx	x0, x0, #24, #4
	cmp	x0, #1
	b.ne	3f

	mrs_s	x0, ICC_SRE_EL2
	orr	x0, x0, #ICC_SRE_EL2_SRE	// Set ICC_SRE_EL2.SRE==1
	orr	x0, x0, #ICC_SRE_EL2_ENABLE	// Set ICC_SRE_EL2.Enable==1
	msr_s	ICC_SRE_EL2, x0
	isb					// Make sure SRE is now set
	mrs_s	x0, ICC_SRE_EL2			// Read SRE back,
	tbz	x0, #0, 3f			// and check that it sticks
	msr_s	ICH_HCR_EL2, xzr		// Reset ICH_HCR_EL2 to defaults

3:
#endif

	/* Populate ID registers. */
	mrs	x0, midr_el1
	mrs	x1, mpidr_el1
	msr	vpidr_el2, x0
	msr	vmpidr_el2, x1

	/*
	 * When VHE is not in use, early init of EL2 and EL1 needs to be
	 * done here.
	 * When VHE _is_ in use, EL1 will not be used in the host and
	 * requires no configuration, and all non-hyp-specific EL2 setup
	 * will be done via the _EL1 system register aliases in __cpu_setup.
	 */
	cbnz	x2, 1f

	/* sctlr_el1 */
	mov	x0, #0x0800			// Set/clear RES{1,0} bits
CPU_BE(	movk	x0, #0x33d0, lsl #16	)	// Set EE and E0E on BE systems
CPU_LE(	movk	x0, #0x30d0, lsl #16	)	// Clear EE and E0E on LE systems
	msr	sctlr_el1, x0

	/* Coprocessor traps. */
	mov	x0, #0x33ff
	msr	cptr_el2, x0			// Disable copro. traps to EL2
1:

#ifdef CONFIG_COMPAT
	msr	hstr_el2, xzr			// Disable CP15 traps to EL2
#endif

	/* EL2 debug */
	mrs	x0, id_aa64dfr0_el1		// Check ID_AA64DFR0_EL1 PMUVer
	sbfx	x0, x0, #8, #4
	cmp	x0, #1
	b.lt	4f				// Skip if no PMU present
	mrs	x0, pmcr_el0			// Disable debug access traps
	ubfx	x0, x0, #11, #5			// to EL2 and allow access to
	msr	mdcr_el2, x0			// all PMU counters from EL1
4:

	/* Stage-2 translation */
	msr	vttbr_el2, xzr

	cbz	x2, install_el2_stub

	mov	w20, #BOOT_CPU_MODE_EL2		// This CPU booted in EL2
	isb
	ret

install_el2_stub:
	/* Hypervisor stub */
	adrp	x0, __hyp_stub_vectors
	add	x0, x0, #:lo12:__hyp_stub_vectors
	msr	vbar_el2, x0

	/* spsr */
	mov	x0, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
		      PSR_MODE_EL1h)
	msr	spsr_el2, x0
	msr	elr_el2, lr
	mov	w20, #BOOT_CPU_MODE_EL2		// This CPU booted in EL2
	eret
ENDPROC(el2_setup)

/*
 * Sets the __boot_cpu_mode flag depending on the CPU boot mode passed
 * in x20. See arch/arm64/include/asm/virt.h for more info.
 */
set_cpu_boot_mode_flag:
	adr_l	x1, __boot_cpu_mode
	cmp	w20, #BOOT_CPU_MODE_EL2
	b.ne	1f
	add	x1, x1, #4
1:	str	w20, [x1]			// Save CPU boot mode
	dmb	sy
	dc	ivac, x1			// Invalidate potentially stale cache line
	ret
ENDPROC(set_cpu_boot_mode_flag)
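
/*
 * Note the encoding used above: __boot_cpu_mode[0] starts out as
 * BOOT_CPU_MODE_EL2 and is overwritten by any CPU booting at EL1, while
 * __boot_cpu_mode[1] starts out as BOOT_CPU_MODE_EL1 and is overwritten
 * by any CPU booting at EL2. Hyp mode is therefore available only if
 * both words read BOOT_CPU_MODE_EL2 afterwards (see asm/virt.h).
 */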

/*
 * We need to find out the CPU boot mode long after boot, so we need to
 * store it in a writable variable.
 *
 * This is not in .bss, because we set it sufficiently early that the boot-time
 * zeroing of .bss would clobber it.
 */
	.pushsection	.data..cacheline_aligned
	.align	L1_CACHE_SHIFT
ENTRY(__boot_cpu_mode)
	.long	BOOT_CPU_MODE_EL2
	.long	BOOT_CPU_MODE_EL1
	.popsection

	/*
	 * This provides a "holding pen" in which platforms hold all secondary
	 * cores until we're ready for them to initialise.
	 */
ENTRY(secondary_holding_pen)
	bl	el2_setup			// Drop to EL1, w20=cpu_boot_mode
	bl	set_cpu_boot_mode_flag
	mrs	x0, mpidr_el1
	mov_q	x1, MPIDR_HWID_BITMASK
	and	x0, x0, x1
	adr_l	x3, secondary_holding_pen_release
pen:	ldr	x4, [x3]
	cmp	x4, x0
	b.eq	secondary_startup
	wfe
	b	pen
ENDPROC(secondary_holding_pen)

	/*
	 * Secondary entry point that jumps straight into the kernel. Only to
	 * be used where CPUs are brought online dynamically by the kernel.
	 */
ENTRY(secondary_entry)
	bl	el2_setup			// Drop to EL1
	bl	set_cpu_boot_mode_flag
	b	secondary_startup
ENDPROC(secondary_entry)

secondary_startup:
	/*
	 * Common entry point for secondary CPUs.
	 */
	adrp	x25, idmap_pg_dir
	adrp	x26, swapper_pg_dir
	bl	__cpu_setup			// initialise processor

	adr_l	x27, __secondary_switch		// address to jump to after enabling the MMU
	b	__enable_mmu
ENDPROC(secondary_startup)

__secondary_switched:
	adr_l	x5, vectors
	msr	vbar_el1, x5
	isb

	adr_l	x0, secondary_data
	ldr	x0, [x0, #CPU_BOOT_STACK]	// get secondary_data.stack
	mov	sp, x0
	and	x0, x0, #~(THREAD_SIZE - 1)
	msr	sp_el0, x0			// save thread_info
	mov	x29, #0
	b	secondary_start_kernel
ENDPROC(__secondary_switched)

/*
 * The booting CPU updates the failed status @__early_cpu_boot_status,
 * with MMU turned off.
 *
 * update_early_cpu_boot_status status, tmp1, tmp2
 *  - Corrupts tmp1, tmp2
 *  - Writes 'status' to __early_cpu_boot_status and makes sure
 *    it is committed to memory.
 */

	.macro	update_early_cpu_boot_status status, tmp1, tmp2
	mov	\tmp2, #\status
	adr_l	\tmp1, __early_cpu_boot_status
	str	\tmp2, [\tmp1]
	dmb	sy
	dc	ivac, \tmp1			// Invalidate potentially stale cache line
	.endm

	.pushsection	.data..cacheline_aligned
	.align	L1_CACHE_SHIFT
ENTRY(__early_cpu_boot_status)
	.long 	0
	.popsection

/*
 * Enable the MMU.
 *
 *  x0  = SCTLR_EL1 value for turning on the MMU.
 *  x27 = *virtual* address to jump to upon completion
 *
 * Other registers depend on the function called upon completion.
 *
 * Checks if the selected granule size is supported by the CPU.
 * If it isn't, park the CPU.
 */
	.section	".idmap.text", "ax"
ENTRY(__enable_mmu)
	mrs	x22, sctlr_el1			// preserve old SCTLR_EL1 value
	mrs	x1, ID_AA64MMFR0_EL1
	ubfx	x2, x1, #ID_AA64MMFR0_TGRAN_SHIFT, 4
	cmp	x2, #ID_AA64MMFR0_TGRAN_SUPPORTED
	b.ne	__no_granule_support
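	/*
	 * (ID_AA64MMFR0_TGRAN_SHIFT selects whichever TGRAN4/TGRAN16/TGRAN64
	 * field matches the configured PAGE_SIZE; anything other than the
	 * SUPPORTED value means the CPU cannot use the granule this kernel
	 * was built for, so it is parked in __no_granule_support below.)
	 */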
	update_early_cpu_boot_status 0, x1, x2
	msr	ttbr0_el1, x25			// load TTBR0
	msr	ttbr1_el1, x26			// load TTBR1
	isb
	msr	sctlr_el1, x0
	isb
	/*
	 * Invalidate the local I-cache so that any instructions fetched
	 * speculatively from the PoC are discarded, since they may have
	 * been dynamically patched at the PoU.
	 */
	ic	iallu
	dsb	nsh
	isb
#ifdef CONFIG_RANDOMIZE_BASE
	mov	x19, x0				// preserve new SCTLR_EL1 value
	blr	x27

	/*
	 * If we return here, we have a KASLR displacement in x23 which we need
	 * to take into account by discarding the current kernel mapping and
	 * creating a new one.
	 */
	msr	sctlr_el1, x22			// disable the MMU
	isb
	bl	__create_page_tables		// recreate kernel mapping

	msr	sctlr_el1, x19			// re-enable the MMU
	isb
	ic	iallu				// flush instructions fetched
	dsb	nsh				// via old mapping
	isb
#endif
	br	x27
ENDPROC(__enable_mmu)

__no_granule_support:
	/* Indicate that this CPU can't boot and is stuck in the kernel */
	update_early_cpu_boot_status CPU_STUCK_IN_KERNEL, x1, x2
1:
	wfe
	wfi
	b	1b
ENDPROC(__no_granule_support)

__primary_switch:
#ifdef CONFIG_RELOCATABLE
	/*
	 * Iterate over each entry in the relocation table, and apply the
	 * relocations in place.
	 */
	ldr	w8, =__dynsym_offset		// offset to symbol table
	ldr	w9, =__rela_offset		// offset to reloc table
	ldr	w10, =__rela_size		// size of reloc table

	mov_q	x11, KIMAGE_VADDR		// default virtual offset
	add	x11, x11, x23			// actual virtual offset
	add	x8, x8, x11			// __va(.dynsym)
	add	x9, x9, x11			// __va(.rela)
	add	x10, x9, x10			// __va(.rela) + sizeof(.rela)

0:	cmp	x9, x10
	b.hs	2f
	ldp	x11, x12, [x9], #24
	ldr	x13, [x9, #-8]
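	// x11 = r_offset, x12 = r_info, x13 = r_addend: an Elf64_Rela entry
	// is three 64-bit words, hence the #24 post-index above.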
	cmp	w12, #R_AARCH64_RELATIVE
	b.ne	1f
	add	x13, x13, x23			// relocate
	str	x13, [x11, x23]
	b	0b

1:	cmp	w12, #R_AARCH64_ABS64
	b.ne	0b
	add	x12, x12, x12, lsl #1		// symtab offset: 24x top word
	add	x12, x8, x12, lsr #(32 - 3)	// ... shifted into bottom word
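	// (The symbol index sits in the top 32 bits of r_info; x + 2x == 3x,
	// and shifting 3x right by (32 - 3) both moves the index into the
	// low word and scales it by 8, yielding index * 24 -- the byte
	// offset of the Elf64_Sym entry, sizeof(Elf64_Sym) being 24.)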
	ldrsh	w14, [x12, #6]			// Elf64_Sym::st_shndx
	ldr	x15, [x12, #8]			// Elf64_Sym::st_value
	cmp	w14, #-0xf			// SHN_ABS (0xfff1) ?
	add	x14, x15, x23			// relocate
	csel	x15, x14, x15, ne
	add	x15, x13, x15
	str	x15, [x11, x23]
	b	0b

2:
#endif
	ldr	x8, =__primary_switched
	br	x8
ENDPROC(__primary_switch)

__secondary_switch:
	ldr	x8, =__secondary_switched
	br	x8
ENDPROC(__secondary_switch)