/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * (C) Copyright 2013
 * David Feng <fenghua@phytium.com.cn>
 */

#include <asm-offsets.h>
#include <config.h>
#include <linux/linkage.h>
#include <asm/macro.h>
#include <asm/armv8/mmu.h>

/*************************************************************************
 *
 * Startup Code (reset vector)
 *
 *************************************************************************/

.globl	_start
_start:
#if defined(CONFIG_LINUX_KERNEL_IMAGE_HEADER)
#include <asm/boot0-linux-kernel-header.h>
#elif defined(CONFIG_ENABLE_ARM_SOC_BOOT0_HOOK)
/*
 * Various SoCs need something special and SoC-specific up front in
 * order to boot, allow them to set that in their boot0.h file and then
 * use it here.
 */
#include <asm/arch/boot0.h>
#else
	b	reset
#endif

	.align 3

.globl	_TEXT_BASE
_TEXT_BASE:
	.quad	CONFIG_SYS_TEXT_BASE

/*
 * These are defined in the linker script.
 */
.globl	_end_ofs
_end_ofs:
	.quad	_end - _start

.globl	_bss_start_ofs
_bss_start_ofs:
	.quad	__bss_start - _start

.globl	_bss_end_ofs
_bss_end_ofs:
	.quad	__bss_end - _start

reset:
	/* Allow the board to save important registers */
	b	save_boot_params
.globl	save_boot_params_ret
save_boot_params_ret:

#if CONFIG_POSITION_INDEPENDENT
	/* Verify that we're 4K aligned.  */
	adr	x0, _start
	ands	x0, x0, #0xfff
	b.eq	1f
0:
	/*
	 * FATAL, can't continue.
	 * U-Boot needs to be loaded at a 4K aligned address.
	 *
	 * We use ADRP and ADD to load some symbol addresses during startup.
	 * The ADD uses an absolute (non-PC-relative) lo12 relocation,
	 * thus requiring 4K alignment.
	 */
	wfi
	b	0b
1:
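	/*
	 * Illustration (not executed): an ADRP/ADD pair materializes a
	 * symbol address as
	 *
	 *	adrp	x0, sym			// runtime 4K page of sym
	 *	add	x0, x0, #:lo12:sym	// link-time low 12 bits of sym
	 *
	 * The lo12 part is fixed at link time, so the result is only
	 * correct if the runtime load address differs from the link
	 * address by a multiple of 4K.
	 */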

	/*
	 * Fix .rela.dyn relocations. This allows U-Boot to be loaded to and
	 * executed at a different address than it was linked at.
	 */
pie_fixup:
	adr	x0, _start		/* x0 <- Runtime value of _start */
	ldr	x1, _TEXT_BASE		/* x1 <- Linked value of _start */
	subs	x9, x0, x1		/* x9 <- Run-vs-link offset */
	beq	pie_fixup_done
	adrp    x2, __rel_dyn_start     /* x2 <- Runtime &__rel_dyn_start */
	add     x2, x2, #:lo12:__rel_dyn_start
	adrp    x3, __rel_dyn_end       /* x3 <- Runtime &__rel_dyn_end */
	add     x3, x3, #:lo12:__rel_dyn_end
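	/*
	 * Each .rela.dyn entry is an Elf64_Rela record of three 64-bit
	 * words: r_offset, r_info, r_addend. The loop below consumes
	 * 24 bytes per entry (ldp + ldr) and, for R_AARCH64_RELATIVE
	 * entries, stores (r_addend + offset) at (r_offset + offset).
	 */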
pie_fix_loop:
	ldp	x0, x1, [x2], #16	/* (x0, x1) <- (Link location, fixup) */
	ldr	x4, [x2], #8		/* x4 <- addend */
	cmp	w1, #1027		/* R_AARCH64_RELATIVE? */
	bne	pie_skip_reloc
	/* relative fix: store addend plus offset at dest location */
	add	x0, x0, x9
	add	x4, x4, x9
	str	x4, [x0]
pie_skip_reloc:
	cmp	x2, x3
	b.lo	pie_fix_loop
pie_fixup_done:
#endif

#ifdef CONFIG_SYS_RESET_SCTRL
	bl reset_sctrl
#endif

#if defined(CONFIG_ARMV8_SPL_EXCEPTION_VECTORS) || !defined(CONFIG_SPL_BUILD)
.macro	set_vbar, regname, reg
	msr	\regname, \reg
.endm
	adr	x0, vectors
#else
.macro	set_vbar, regname, reg
.endm
#endif
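	/*
	 * In SPL builds without CONFIG_ARMV8_SPL_EXCEPTION_VECTORS no
	 * vector table is linked in, so set_vbar above intentionally
	 * expands to nothing and the VBAR writes below disappear.
	 */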
	/*
	 * Could be EL3/EL2/EL1, Initial State:
	 * Little Endian, MMU Disabled, i/dCache Disabled
	 */
	switch_el x1, 3f, 2f, 1f
3:	set_vbar vbar_el3, x0
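	/*
	 * SCR_EL3 bits [3:0]: NS (bit 0) makes lower ELs non-secure;
	 * IRQ/FIQ/EA (bits 1-3) route interrupts and external aborts
	 * to EL3.
	 */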
	mrs	x0, scr_el3
	orr	x0, x0, #0xf			/* SCR_EL3.NS|IRQ|FIQ|EA */
	msr	scr_el3, x0
	msr	cptr_el3, xzr			/* Enable FP/SIMD */
#ifdef COUNTER_FREQUENCY
	ldr	x0, =COUNTER_FREQUENCY
	msr	cntfrq_el0, x0			/* Initialize CNTFRQ */
#endif
	b	0f
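	/*
	 * With VHE (HCR_EL2.E2H == 1) EL2 uses the EL1 register formats
	 * and the *_el1 accessors are redirected to their EL2
	 * counterparts, so take the EL1 path below.
	 */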
2:	mrs	x1, hcr_el2
	tbnz	x1, #34, 1f			/* HCR_EL2.E2H */
	set_vbar vbar_el2, x0
	mov	x0, #0x33ff
	msr	cptr_el2, x0			/* Enable FP/SIMD */
	b	0f
1:	set_vbar vbar_el1, x0
	mov	x0, #3 << 20		/* CPACR_EL1.FPEN = 0b11 */
	msr	cpacr_el1, x0		/* Enable FP/SIMD, no trapping */
0:
	isb

	/*
	 * Enable SMPEN bit for coherency.
	 * This register is not architectural, but this bit currently
	 * needs to be set on A53/A57/A72.
	 */
#ifdef CONFIG_ARMV8_SET_SMPEN
	switch_el x1, 3f, 1f, 1f
3:
	mrs     x0, S3_1_c15_c2_1               /* cpuectlr_el1 */
	orr     x0, x0, #0x40
	msr     S3_1_c15_c2_1, x0
	isb
1:
#endif

	/* Apply ARM core specific errata */
	bl	apply_core_errata

	/*
	 * Cache/BPB/TLB invalidate:
	 * the i-cache is invalidated before being enabled in icache_enable(),
	 * the TLB is invalidated before the MMU is enabled in dcache_enable(),
	 * the d-cache is invalidated before being enabled in dcache_enable().
	 */

	/* Processor specific initialization */
	bl	lowlevel_init

#if defined(CONFIG_ARMV8_SPIN_TABLE) && !defined(CONFIG_SPL_BUILD)
	branch_if_master x0, x1, master_cpu
	b	spin_table_secondary_jump
	/* never return */
#elif defined(CONFIG_ARMV8_MULTIENTRY)
	branch_if_master x0, x1, master_cpu

	/*
	 * Slave CPUs
	 */
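	/*
	 * Spin-table protocol: the master publishes an entry address
	 * at CPU_RELEASE_ADDR and wakes the secondaries with SEV; each
	 * slave re-reads the mailbox on every wakeup and branches once
	 * it sees a non-zero value.
	 */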
slave_cpu:
	wfe
	ldr	x1, =CPU_RELEASE_ADDR
	ldr	x0, [x1]
	cbz	x0, slave_cpu
	br	x0			/* branch to the given address */
#endif /* CONFIG_ARMV8_MULTIENTRY */
master_cpu:
	bl	_main

#ifdef CONFIG_SYS_RESET_SCTRL
reset_sctrl:
	switch_el x1, 3f, 2f, 1f
3:
	mrs	x0, sctlr_el3
	b	0f
2:
	mrs	x0, sctlr_el2
	b	0f
1:
	mrs	x0, sctlr_el1

0:
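	/*
	 * Mask 0xfdfffffa clears SCTLR bits M (0, MMU), C (2, d-cache)
	 * and EE (25, exception endianness), i.e. MMU and d-cache off,
	 * little-endian exceptions.
	 */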
	ldr	x1, =0xfdfffffa
	and	x0, x0, x1

	switch_el x1, 6f, 5f, 4f
6:
	msr	sctlr_el3, x0
	b	7f
5:
	msr	sctlr_el2, x0
	b	7f
4:
	msr	sctlr_el1, x0

7:
	dsb	sy
	isb
	b	__asm_invalidate_tlb_all
	ret
#endif

/*-----------------------------------------------------------------------*/

WEAK(apply_core_errata)

	mov	x29, lr			/* Save LR */
	/* For now, we support Cortex-A53, Cortex-A57 specific errata */

	/* Check if we are running on a Cortex-A53 core */
	branch_if_a53_core x0, apply_a53_core_errata

	/* Check if we are running on a Cortex-A57 core */
	branch_if_a57_core x0, apply_a57_core_errata
0:
	mov	lr, x29			/* Restore LR */
	ret

apply_a53_core_errata:

#ifdef CONFIG_ARM_ERRATA_855873
	/* Only revisions r0p3 and later implement the workaround bit */
	mrs	x0, midr_el1
	tst	x0, #(0xf << 20)	/* variant field must be 0 (r0pX) */
	b.ne	0b

	mrs	x0, midr_el1
	and	x0, x0, #0xf		/* revision field */
	cmp	x0, #3
	b.lt	0b

	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/* Enable data cache clean as data cache clean/invalidate */
	orr	x0, x0, #1 << 44
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
	isb
#endif
	b 0b

apply_a57_core_errata:

#ifdef CONFIG_ARM_ERRATA_828024
	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/* Disable non-allocate hint of w-b-n-a memory type */
	orr	x0, x0, #1 << 49
	/* Disable write streaming no L1-allocate threshold */
	orr	x0, x0, #3 << 25
	/* Disable write streaming no-allocate threshold */
	orr	x0, x0, #3 << 27
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
	isb
#endif

#ifdef CONFIG_ARM_ERRATA_826974
	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/* Disable speculative load execution ahead of a DMB */
	orr	x0, x0, #1 << 59
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
	isb
#endif

#ifdef CONFIG_ARM_ERRATA_833471
	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/*
	 * FPSCR write flush.
	 * Note that in some cases where a flush is unnecessary this
	 * could impact performance.
	 */
	orr	x0, x0, #1 << 38
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
	isb
#endif

#ifdef CONFIG_ARM_ERRATA_829520
	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/*
	 * Setting the Disable Indirect Predictor bit prevents this
	 * erratum from occurring.
	 * Note that in some cases where a flush is unnecessary this
	 * could impact performance.
	 */
	orr	x0, x0, #1 << 4
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
	isb
#endif

#ifdef CONFIG_ARM_ERRATA_833069
	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/* Clear the 'Enable invalidates of BTB' bit (keep other bits) */
	bic	x0, x0, #0x1
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
	isb
#endif
	b 0b
ENDPROC(apply_core_errata)

/*-----------------------------------------------------------------------*/

WEAK(lowlevel_init)
	mov	x29, lr			/* Save LR */

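	/*
	 * GIC setup: the master CPU initializes the shared distributor
	 * once; every CPU then initializes its own per-CPU interface
	 * (GICv2 CPU interface or GICv3 redistributor).
	 */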
#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
	branch_if_slave x0, 1f
	ldr	x0, =GICD_BASE
	bl	gic_init_secure
1:
#if defined(CONFIG_GICV3)
	ldr	x0, =GICR_BASE
	bl	gic_init_secure_percpu
#elif defined(CONFIG_GICV2)
	ldr	x0, =GICD_BASE
	ldr	x1, =GICC_BASE
	bl	gic_init_secure_percpu
#endif
#endif

#ifdef CONFIG_ARMV8_MULTIENTRY
	branch_if_master x0, x1, 2f

	/*
	 * Slaves should wait for the master to clear the spin table.
	 * This sync prevents slaves from observing a stale spin-table
	 * value and jumping to the wrong place.
	 */
#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
#ifdef CONFIG_GICV2
	ldr	x0, =GICC_BASE
#endif
	bl	gic_wait_for_interrupt
#endif

	/*
	 * All slaves will enter EL2 and optionally EL1.
	 */
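	/*
	 * armv8_switch_to_el2() convention as used here: x4 carries the
	 * address to enter at the new exception level, x5 the execution
	 * state flag (ES_TO_AARCH64).
	 */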
	adr	x4, lowlevel_in_el2
	ldr	x5, =ES_TO_AARCH64
	bl	armv8_switch_to_el2

lowlevel_in_el2:
#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
	adr	x4, lowlevel_in_el1
	ldr	x5, =ES_TO_AARCH64
	bl	armv8_switch_to_el1

lowlevel_in_el1:
#endif

#endif /* CONFIG_ARMV8_MULTIENTRY */

2:
	mov	lr, x29			/* Restore LR */
	ret
ENDPROC(lowlevel_init)

WEAK(smp_kick_all_cpus)
	/* Kick secondary cpus up by SGI 0 interrupt */
#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
	ldr	x0, =GICD_BASE
	b	gic_kick_secondary_cpus
#endif
	ret
ENDPROC(smp_kick_all_cpus)

/*-----------------------------------------------------------------------*/

ENTRY(c_runtime_cpu_setup)
#if defined(CONFIG_ARMV8_SPL_EXCEPTION_VECTORS) || !defined(CONFIG_SPL_BUILD)
	/* Relocate vBAR */
	adr	x0, vectors
	switch_el x1, 3f, 2f, 1f
3:	msr	vbar_el3, x0
	b	0f
2:	msr	vbar_el2, x0
	b	0f
1:	msr	vbar_el1, x0
0:
#endif

	ret
ENDPROC(c_runtime_cpu_setup)
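/*
 * Boards may override this weak stub to preserve registers handed over
 * by the previous boot stage (e.g. x0..x3); any override must finish by
 * branching back to save_boot_params_ret.
 */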

WEAK(save_boot_params)
	b	save_boot_params_ret	/* back to my caller */
ENDPROC(save_boot_params)