Commit dd6df64c authored by Michael Walle, committed by Priyanka Jain

armv8: layerscape: rework spin table

There are two issues:

 (1) The spin table doesn't convert the endianness of the jump address.
     Although there is code for it, the converted result (in x0) is never
     used.
 (2) If something goes wrong, the function returns. But that doesn't
     make sense at all.
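
For illustration, here is a condensed sketch of the broken flow, paraphrased
from the removed lines in the diff below: the converted value is left in x0,
while the branch target is reloaded raw into x4:

	ldr	x0, [x11]		/* jump address, possibly big-endian */
	tbz	x1, #25, cpu_is_le
	rev	x0, x0			/* BE to LE conversion lands in x0... */
cpu_is_le:
	...
	ldr	x4, [x11]		/* ...but x4 is reloaded, unconverted */
	armv8_switch_to_el2_m x4, x5, x6	/* jumps via the raw address */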

Fix this by using the actual converted jump address as the destination. If
there is an error, jump to a trap loop instead of returning. Also rearrange
the exception level switching code to make it smaller and clearer.
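
A condensed sketch of the reworked flow, paraphrased from the added lines in
the diff below: the conversion now happens in place in x4, and an unexpected
exception level traps instead of returning:

1:	wfe
	ldr	x4, [x11]		/* jump address from the spin table */
	cbz	x4, 1b
	tbz	x1, #25, 2f
	rev	x4, x4			/* BE to LE conversion, kept in x4 */
2:	...
	switch_el x7, 0f, _dead_loop, _dead_loop
0:	armv8_switch_to_el2_m x4, x6, x7	/* branch via the converted x4 */

_dead_loop:
	wfe
	b	_dead_loop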

This reduces the size of the spin table code section from 696 bytes to
424 bytes. With CONFIG_ARMV8_SWITCH_TO_EL1 enabled, the code size is reduced
from 696 bytes to 632 bytes.
Signed-off-by: Michael Walle <michael@walle.cc>
Reviewed-by: Priyanka Jain <priyanka.jain@nxp.com>
Parent 16863da8
@@ -31,7 +31,7 @@ __spin_table:
 	.space	CONFIG_MAX_CPUS*SPIN_TABLE_ELEM_SIZE
 
 	.align 2
-ENTRY(__secondary_boot_func)
+__secondary_boot_func:
 	/*
 	 * MPIDR_EL1 Fields:
 	 * MPIDR[1:0] = AFF0_CPUID <- Core ID (0,1)
@@ -72,73 +72,36 @@ ENTRY(__secondary_boot_func)
 	str	x4, [x11, #8]	/* STATUS */
 	dsb	sy
 
-slave_cpu:
+1:
 	wfe
-	ldr	x0, [x11]
-	cbz	x0, slave_cpu
-#ifndef CONFIG_ARMV8_SWITCH_TO_EL1
+	ldr	x4, [x11]
+	cbz	x4, 1b
 	mrs	x1, sctlr_el2
-#else
-	mrs	x1, sctlr_el1
-#endif
-	tbz	x1, #25, cpu_is_le
-	rev	x0, x0			/* BE to LE conversion */
-cpu_is_le:
-	ldr	x5, [x11, #24]
-	cbz	x5, 1f
-
+	tbz	x1, #25, 2f
+	rev	x4, x4			/* BE to LE conversion */
+2:
+	ldr	x6, =ES_TO_AARCH64
 #ifdef CONFIG_ARMV8_SWITCH_TO_EL1
-	adr	x4, secondary_switch_to_el1
-	ldr	x5, =ES_TO_AARCH64
-#else
-	ldr	x4, [x11]
-	ldr	x5, =ES_TO_AARCH32
+	adr	x5, 3f
+	switch_el x7, 0f, _dead_loop, _dead_loop
+0:	armv8_switch_to_el2_m x5, x6, x7
 #endif
-	bl	secondary_switch_to_el2
-
-1:
+3:
+	ldr	x7, [x11, #24]	/* ARCH_COMP */
+	cbz	x7, 4f
+	ldr	x6, =ES_TO_AARCH32
+4:
 #ifdef CONFIG_ARMV8_SWITCH_TO_EL1
-	adr	x4, secondary_switch_to_el1
+	switch_el x7, _dead_loop, 0f, _dead_loop
+0:	armv8_switch_to_el1_m x4, x6, x7
 #else
-	ldr	x4, [x11]
+	switch_el x7, 0f, _dead_loop, _dead_loop
+0:	armv8_switch_to_el2_m x4, x6, x7
 #endif
-	ldr	x5, =ES_TO_AARCH64
-	bl	secondary_switch_to_el2
-
-ENDPROC(__secondary_boot_func)
-
-ENTRY(secondary_switch_to_el2)
-	switch_el x6, 1f, 0f, 0f
-0:	ret
-1:	armv8_switch_to_el2_m x4, x5, x6
-ENDPROC(secondary_switch_to_el2)
-
-ENTRY(secondary_switch_to_el1)
-	mrs	x0, mpidr_el1
-	ubfm	x1, x0, #8, #15
-	ubfm	x2, x0, #0, #1
-	orr	x10, x2, x1, lsl #2	/* x10 has LPID */
-
-	lsl	x1, x10, #6
-	adr	x0, __spin_table
-	/* physical address of this cpus spin table element */
-	add	x11, x1, x0
-
-	ldr	x4, [x11]
-
-	ldr	x5, [x11, #24]
-	cbz	x5, 2f
-
-	ldr	x5, =ES_TO_AARCH32
-	bl	switch_to_el1
-
-2:	ldr	x5, =ES_TO_AARCH64
-
-switch_to_el1:
-	switch_el x6, 0f, 1f, 0f
-0:	ret
-1:	armv8_switch_to_el1_m x4, x5, x6
-ENDPROC(secondary_switch_to_el1)
+
+_dead_loop:
+	wfe
+	b	_dead_loop
 
 /* Ensure that the literals used by the secondary boot code are
  * assembled within it (this is required so that we can protect
...