Commit f6c62f1c authored by Michael Walle, committed by Priyanka Jain

armv8: layerscape: move spin table into own module

Move it out of lowlevel.S into spintable.S. On layerscape, the secondary
CPUs are brought up in main u-boot. This will make it possible to only
compile the spin table code for the main u-boot and omit it in SPL.

This saves about 720 bytes in the SPL.
Signed-off-by: Michael Walle <michael@walle.cc>
Reviewed-by: Priyanka Jain <priyanka.jain@nxp.com>
Parent 3d3fe8b1
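The new spintable.S provides a strong secondary_boot_addr pointing at secondary_boot_func, while lowlevel.S keeps only a weak definition of 0 (see the hunks below); in SPL builds, where spintable.o is not linked, the weak zero is what remains. A minimal, stand-alone C sketch of that weak/strong link-time override (illustrative only, not U-Boot code):

/* weak_default.c - stands in for the weak symbol left in lowlevel.S */
#include <stdint.h>
#include <stdio.h>

/* Weak default, analogous to ".weak secondary_boot_addr" / ".quad 0". */
uint64_t secondary_boot_addr __attribute__((weak)) = 0;

int main(void)
{
	/*
	 * Linked alone (the "SPL" case) this prints 0.  Linking in another
	 * object that defines a non-weak secondary_boot_addr (the
	 * "spintable.o" case) makes that strong definition win instead.
	 */
	printf("secondary_boot_addr = %#llx\n",
	       (unsigned long long)secondary_boot_addr);
	return 0;
}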
@@ -6,7 +6,7 @@ obj-y += cpu.o
obj-y += lowlevel.o
obj-y += soc.o
ifndef CONFIG_SPL_BUILD
obj-$(CONFIG_MP) += mp.o
obj-$(CONFIG_MP) += mp.o spintable.o
obj-$(CONFIG_OF_LIBFDT) += fdt.o
endif
obj-$(CONFIG_SPL) += spl.o
@@ -11,14 +11,16 @@
#include <asm/gic.h>
#include <asm/macro.h>
#include <asm/arch-fsl-layerscape/soc.h>
#ifdef CONFIG_MP
#include <asm/arch/mp.h>
#endif
#ifdef CONFIG_FSL_LSCH3
#include <asm/arch-fsl-layerscape/immap_lsch3.h>
#endif
#include <asm/u-boot.h>
.align 3
.weak secondary_boot_addr
secondary_boot_addr:
.quad 0
/* Get GIC offset
* For LS1043a rev1.0, GIC base address align with 4k.
* For LS1043a rev1.1, if DCFG_GIC400_ALIGN[GIC_ADDR_BIT]
@@ -424,149 +426,3 @@ ENTRY(__asm_flush_l3_dcache)
ret
ENDPROC(__asm_flush_l3_dcache)
#endif /* CONFIG_SYS_FSL_HAS_CCN504 */
#ifdef CONFIG_MP
.align 3
.global secondary_boot_addr
secondary_boot_addr:
.quad secondary_boot_func
/* Keep literals not used by the secondary boot code outside it */
.ltorg
/* Using 64 bit alignment since the spin table is accessed as data */
.align 4
.global secondary_boot_code
/* Secondary Boot Code starts here */
secondary_boot_code:
.global __spin_table
__spin_table:
.space CONFIG_MAX_CPUS*SPIN_TABLE_ELEM_SIZE
.align 2
ENTRY(secondary_boot_func)
/*
* MPIDR_EL1 Fields:
* MPIDR[1:0] = AFF0_CPUID <- Core ID (0,1)
* MPIDR[7:2] = AFF0_RES
* MPIDR[15:8] = AFF1_CLUSTERID <- Cluster ID (0,1,2,3)
* MPIDR[23:16] = AFF2_CLUSTERID
* MPIDR[24] = MT
* MPIDR[29:25] = RES0
* MPIDR[30] = U
* MPIDR[31] = ME
* MPIDR[39:32] = AFF3
*
* Linear Processor ID (LPID) calculation from MPIDR_EL1:
* (We only use AFF0_CPUID and AFF1_CLUSTERID for now
* until AFF2_CLUSTERID and AFF3 have non-zero values)
*
* LPID = MPIDR[15:8] | MPIDR[1:0]
*/
mrs x0, mpidr_el1
ubfm x1, x0, #8, #15
ubfm x2, x0, #0, #1
orr x10, x2, x1, lsl #2 /* x10 has LPID */
ubfm x9, x0, #0, #15 /* x9 contains MPIDR[15:0] */
/*
* offset of the spin table element for this core from start of spin
* table (each elem is padded to 64 bytes)
*/
lsl x1, x10, #6
ldr x0, =__spin_table
/* physical address of this cpus spin table element */
add x11, x1, x0
ldr x0, =__real_cntfrq
ldr x0, [x0]
msr cntfrq_el0, x0 /* set with real frequency */
str x9, [x11, #16] /* LPID */
mov x4, #1
str x4, [x11, #8] /* STATUS */
dsb sy
slave_cpu:
wfe
ldr x0, [x11]
cbz x0, slave_cpu
#ifndef CONFIG_ARMV8_SWITCH_TO_EL1
mrs x1, sctlr_el2
#else
mrs x1, sctlr_el1
#endif
tbz x1, #25, cpu_is_le
rev x0, x0 /* BE to LE conversion */
cpu_is_le:
ldr x5, [x11, #24]
cbz x5, 1f
#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
adr x4, secondary_switch_to_el1
ldr x5, =ES_TO_AARCH64
#else
ldr x4, [x11]
ldr x5, =ES_TO_AARCH32
#endif
bl secondary_switch_to_el2
1:
#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
adr x4, secondary_switch_to_el1
#else
ldr x4, [x11]
#endif
ldr x5, =ES_TO_AARCH64
bl secondary_switch_to_el2
ENDPROC(secondary_boot_func)
ENTRY(secondary_switch_to_el2)
switch_el x6, 1f, 0f, 0f
0: ret
1: armv8_switch_to_el2_m x4, x5, x6
ENDPROC(secondary_switch_to_el2)
ENTRY(secondary_switch_to_el1)
mrs x0, mpidr_el1
ubfm x1, x0, #8, #15
ubfm x2, x0, #0, #1
orr x10, x2, x1, lsl #2 /* x10 has LPID */
lsl x1, x10, #6
ldr x0, =__spin_table
/* physical address of this cpus spin table element */
add x11, x1, x0
ldr x4, [x11]
ldr x5, [x11, #24]
cbz x5, 2f
ldr x5, =ES_TO_AARCH32
bl switch_to_el1
2: ldr x5, =ES_TO_AARCH64
switch_to_el1:
switch_el x6, 0f, 1f, 0f
0: ret
1: armv8_switch_to_el1_m x4, x5, x6
ENDPROC(secondary_switch_to_el1)
/* Ensure that the literals used by the secondary boot code are
* assembled within it (this is required so that we can protect
* this area with a single memreserve region
*/
.ltorg
/* 64 bit alignment for elements accessed as data */
.align 4
.global __real_cntfrq
__real_cntfrq:
.quad COUNTER_FREQUENCY
.globl __secondary_boot_code_size
.type __secondary_boot_code_size, %object
/* Secondary Boot Code ends here */
__secondary_boot_code_size:
.quad .-secondary_boot_code
#endif
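The LPID derivation in the block above (and repeated in the new spintable.S below) shifts AFF1 (the cluster ID) left by two, ORs in the low two bits of AFF0 (the core ID), and scales the result by the 64-byte element size to find this core's spin-table slot. A small C sketch of the same arithmetic with a worked example (illustrative only; the 64-byte element size follows from the "lsl x1, x10, #6"):

#include <stdint.h>
#include <stdio.h>

#define SPIN_TABLE_ELEM_SIZE	64	/* each element is padded to 64 bytes */

/* Mirrors "orr x10, x2, x1, lsl #2": x1 = MPIDR[15:8], x2 = MPIDR[1:0]. */
static unsigned int lpid_from_mpidr(uint64_t mpidr)
{
	unsigned int aff1 = (mpidr >> 8) & 0xff;	/* cluster id */
	unsigned int aff0 = mpidr & 0x3;		/* core id in cluster */

	return (aff1 << 2) | aff0;
}

int main(void)
{
	uint64_t mpidr = 0x0101;	/* cluster 1, core 1 */
	unsigned int lpid = lpid_from_mpidr(mpidr);

	/* cluster 1, core 1 -> LPID 5, element offset 5 * 64 = 320 bytes */
	printf("LPID %u, spin table offset %u\n",
	       lpid, lpid * SPIN_TABLE_ELEM_SIZE);
	return 0;
}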
/* SPDX-License-Identifier: GPL-2.0+ */
/*
* (C) Copyright 2014-2015 Freescale Semiconductor
* Copyright 2019 NXP
*/
#include <config.h>
#include <linux/linkage.h>
#include <asm/macro.h>
#include <asm/system.h>
#include <asm/arch/mp.h>
.align 3
.global secondary_boot_addr
secondary_boot_addr:
.quad secondary_boot_func
/* Keep literals not used by the secondary boot code outside it */
.ltorg
/* Using 64 bit alignment since the spin table is accessed as data */
.align 4
.global secondary_boot_code
/* Secondary Boot Code starts here */
secondary_boot_code:
.global __spin_table
__spin_table:
.space CONFIG_MAX_CPUS*SPIN_TABLE_ELEM_SIZE
.align 2
ENTRY(secondary_boot_func)
/*
* MPIDR_EL1 Fields:
* MPIDR[1:0] = AFF0_CPUID <- Core ID (0,1)
* MPIDR[7:2] = AFF0_RES
* MPIDR[15:8] = AFF1_CLUSTERID <- Cluster ID (0,1,2,3)
* MPIDR[23:16] = AFF2_CLUSTERID
* MPIDR[24] = MT
* MPIDR[29:25] = RES0
* MPIDR[30] = U
* MPIDR[31] = ME
* MPIDR[39:32] = AFF3
*
* Linear Processor ID (LPID) calculation from MPIDR_EL1:
* (We only use AFF0_CPUID and AFF1_CLUSTERID for now
* until AFF2_CLUSTERID and AFF3 have non-zero values)
*
* LPID = MPIDR[15:8] | MPIDR[1:0]
*/
mrs x0, mpidr_el1
ubfm x1, x0, #8, #15
ubfm x2, x0, #0, #1
orr x10, x2, x1, lsl #2 /* x10 has LPID */
ubfm x9, x0, #0, #15 /* x9 contains MPIDR[15:0] */
/*
* offset of the spin table element for this core from start of spin
* table (each elem is padded to 64 bytes)
*/
lsl x1, x10, #6
ldr x0, =__spin_table
/* physical address of this cpus spin table element */
add x11, x1, x0
ldr x0, =__real_cntfrq
ldr x0, [x0]
msr cntfrq_el0, x0 /* set with real frequency */
str x9, [x11, #16] /* LPID */
mov x4, #1
str x4, [x11, #8] /* STATUS */
dsb sy
slave_cpu:
wfe
ldr x0, [x11]
cbz x0, slave_cpu
#ifndef CONFIG_ARMV8_SWITCH_TO_EL1
mrs x1, sctlr_el2
#else
mrs x1, sctlr_el1
#endif
tbz x1, #25, cpu_is_le
rev x0, x0 /* BE to LE conversion */
cpu_is_le:
ldr x5, [x11, #24]
cbz x5, 1f
#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
adr x4, secondary_switch_to_el1
ldr x5, =ES_TO_AARCH64
#else
ldr x4, [x11]
ldr x5, =ES_TO_AARCH32
#endif
bl secondary_switch_to_el2
1:
#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
adr x4, secondary_switch_to_el1
#else
ldr x4, [x11]
#endif
ldr x5, =ES_TO_AARCH64
bl secondary_switch_to_el2
ENDPROC(secondary_boot_func)
ENTRY(secondary_switch_to_el2)
switch_el x6, 1f, 0f, 0f
0: ret
1: armv8_switch_to_el2_m x4, x5, x6
ENDPROC(secondary_switch_to_el2)
ENTRY(secondary_switch_to_el1)
mrs x0, mpidr_el1
ubfm x1, x0, #8, #15
ubfm x2, x0, #0, #1
orr x10, x2, x1, lsl #2 /* x10 has LPID */
lsl x1, x10, #6
ldr x0, =__spin_table
/* physical address of this cpus spin table element */
add x11, x1, x0
ldr x4, [x11]
ldr x5, [x11, #24]
cbz x5, 2f
ldr x5, =ES_TO_AARCH32
bl switch_to_el1
2: ldr x5, =ES_TO_AARCH64
switch_to_el1:
switch_el x6, 0f, 1f, 0f
0: ret
1: armv8_switch_to_el1_m x4, x5, x6
ENDPROC(secondary_switch_to_el1)
/* Ensure that the literals used by the secondary boot code are
* assembled within it (this is required so that we can protect
* this area with a single memreserve region
*/
.ltorg
/* 64 bit alignment for elements accessed as data */
.align 4
.global __real_cntfrq
__real_cntfrq:
.quad COUNTER_FREQUENCY
.globl __secondary_boot_code_size
.type __secondary_boot_code_size, %object
/* Secondary Boot Code ends here */
__secondary_boot_code_size:
.quad .-secondary_boot_code
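For reference, the offsets used by secondary_boot_func (entry address at 0, STATUS at 8, LPID at 16, the AArch32/AArch64 request flag at 24, padded to 64 bytes) amount to the per-CPU record sketched below in C. The field names are descriptive only, not the ones defined in asm/arch/mp.h:

#include <stdint.h>

struct spin_table_elem {
	uint64_t entry_addr;	/* 0:  jump target; 0 means keep spinning in wfe */
	uint64_t status;	/* 8:  set to 1 once the core has parked itself */
	uint64_t lpid;		/* 16: MPIDR[15:0] of the parked core */
	uint64_t arch_comp;	/* 24: non-zero requests an AArch32 entry */
	uint64_t reserved[4];	/* pad the element to 64 bytes */
};

_Static_assert(sizeof(struct spin_table_elem) == 64,
	       "each spin table element is padded to 64 bytes");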