提交 b6113038 编写于 作者: J James Morse 提交者: Will Deacon

arm64: vmlinux.ld: Add mmuoff data sections and move mmuoff text into idmap

Resume from hibernate needs to clean any text executed by the kernel with
the MMU off to the PoC. Collect these functions together into the
.idmap.text section as all this code is tightly coupled and also needs
the same cleaning after resume.

Data is more complicated: secondary_holding_pen_release is written with
the MMU on, cleaned and invalidated, then read with the MMU off. In contrast,
__boot_cpu_mode is written with the MMU off, and the corresponding cache line
is invalidated, so when we read it with the MMU on we don't get stale data.
These cache maintenance operations conflict with each other if the values
are within a Cache Writeback Granule (CWG) of each other.
Collect the data into two sections, .mmuoff.data.read and .mmuoff.data.write;
the linker script ensures the .mmuoff.data.write section is aligned to the
architectural maximum CWG of 2KB.
Signed-off-by: James Morse <james.morse@arm.com>
Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
上级 ee78fdc7
...@@ -25,5 +25,6 @@ extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[]; ...@@ -25,5 +25,6 @@ extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[];
extern char __hyp_text_start[], __hyp_text_end[]; extern char __hyp_text_start[], __hyp_text_end[];
extern char __idmap_text_start[], __idmap_text_end[]; extern char __idmap_text_start[], __idmap_text_end[];
extern char __irqentry_text_start[], __irqentry_text_end[]; extern char __irqentry_text_start[], __irqentry_text_end[];
extern char __mmuoff_data_start[], __mmuoff_data_end[];
#endif /* __ASM_SECTIONS_H */ #endif /* __ASM_SECTIONS_H */
...@@ -463,7 +463,7 @@ ENDPROC(__primary_switched) ...@@ -463,7 +463,7 @@ ENDPROC(__primary_switched)
* end early head section, begin head code that is also used for * end early head section, begin head code that is also used for
* hotplug and needs to have the same protections as the text region * hotplug and needs to have the same protections as the text region
*/ */
.section ".text","ax" .section ".idmap.text","ax"
ENTRY(kimage_vaddr) ENTRY(kimage_vaddr)
.quad _text - TEXT_OFFSET .quad _text - TEXT_OFFSET
...@@ -618,6 +618,13 @@ set_cpu_boot_mode_flag: ...@@ -618,6 +618,13 @@ set_cpu_boot_mode_flag:
ret ret
ENDPROC(set_cpu_boot_mode_flag) ENDPROC(set_cpu_boot_mode_flag)
/*
* These values are written with the MMU off, but read with the MMU on.
* Writers will invalidate the corresponding address, discarding up to a
* 'Cache Writeback Granule' (CWG) worth of data. The linker script ensures
* sufficient alignment that the CWG doesn't overlap another section.
*/
.pushsection ".mmuoff.data.write", "aw"
/* /*
* We need to find out the CPU boot mode long after boot, so we need to * We need to find out the CPU boot mode long after boot, so we need to
* store it in a writable variable. * store it in a writable variable.
...@@ -625,11 +632,16 @@ ENDPROC(set_cpu_boot_mode_flag) ...@@ -625,11 +632,16 @@ ENDPROC(set_cpu_boot_mode_flag)
* This is not in .bss, because we set it sufficiently early that the boot-time * This is not in .bss, because we set it sufficiently early that the boot-time
* zeroing of .bss would clobber it. * zeroing of .bss would clobber it.
*/ */
.pushsection .data..cacheline_aligned
.align L1_CACHE_SHIFT
ENTRY(__boot_cpu_mode) ENTRY(__boot_cpu_mode)
.long BOOT_CPU_MODE_EL2 .long BOOT_CPU_MODE_EL2
.long BOOT_CPU_MODE_EL1 .long BOOT_CPU_MODE_EL1
/*
* The booting CPU updates the failed status @__early_cpu_boot_status,
* with MMU turned off.
*/
ENTRY(__early_cpu_boot_status)
.long 0
.popsection .popsection
/* /*
...@@ -702,12 +714,6 @@ ENDPROC(__secondary_switched) ...@@ -702,12 +714,6 @@ ENDPROC(__secondary_switched)
dc ivac, \tmp1 // Invalidate potentially stale cache line dc ivac, \tmp1 // Invalidate potentially stale cache line
.endm .endm
.pushsection .data..cacheline_aligned
.align L1_CACHE_SHIFT
ENTRY(__early_cpu_boot_status)
.long 0
.popsection
/* /*
* Enable the MMU. * Enable the MMU.
* *
...@@ -719,7 +725,6 @@ ENTRY(__early_cpu_boot_status) ...@@ -719,7 +725,6 @@ ENTRY(__early_cpu_boot_status)
* Checks if the selected granule size is supported by the CPU. * Checks if the selected granule size is supported by the CPU.
* If it isn't, park the CPU * If it isn't, park the CPU
*/ */
.section ".idmap.text", "ax"
ENTRY(__enable_mmu) ENTRY(__enable_mmu)
mrs x22, sctlr_el1 // preserve old SCTLR_EL1 value mrs x22, sctlr_el1 // preserve old SCTLR_EL1 value
mrs x1, ID_AA64MMFR0_EL1 mrs x1, ID_AA64MMFR0_EL1
......
...@@ -97,6 +97,7 @@ ENTRY(__cpu_suspend_enter) ...@@ -97,6 +97,7 @@ ENTRY(__cpu_suspend_enter)
ENDPROC(__cpu_suspend_enter) ENDPROC(__cpu_suspend_enter)
.ltorg .ltorg
.pushsection ".idmap.text", "ax"
ENTRY(cpu_resume) ENTRY(cpu_resume)
bl el2_setup // if in EL2 drop to EL1 cleanly bl el2_setup // if in EL2 drop to EL1 cleanly
/* enable the MMU early - so we can access sleep_save_stash by va */ /* enable the MMU early - so we can access sleep_save_stash by va */
...@@ -105,7 +106,6 @@ ENTRY(cpu_resume) ...@@ -105,7 +106,6 @@ ENTRY(cpu_resume)
b __cpu_setup b __cpu_setup
ENDPROC(cpu_resume) ENDPROC(cpu_resume)
.pushsection ".idmap.text", "ax"
_resume_switched: _resume_switched:
ldr x8, =_cpu_resume ldr x8, =_cpu_resume
br x8 br x8
......
...@@ -29,7 +29,8 @@ ...@@ -29,7 +29,8 @@
#include <asm/smp_plat.h> #include <asm/smp_plat.h>
extern void secondary_holding_pen(void); extern void secondary_holding_pen(void);
volatile unsigned long secondary_holding_pen_release = INVALID_HWID; volatile unsigned long __section(".mmuoff.data.read")
secondary_holding_pen_release = INVALID_HWID;
static phys_addr_t cpu_release_addr[NR_CPUS]; static phys_addr_t cpu_release_addr[NR_CPUS];
......
...@@ -185,6 +185,25 @@ SECTIONS ...@@ -185,6 +185,25 @@ SECTIONS
_data = .; _data = .;
_sdata = .; _sdata = .;
RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE) RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
/*
* Data written with the MMU off but read with the MMU on requires
* cache lines to be invalidated, discarding up to a Cache Writeback
* Granule (CWG) of data from the cache. Keep the section that
* requires this type of maintenance to be in its own Cache Writeback
* Granule (CWG) area so the cache maintenance operations don't
* interfere with adjacent data.
*/
.mmuoff.data.write : ALIGN(SZ_2K) {
__mmuoff_data_start = .;
*(.mmuoff.data.write)
}
. = ALIGN(SZ_2K);
.mmuoff.data.read : {
*(.mmuoff.data.read)
__mmuoff_data_end = .;
}
PECOFF_EDATA_PADDING PECOFF_EDATA_PADDING
_edata = .; _edata = .;
......
...@@ -83,6 +83,7 @@ ENDPROC(cpu_do_suspend) ...@@ -83,6 +83,7 @@ ENDPROC(cpu_do_suspend)
* *
* x0: Address of context pointer * x0: Address of context pointer
*/ */
.pushsection ".idmap.text", "ax"
ENTRY(cpu_do_resume) ENTRY(cpu_do_resume)
ldp x2, x3, [x0] ldp x2, x3, [x0]
ldp x4, x5, [x0, #16] ldp x4, x5, [x0, #16]
...@@ -111,6 +112,7 @@ ENTRY(cpu_do_resume) ...@@ -111,6 +112,7 @@ ENTRY(cpu_do_resume)
isb isb
ret ret
ENDPROC(cpu_do_resume) ENDPROC(cpu_do_resume)
.popsection
#endif #endif
/* /*
...@@ -172,6 +174,7 @@ ENDPROC(idmap_cpu_replace_ttbr1) ...@@ -172,6 +174,7 @@ ENDPROC(idmap_cpu_replace_ttbr1)
* Initialise the processor for turning the MMU on. Return in x0 the * Initialise the processor for turning the MMU on. Return in x0 the
* value of the SCTLR_EL1 register. * value of the SCTLR_EL1 register.
*/ */
.pushsection ".idmap.text", "ax"
ENTRY(__cpu_setup) ENTRY(__cpu_setup)
tlbi vmalle1 // Invalidate local TLB tlbi vmalle1 // Invalidate local TLB
dsb nsh dsb nsh
...@@ -257,3 +260,4 @@ ENDPROC(__cpu_setup) ...@@ -257,3 +260,4 @@ ENDPROC(__cpu_setup)
crval: crval:
.word 0xfcffffff // clear .word 0xfcffffff // clear
.word 0x34d5d91d // set .word 0x34d5d91d // set
.popsection
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册