Commit 9d84ad42 authored by Will Deacon

Merge branch 'for-next/trivial' into for-next/core

* for-next/trivial:
  arm64: alternatives: add __init/__initconst to some functions/variables
  arm64/asm: Remove unused assembler DAIF save/restore macros
  arm64/kpti: Move DAIF masking to C code
  Revert "arm64/mm: Drop redundant BUG_ON(!pgtable_alloc)"
  arm64/mm: Drop unused restore_ttbr1
  arm64: alternatives: make apply_alternatives_vdso() static
  arm64/mm: Drop idmap_pg_end[] declaration
  arm64/mm: Drop redundant BUG_ON(!pgtable_alloc)
  arm64: make is_ttbrX_addr() noinstr-safe
  arm64/signal: Document our convention for choosing magic numbers
  arm64: atomics: lse: remove stale dependency on JUMP_LABEL
  arm64: paravirt: remove conduit check in has_pv_steal_clock
  arm64: entry: Fix typo
  arm64/booting: Add missing colon to FA64 entry
  arm64/mm: Drop ARM64_KERNEL_USES_PMD_MAPS
  arm64/asm: Remove unused enable_da macro
@@ -349,7 +349,7 @@ Before jumping into the kernel, the following conditions must be met:
     - HWFGWTR_EL2.nSMPRI_EL1 (bit 54) must be initialised to 0b01.
-  For CPUs with the Scalable Matrix Extension FA64 feature (FEAT_SME_FA64)
+  For CPUs with the Scalable Matrix Extension FA64 feature (FEAT_SME_FA64):
   - If EL3 is present:
...
@@ -1737,7 +1737,6 @@ config ARM64_LSE_ATOMICS
 config ARM64_USE_LSE_ATOMICS
 	bool "Atomic instructions"
-	depends on JUMP_LABEL
 	default y
 	help
 	  As part of the Large System Extensions, ARMv8.1 introduces new
...
@@ -34,11 +34,6 @@
 	wx\n	.req	w\n
 	.endr
-	.macro	save_and_disable_daif, flags
-	mrs	\flags, daif
-	msr	daifset, #0xf
-	.endm
 	.macro	disable_daif
 	msr	daifset, #0xf
 	.endm
@@ -47,15 +42,6 @@
 	msr	daifclr, #0xf
 	.endm
-	.macro	restore_daif, flags:req
-	msr	daif, \flags
-	.endm
-	/* IRQ/FIQ are the lowest priority flags, unconditionally unmask the rest. */
-	.macro	enable_da
-	msr	daifclr, #(8 | 4)
-	.endm
 /*
  * Save/restore interrupts.
  */
@@ -619,17 +605,6 @@ alternative_endif
 #endif
 	.endm
-/*
- * Perform the reverse of offset_ttbr1.
- * bic is used as it can cover the immediate value and, in future, won't need
- * to be nop'ed out when dealing with 52-bit kernel VAs.
- */
-	.macro	restore_ttbr1, ttbr
-#ifdef CONFIG_ARM64_VA_BITS_52
-	bic	\ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET
-#endif
-	.endm
 /*
  * Arrange a physical address in a TTBR register, taking care of 52-bit
  * addresses.
...
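The save_and_disable_daif/restore_daif pair removed above is superseded by the C helpers local_daif_save() and local_daif_restore(), which the cpu_replace_ttbr1() hunk below now calls around the TTBR1 switch. As a rough, self-contained sketch of what that save/mask/restore sequence boils down to (illustrative names, not the kernel's implementation, which additionally handles pseudo-NMI priority masking):

/* Rough sketch only: read the PSTATE.DAIF bits, then mask Debug, SError,
 * IRQ and FIQ; the saved value is written back verbatim on restore. */
static inline unsigned long sketch_daif_save(void)
{
	unsigned long flags;

	asm volatile("mrs %0, daif" : "=r" (flags));
	asm volatile("msr daifset, #0xf" ::: "memory");
	return flags;
}

static inline void sketch_daif_restore(unsigned long flags)
{
	asm volatile("msr daif, %0" :: "r" (flags) : "memory");
}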
@@ -18,11 +18,6 @@
  * with 4K (section size = 2M) but not with 16K (section size = 32M) or
  * 64K (section size = 512M).
  */
-#ifdef CONFIG_ARM64_4K_PAGES
-#define ARM64_KERNEL_USES_PMD_MAPS 1
-#else
-#define ARM64_KERNEL_USES_PMD_MAPS 0
-#endif
 /*
  * The idmap and swapper page tables need some space reserved in the kernel
@@ -34,7 +29,7 @@
  * VA range, so pages required to map highest possible PA are reserved in all
  * cases.
  */
-#if ARM64_KERNEL_USES_PMD_MAPS
+#ifdef CONFIG_ARM64_4K_PAGES
 #define SWAPPER_PGTABLE_LEVELS	(CONFIG_PGTABLE_LEVELS - 1)
 #else
 #define SWAPPER_PGTABLE_LEVELS	(CONFIG_PGTABLE_LEVELS)
@@ -96,7 +91,7 @@
 #define INIT_IDMAP_DIR_PAGES	EARLY_PAGES(KIMAGE_VADDR, _end + MAX_FDT_SIZE + SWAPPER_BLOCK_SIZE, 1)
 /* Initial memory map size */
-#if ARM64_KERNEL_USES_PMD_MAPS
+#ifdef CONFIG_ARM64_4K_PAGES
 #define SWAPPER_BLOCK_SHIFT	PMD_SHIFT
 #define SWAPPER_BLOCK_SIZE	PMD_SIZE
 #define SWAPPER_TABLE_SHIFT	PUD_SHIFT
@@ -112,7 +107,7 @@
 #define SWAPPER_PTE_FLAGS	(PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
 #define SWAPPER_PMD_FLAGS	(PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
-#if ARM64_KERNEL_USES_PMD_MAPS
+#ifdef CONFIG_ARM64_4K_PAGES
 #define SWAPPER_RW_MMUFLAGS	(PMD_ATTRINDX(MT_NORMAL) | SWAPPER_PMD_FLAGS)
 #define SWAPPER_RX_MMUFLAGS	(SWAPPER_RW_MMUFLAGS | PMD_SECT_RDONLY)
 #else
...
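The section sizes quoted in the comment above follow directly from the page size: each translation level resolves PAGE_SHIFT - 3 bits, so a PMD-level block spans 1 << (2 * PAGE_SHIFT - 3) bytes. A throwaway user-space check of those numbers (nothing below comes from the kernel headers):

#include <stdio.h>

/* Prints 2M for 4K pages, 32M for 16K pages and 512M for 64K pages,
 * matching the section sizes listed in the comment. */
int main(void)
{
	const int page_shifts[] = { 12, 14, 16 };	/* 4K, 16K, 64K pages */

	for (int i = 0; i < 3; i++) {
		int page_shift = page_shifts[i];
		int pmd_shift = page_shift + (page_shift - 3);
		unsigned long long section = 1ULL << pmd_shift;

		printf("%2dK pages -> section size %lluM\n",
		       1 << (page_shift - 10), section >> 20);
	}
	return 0;
}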
@@ -10,7 +10,6 @@
 #include <linux/compiler_types.h>
 #include <linux/export.h>
-#include <linux/jump_label.h>
 #include <linux/stringify.h>
 #include <asm/alternative.h>
 #include <asm/alternative-macros.h>
...
@@ -18,6 +18,7 @@
 #include <asm/cacheflush.h>
 #include <asm/cpufeature.h>
+#include <asm/daifflags.h>
 #include <asm/proc-fns.h>
 #include <asm-generic/mm_hooks.h>
 #include <asm/cputype.h>
@@ -152,6 +153,7 @@ static inline void cpu_replace_ttbr1(pgd_t *pgdp, pgd_t *idmap)
 	typedef void (ttbr_replace_func)(phys_addr_t);
 	extern ttbr_replace_func idmap_cpu_replace_ttbr1;
 	ttbr_replace_func *replace_phys;
+	unsigned long daif;
 	/* phys_to_ttbr() zeros lower 2 bits of ttbr with 52-bit PA */
 	phys_addr_t ttbr1 = phys_to_ttbr(virt_to_phys(pgdp));
@@ -171,7 +173,15 @@ static inline void cpu_replace_ttbr1(pgd_t *pgdp, pgd_t *idmap)
 	replace_phys = (void *)__pa_symbol(idmap_cpu_replace_ttbr1);
 	__cpu_install_idmap(idmap);
+	/*
+	 * We really don't want to take *any* exceptions while TTBR1 is
+	 * in the process of being replaced so mask everything.
+	 */
+	daif = local_daif_save();
 	replace_phys(ttbr1);
+	local_daif_restore(daif);
 	cpu_uninstall_idmap();
 }
...
@@ -609,7 +609,6 @@ extern pgd_t init_pg_dir[PTRS_PER_PGD];
 extern pgd_t init_pg_end[];
 extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
-extern pgd_t idmap_pg_end[];
 extern pgd_t tramp_pg_dir[PTRS_PER_PGD];
 extern pgd_t reserved_pg_dir[PTRS_PER_PGD];
...
@@ -315,13 +315,13 @@ static inline void compat_start_thread(struct pt_regs *regs, unsigned long pc,
 }
 #endif
-static inline bool is_ttbr0_addr(unsigned long addr)
+static __always_inline bool is_ttbr0_addr(unsigned long addr)
 {
 	/* entry assembly clears tags for TTBR0 addrs */
 	return addr < TASK_SIZE;
 }
-static inline bool is_ttbr1_addr(unsigned long addr)
+static __always_inline bool is_ttbr1_addr(unsigned long addr)
 {
 	/* TTBR1 addresses may have a tag if KASAN_SW_TAGS is in use */
 	return arch_kasan_reset_tag(addr) >= PAGE_OFFSET;
...
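The only change in this hunk is static inline becoming static __always_inline: these helpers are called from noinstr entry code, and a plain inline may be emitted out of line, turning the call into exactly the kind of instrumentable call the entry path must avoid. A schematic, self-contained sketch of the pattern (the macros below merely stand in for the kernel's __always_inline, noinstr and TASK_SIZE):

#include <stdbool.h>

/* Stand-ins so the sketch compiles on its own; none of these names exist
 * in the kernel. */
#define always_inline_sketch	inline __attribute__((always_inline))
#define noinstr_sketch		__attribute__((no_instrument_function))
#define TASK_SIZE_SKETCH	(1UL << 48)

/* Forced inline, so using it from "no instrumentation" code never emits
 * an out-of-line, instrumentable call. */
static always_inline_sketch bool sketch_is_ttbr0_addr(unsigned long addr)
{
	return addr < TASK_SIZE_SKETCH;
}

static noinstr_sketch void sketch_el1_abort(unsigned long far)
{
	if (sketch_is_ttbr0_addr(far)) {
		/* handled entirely inline; no instrumentable call was made */
	}
}

int main(void)
{
	sketch_el1_abort(0x1234UL);
	return 0;
}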
@@ -62,6 +62,10 @@ struct sigcontext {
  * context. Such structures must be placed after the rt_sigframe on the stack
  * and be 16-byte aligned. The last structure must be a dummy one with the
  * magic and size set to 0.
+ *
+ * Note that the values allocated for use as magic should be chosen to
+ * be meaningful in ASCII to aid manual parsing, ZA doesn't follow this
+ * convention due to oversight but it should be observed for future additions.
  */
 struct _aarch64_ctx {
 	__u32 magic;
...
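The convention documented in the added comment is visible in the existing magic values: for example SVE_MAGIC, 0x53564501, carries the ASCII bytes 'S', 'V', 'E' in its upper three bytes. A throwaway user-space illustration (the constant is copied by hand here and is not part of this diff):

#include <stdio.h>
#include <stdint.h>

/* Decodes the top three bytes of the magic as ASCII characters, which is
 * what makes such records easy to spot when manually parsing a signal
 * frame dump. Prints "SVE + 0x01". */
#define SVE_MAGIC 0x53564501u

int main(void)
{
	uint32_t magic = SVE_MAGIC;

	printf("%c%c%c + 0x%02x\n",
	       (int)((magic >> 24) & 0xff),	/* 'S' */
	       (int)((magic >> 16) & 0xff),	/* 'V' */
	       (int)((magic >> 8) & 0xff),	/* 'E' */
	       magic & 0xff);			/* 0x01 */
	return 0;
}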
@@ -196,7 +196,7 @@ static void __apply_alternatives(const struct alt_region *region,
 	}
 }
-void apply_alternatives_vdso(void)
+static void __init apply_alternatives_vdso(void)
 {
 	struct alt_region region;
 	const struct elf64_hdr *hdr;
@@ -220,7 +220,7 @@ void apply_alternatives_vdso(void)
 	__apply_alternatives(&region, false, &all_capabilities[0]);
 }
-static const struct alt_region kernel_alternatives = {
+static const struct alt_region kernel_alternatives __initconst = {
 	.begin	= (struct alt_instr *)__alt_instructions,
 	.end	= (struct alt_instr *)__alt_instructions_end,
 };
@@ -229,7 +229,7 @@ static const struct alt_region kernel_alternatives = {
  * We might be patching the stop_machine state machine, so implement a
  * really simple polling protocol here.
  */
-static int __apply_alternatives_multi_stop(void *unused)
+static int __init __apply_alternatives_multi_stop(void *unused)
 {
 	/* We always have a CPU 0 at this point (__init) */
 	if (smp_processor_id()) {
...
@@ -30,7 +30,7 @@
 /*
  * Handle IRQ/context state management when entering from kernel mode.
  * Before this function is called it is not safe to call regular kernel code,
- * intrumentable code, or any code which may trigger an exception.
+ * instrumentable code, or any code which may trigger an exception.
  *
  * This is intended to match the logic in irqentry_enter(), handling the kernel
  * mode transitions only.
@@ -63,7 +63,7 @@ static void noinstr enter_from_kernel_mode(struct pt_regs *regs)
 /*
  * Handle IRQ/context state management when exiting to kernel mode.
  * After this function returns it is not safe to call regular kernel code,
- * intrumentable code, or any code which may trigger an exception.
+ * instrumentable code, or any code which may trigger an exception.
  *
  * This is intended to match the logic in irqentry_exit(), handling the kernel
  * mode transitions only, and with preemption handled elsewhere.
@@ -97,7 +97,7 @@ static void noinstr exit_to_kernel_mode(struct pt_regs *regs)
 /*
  * Handle IRQ/context state management when entering from user mode.
  * Before this function is called it is not safe to call regular kernel code,
- * intrumentable code, or any code which may trigger an exception.
+ * instrumentable code, or any code which may trigger an exception.
  */
 static __always_inline void __enter_from_user_mode(void)
 {
@@ -116,7 +116,7 @@ static __always_inline void enter_from_user_mode(struct pt_regs *regs)
 /*
  * Handle IRQ/context state management when exiting to user mode.
  * After this function returns it is not safe to call regular kernel code,
- * intrumentable code, or any code which may trigger an exception.
+ * instrumentable code, or any code which may trigger an exception.
  */
 static __always_inline void __exit_to_user_mode(void)
 {
@@ -152,7 +152,7 @@ asmlinkage void noinstr asm_exit_to_user_mode(struct pt_regs *regs)
 /*
  * Handle IRQ/context state management when entering an NMI from user/kernel
  * mode. Before this function is called it is not safe to call regular kernel
- * code, intrumentable code, or any code which may trigger an exception.
+ * code, instrumentable code, or any code which may trigger an exception.
  */
 static void noinstr arm64_enter_nmi(struct pt_regs *regs)
 {
@@ -170,7 +170,7 @@ static void noinstr arm64_enter_nmi(struct pt_regs *regs)
 /*
  * Handle IRQ/context state management when exiting an NMI from user/kernel
  * mode. After this function returns it is not safe to call regular kernel
- * code, intrumentable code, or any code which may trigger an exception.
+ * code, instrumentable code, or any code which may trigger an exception.
  */
 static void noinstr arm64_exit_nmi(struct pt_regs *regs)
 {
@@ -192,7 +192,7 @@ static void noinstr arm64_exit_nmi(struct pt_regs *regs)
 /*
  * Handle IRQ/context state management when entering a debug exception from
  * kernel mode. Before this function is called it is not safe to call regular
- * kernel code, intrumentable code, or any code which may trigger an exception.
+ * kernel code, instrumentable code, or any code which may trigger an exception.
  */
 static void noinstr arm64_enter_el1_dbg(struct pt_regs *regs)
 {
@@ -207,7 +207,7 @@ static void noinstr arm64_enter_el1_dbg(struct pt_regs *regs)
 /*
  * Handle IRQ/context state management when exiting a debug exception from
  * kernel mode. After this function returns it is not safe to call regular
- * kernel code, intrumentable code, or any code which may trigger an exception.
+ * kernel code, instrumentable code, or any code which may trigger an exception.
  */
 static void noinstr arm64_exit_el1_dbg(struct pt_regs *regs)
 {
...
@@ -141,10 +141,6 @@ static bool __init has_pv_steal_clock(void)
 {
 	struct arm_smccc_res res;
-	/* To detect the presence of PV time support we require SMCCC 1.1+ */
-	if (arm_smccc_1_1_get_conduit() == SMCCC_CONDUIT_NONE)
-		return false;
 	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
 			     ARM_SMCCC_HV_PV_TIME_FEATURES, &res);
...
@@ -1196,7 +1196,7 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
 	WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END));
-	if (!ARM64_KERNEL_USES_PMD_MAPS)
+	if (!IS_ENABLED(CONFIG_ARM64_4K_PAGES))
 		return vmemmap_populate_basepages(start, end, node, altmap);
 	do {
...
@@ -189,16 +189,12 @@ SYM_FUNC_END(cpu_do_resume)
  * called by anything else. It can only be executed from a TTBR0 mapping.
  */
 SYM_TYPED_FUNC_START(idmap_cpu_replace_ttbr1)
-	save_and_disable_daif flags=x2
 	__idmap_cpu_set_reserved_ttbr1 x1, x3
 	offset_ttbr1 x0, x3
 	msr	ttbr1_el1, x0
 	isb
-	restore_daif x2
 	ret
 SYM_FUNC_END(idmap_cpu_replace_ttbr1)
 .popsection
...