Commit 2a6dcb2b authored by James Morse, committed by Will Deacon

arm64: cpufeature: Schedule enable() calls instead of calling them via IPI

The enable() call for a cpufeature/errata is called using on_each_cpu().
This issues a cross-call IPI to get the work done. Implicitly, this
stashes the running PSTATE in SPSR when the CPU receives the IPI, and
restores it when we return. This means an enable() call can never modify
PSTATE.

To allow PAN to do this, change the on_each_cpu() call to use
stop_machine(). This schedules the work on each CPU which allows
us to modify PSTATE.

This involves changing the prototype of all the enable() functions.
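(For orientation: the callback types the two APIs expect differ, which is what forces the prototype change. The declarations below are an approximation of the kernel headers of this era, not part of the patch.)

/* smp.h: IPI-based cross call, callback returns void */
void on_each_cpu(void (*func)(void *info), void *info, int wait);

/* stop_machine.h: scheduled work, callback must return int */
typedef int (*cpu_stop_fn_t)(void *arg);
int stop_machine(cpu_stop_fn_t fn, void *data, const struct cpumask *cpus);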

enable_cpu_capabilities() is called during boot and enables the feature
on all online CPUs. This path now uses stop_machine(). CPU features for
hotplug'd CPUs are enabled by verify_local_cpu_features() which only
acts on the local CPU, and can already modify the running PSTATE as it
is called from secondary_start_kernel().
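(A rough sketch of the two invocation paths after this change; illustrative only, not the literal kernel code.)

/* Boot path, enable_cpu_capabilities(): the callback is scheduled on every
 * online CPU, so PSTATE changes made inside enable() survive the call. */
stop_machine(caps->enable, NULL, cpu_online_mask);

/* Hotplug path, verify_local_cpu_features(): runs on the new CPU itself
 * (from secondary_start_kernel()), so the callback is invoked directly. */
if (caps->enable)
	caps->enable(NULL);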
Reported-by: Tony Thompson <anthony.thompson@arm.com>
Reported-by: Vladimir Murzin <vladimir.murzin@arm.com>
Signed-off-by: James Morse <james.morse@arm.com>
Cc: Suzuki K Poulose <suzuki.poulose@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Parent 87261d19
@@ -94,7 +94,7 @@ struct arm64_cpu_capabilities {
 	u16 capability;
 	int def_scope;			/* default scope */
 	bool (*matches)(const struct arm64_cpu_capabilities *caps, int scope);
-	void (*enable)(void *);		/* Called on all active CPUs */
+	int (*enable)(void *);		/* Called on all active CPUs */
 	union {
 		struct {	/* To be used for erratum handling only */
 			u32 midr_model;
@@ -188,8 +188,8 @@ static inline void spin_lock_prefetch(const void *ptr)
 #endif
 
-void cpu_enable_pan(void *__unused);
-void cpu_enable_uao(void *__unused);
-void cpu_enable_cache_maint_trap(void *__unused);
+int cpu_enable_pan(void *__unused);
+int cpu_enable_uao(void *__unused);
+int cpu_enable_cache_maint_trap(void *__unused);
 
 #endif /* __ASM_PROCESSOR_H */
@@ -39,10 +39,11 @@ has_mismatched_cache_line_size(const struct arm64_cpu_capabilities *entry,
 		(arm64_ftr_reg_ctrel0.sys_val & arm64_ftr_reg_ctrel0.strict_mask);
 }
 
-static void cpu_enable_trap_ctr_access(void *__unused)
+static int cpu_enable_trap_ctr_access(void *__unused)
 {
 	/* Clear SCTLR_EL1.UCT */
 	config_sctlr_el1(SCTLR_EL1_UCT, 0);
+	return 0;
 }
 
 #define MIDR_RANGE(model, min, max) \
@@ -19,7 +19,9 @@
 #define pr_fmt(fmt) "CPU features: " fmt
 
 #include <linux/bsearch.h>
+#include <linux/cpumask.h>
 #include <linux/sort.h>
+#include <linux/stop_machine.h>
 #include <linux/types.h>
 #include <asm/cpu.h>
 #include <asm/cpufeature.h>
@@ -941,7 +943,13 @@ void __init enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
 {
 	for (; caps->matches; caps++)
 		if (caps->enable && cpus_have_cap(caps->capability))
-			on_each_cpu(caps->enable, NULL, true);
+			/*
+			 * Use stop_machine() as it schedules the work allowing
+			 * us to modify PSTATE, instead of on_each_cpu() which
+			 * uses an IPI, giving us a PSTATE that disappears when
+			 * we return.
+			 */
+			stop_machine(caps->enable, NULL, cpu_online_mask);
 }
 
 /*
@@ -428,9 +428,10 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
 	force_signal_inject(SIGILL, ILL_ILLOPC, regs, 0);
 }
 
-void cpu_enable_cache_maint_trap(void *__unused)
+int cpu_enable_cache_maint_trap(void *__unused)
 {
 	config_sctlr_el1(SCTLR_EL1_UCI, 0);
+	return 0;
 }
 
 #define __user_cache_maint(insn, address, res)	\
@@ -670,9 +670,10 @@ asmlinkage int __exception do_debug_exception(unsigned long addr,
 NOKPROBE_SYMBOL(do_debug_exception);
 
 #ifdef CONFIG_ARM64_PAN
-void cpu_enable_pan(void *__unused)
+int cpu_enable_pan(void *__unused)
 {
 	config_sctlr_el1(SCTLR_EL1_SPAN, 0);
+	return 0;
 }
 #endif /* CONFIG_ARM64_PAN */
@@ -683,8 +684,9 @@ void cpu_enable_pan(void *__unused)
  * We need to enable the feature at runtime (instead of adding it to
  * PSR_MODE_EL1h) as the feature may not be implemented by the cpu.
  */
-void cpu_enable_uao(void *__unused)
+int cpu_enable_uao(void *__unused)
 {
 	asm(SET_PSTATE_UAO(1));
+	return 0;
 }
 #endif /* CONFIG_ARM64_UAO */