Commit c131623b authored by Jeremy Linton, committed by Greg Kroah-Hartman

arm64: Always enable ssb vulnerability detection

[ Upstream commit d42281b6e49510f078ace15a8ea10f71e6262581 ]

Ensure we are always able to detect whether or not the CPU is affected
by SSB, so that we can later advertise this to userspace.
Signed-off-by: Jeremy Linton <jeremy.linton@arm.com>
Reviewed-by: Andre Przywara <andre.przywara@arm.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Tested-by: Stefan Wahren <stefan.wahren@i2se.com>
[will: Use IS_ENABLED instead of #ifdef]
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Parent 47a11f2e
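The "[will: Use IS_ENABLED instead of #ifdef]" note refers to the kernel idiom of testing a Kconfig option with a compile-time constant expression instead of preprocessor guards: the disabled branch is still compiled and type-checked, and the optimizer then removes it. Below is a minimal, self-contained sketch of that pattern; the IS_ENABLED stand-in, CONFIG_MY_FEATURE, and my_feature_apply() are illustrative names, not the kernel's actual definitions.

#include <stdbool.h>
#include <stdio.h>

/*
 * Stand-in for the kernel's IS_ENABLED(CONFIG_...): in the real kernel it
 * expands to a constant 0 or 1 derived from the .config.  Hard-wired to 0
 * here to mimic building with the option disabled.
 */
#define CONFIG_MY_FEATURE 0     /* illustrative option, not a real Kconfig symbol */
#define IS_ENABLED(option) (option)

/* Illustrative hook following the shape of arm64_set_ssbd_mitigation(). */
static void my_feature_apply(bool state)
{
        if (!IS_ENABLED(CONFIG_MY_FEATURE)) {
                /* Still compiled and type-checked, but optimized away. */
                printf("feature disabled by kernel configuration\n");
                return;
        }

        printf("applying mitigation state: %d\n", (int)state);
}

int main(void)
{
        my_feature_apply(true);
        return 0;
}

Unlike an #ifdef/#else pair, this keeps a single definition of the function and lets the compiler drop the dead branch, so both configurations stay buildable without duplicated stubs.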
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -525,11 +525,7 @@ static inline int arm64_get_ssbd_state(void)
 #endif
 }
 
-#ifdef CONFIG_ARM64_SSBD
 void arm64_set_ssbd_mitigation(bool state);
-#else
-static inline void arm64_set_ssbd_mitigation(bool state) {}
-#endif
 
 #endif /* __ASSEMBLY__ */
--- a/arch/arm64/kernel/cpu_errata.c
+++ b/arch/arm64/kernel/cpu_errata.c
@@ -239,7 +239,6 @@ enable_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry)
 }
 #endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */
 
-#ifdef CONFIG_ARM64_SSBD
 DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
 
 int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;
@@ -312,6 +311,11 @@ void __init arm64_enable_wa2_handling(struct alt_instr *alt,
 
 void arm64_set_ssbd_mitigation(bool state)
 {
+	if (!IS_ENABLED(CONFIG_ARM64_SSBD)) {
+		pr_info_once("SSBD disabled by kernel configuration\n");
+		return;
+	}
+
 	if (this_cpu_has_cap(ARM64_SSBS)) {
 		if (state)
 			asm volatile(SET_PSTATE_SSBS(0));
@@ -431,7 +435,6 @@ static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
 
 	return required;
 }
-#endif /* CONFIG_ARM64_SSBD */
 
 #ifdef CONFIG_ARM64_ERRATUM_1463225
 DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
@@ -710,14 +713,12 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 		ERRATA_MIDR_RANGE_LIST(arm64_harden_el2_vectors),
 	},
 #endif
-#ifdef CONFIG_ARM64_SSBD
 	{
 		.desc = "Speculative Store Bypass Disable",
 		.capability = ARM64_SSBD,
 		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
 		.matches = has_ssbd_mitigation,
 	},
-#endif
 #ifdef CONFIG_ARM64_ERRATUM_1463225
 	{
 		.desc = "ARM erratum 1463225",
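Net effect of the hunks above, sketched rather than quoted: the ARM64_SSBD capability entry now sits in arm64_errata[] unconditionally, so has_ssbd_mitigation() always runs and the kernel always records whether the CPU is affected, while the mitigation itself collapses to an early return when CONFIG_ARM64_SSBD=n. The toy detect/apply split below uses made-up names (detect_ssb, apply_ssb_mitigation, CONFIG_SSBD_MITIGATION) and not the kernel's arm64_cpu_capabilities machinery.

#include <stdbool.h>
#include <stdio.h>

#define CONFIG_SSBD_MITIGATION 0        /* illustrative: mitigation compiled out */
#define IS_ENABLED(option) (option)

static bool cpu_affected_by_ssb;        /* illustrative detection state */

/* Detection always runs, mirroring the now-unconditional errata entry. */
static bool detect_ssb(void)
{
        cpu_affected_by_ssb = true;     /* pretend firmware reported "affected" */
        return cpu_affected_by_ssb;
}

/* Mitigation degrades to an early return when the option is off. */
static void apply_ssb_mitigation(bool state)
{
        if (!IS_ENABLED(CONFIG_SSBD_MITIGATION))
                return;
        printf("mitigation set to %d\n", (int)state);
}

int main(void)
{
        if (detect_ssb())
                printf("CPU affected by SSB; status can be reported to userspace\n");
        apply_ssb_mitigation(true);
        return 0;
}

This split is what lets a later change advertise the SSB status to userspace even on kernels built without the mitigation.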