Commit 0f15adbb authored by Will Deacon, committed by Catalin Marinas

arm64: Add skeleton to harden the branch predictor against aliasing attacks

Aliasing attacks against CPU branch predictors can allow an attacker to
redirect speculative control flow on some CPUs and potentially divulge
information from one context to another.

This patch adds initial skeleton code behind a new Kconfig option to
enable implementation-specific mitigations against these attacks for
CPUs that are affected.
Co-developed-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Parent 95e3de35
@@ -874,6 +874,23 @@ config UNMAP_KERNEL_AT_EL0
If unsure, say Y.
config HARDEN_BRANCH_PREDICTOR
bool "Harden the branch predictor against aliasing attacks" if EXPERT
default y
help
Speculation attacks against some high-performance processors rely on
being able to manipulate the branch predictor for a victim context by
executing aliasing branches in the attacker context. Such attacks
can be partially mitigated against by clearing internal branch
predictor state and limiting the prediction logic in some situations.
This config option will take CPU-specific actions to harden the
branch predictor against aliasing attacks and may rely on specific
instruction sequences or control bits being set by the system
firmware.
If unsure, say Y.
menuconfig ARMV8_DEPRECATED
bool "Emulate deprecated/obsolete ARMv8 instructions"
depends on COMPAT
......
@@ -42,7 +42,8 @@
#define ARM64_HAS_DCPOP 21
#define ARM64_SVE 22
#define ARM64_UNMAP_KERNEL_AT_EL0 23
#define ARM64_HARDEN_BRANCH_PREDICTOR 24
-#define ARM64_NCAPS 24
+#define ARM64_NCAPS 25
#endif /* __ASM_CPUCAPS_H */
@@ -41,6 +41,43 @@ static inline bool arm64_kernel_unmapped_at_el0(void)
cpus_have_const_cap(ARM64_UNMAP_KERNEL_AT_EL0);
}
typedef void (*bp_hardening_cb_t)(void);
struct bp_hardening_data {
int hyp_vectors_slot;
bp_hardening_cb_t fn;
};
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
extern char __bp_harden_hyp_vecs_start[], __bp_harden_hyp_vecs_end[];
DECLARE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
{
return this_cpu_ptr(&bp_hardening_data);
}
static inline void arm64_apply_bp_hardening(void)
{
struct bp_hardening_data *d;
if (!cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR))
return;
d = arm64_get_bp_hardening_data();
if (d->fn)
d->fn();
}
#else
static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
{
return NULL;
}
static inline void arm64_apply_bp_hardening(void) { }
#endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */
extern void paging_init(void);
extern void bootmem_init(void);
extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
......
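The mmu.h hunk above records a per-CPU hyp_vectors_slot, but its consumer is outside this patch. As a rough sketch of the intent (illustrative only; the helper below and its name are assumptions, not code from this commit), KVM would resolve its EL2 vector base from that slot, each slot being one 2 KB copy of the hardened vectors:

#include <linux/sizes.h>
#include <asm/mmu.h>

/*
 * Illustrative sketch, not part of this commit: resolve the hardened
 * vector base for the current CPU, returning NULL (i.e. "use the
 * default __kvm_hyp_vector") when no callback has been installed.
 */
static void *bp_harden_vecs_for_this_cpu(void)
{
	struct bp_hardening_data *data = arm64_get_bp_hardening_data();

	if (!data->fn)
		return NULL;

	/* Each slot is a 2 KB copy written by __copy_hyp_vect_bpi(). */
	return __bp_harden_hyp_vecs_start + data->hyp_vectors_slot * SZ_2K;
}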
@@ -439,6 +439,7 @@
/* id_aa64pfr0 */
#define ID_AA64PFR0_CSV3_SHIFT 60
#define ID_AA64PFR0_CSV2_SHIFT 56
#define ID_AA64PFR0_SVE_SHIFT 32
#define ID_AA64PFR0_GIC_SHIFT 24
#define ID_AA64PFR0_ASIMD_SHIFT 20
......
@@ -53,6 +53,10 @@ arm64-obj-$(CONFIG_ARM64_RELOC_TEST) += arm64-reloc-test.o
arm64-reloc-test-y := reloc_test_core.o reloc_test_syms.o
arm64-obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
ifeq ($(CONFIG_KVM),y)
arm64-obj-$(CONFIG_HARDEN_BRANCH_PREDICTOR) += bpi.o
endif
obj-y += $(arm64-obj-y) vdso/ probes/
obj-m += $(arm64-obj-m)
head-y := head.o
......
/*
* Contains CPU specific branch predictor invalidation sequences
*
* Copyright (C) 2018 ARM Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/linkage.h>
.macro ventry target
.rept 31
nop
.endr
b \target
.endm
.macro vectors target
ventry \target + 0x000
ventry \target + 0x080
ventry \target + 0x100
ventry \target + 0x180
ventry \target + 0x200
ventry \target + 0x280
ventry \target + 0x300
ventry \target + 0x380
ventry \target + 0x400
ventry \target + 0x480
ventry \target + 0x500
ventry \target + 0x580
ventry \target + 0x600
ventry \target + 0x680
ventry \target + 0x700
ventry \target + 0x780
.endm
.align 11
ENTRY(__bp_harden_hyp_vecs_start)
.rept 4
vectors __kvm_hyp_vector
.endr
ENTRY(__bp_harden_hyp_vecs_end)
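For reference, the layout arithmetic behind the file above: each ventry expands to 31 NOPs plus one branch, i.e. 32 instructions * 4 bytes = 0x80 bytes; a vectors expansion emits 16 entries, 16 * 0x80 = 0x800 bytes = 2 KB; and the .rept 4 reserves four such 2 KB slots (8 KB in total, 2 KB aligned via .align 11). This matches the SZ_2K stride assumed by __copy_hyp_vect_bpi() in the next hunk. A hedged runtime check along these lines (not in the patch; the helper name is invented) expresses the same arithmetic:

#include <linux/bug.h>
#include <linux/sizes.h>
#include <asm/mmu.h>

/* Illustrative only: bpi.S should provide exactly four 2 KB vector slots. */
static void check_bp_harden_vecs_layout(void)
{
	WARN_ON(__bp_harden_hyp_vecs_end - __bp_harden_hyp_vecs_start !=
		4 * SZ_2K);
}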
@@ -46,6 +46,80 @@ static int cpu_enable_trap_ctr_access(void *__unused)
return 0;
}
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
#ifdef CONFIG_KVM
static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
const char *hyp_vecs_end)
{
void *dst = lm_alias(__bp_harden_hyp_vecs_start + slot * SZ_2K);
int i;
for (i = 0; i < SZ_2K; i += 0x80)
memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);
flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
}
static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
const char *hyp_vecs_start,
const char *hyp_vecs_end)
{
static int last_slot = -1;
static DEFINE_SPINLOCK(bp_lock);
int cpu, slot = -1;
spin_lock(&bp_lock);
for_each_possible_cpu(cpu) {
if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
break;
}
}
if (slot == -1) {
last_slot++;
BUG_ON(((__bp_harden_hyp_vecs_end - __bp_harden_hyp_vecs_start)
/ SZ_2K) <= last_slot);
slot = last_slot;
__copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
}
__this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
__this_cpu_write(bp_hardening_data.fn, fn);
spin_unlock(&bp_lock);
}
#else
static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
const char *hyp_vecs_start,
const char *hyp_vecs_end)
{
__this_cpu_write(bp_hardening_data.fn, fn);
}
#endif /* CONFIG_KVM */
static void install_bp_hardening_cb(const struct arm64_cpu_capabilities *entry,
bp_hardening_cb_t fn,
const char *hyp_vecs_start,
const char *hyp_vecs_end)
{
u64 pfr0;
if (!entry->matches(entry, SCOPE_LOCAL_CPU))
return;
pfr0 = read_cpuid(ID_AA64PFR0_EL1);
if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT))
return;
__install_bp_hardening_cb(fn, hyp_vecs_start, hyp_vecs_end);
}
#endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */
#define MIDR_RANGE(model, min, max) \
.def_scope = SCOPE_LOCAL_CPU, \
.matches = is_affected_midr_range, \
......
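Note that install_bp_hardening_cb() above is only the skeleton; nothing in this patch registers a callback yet. Below is a hedged sketch of how a later CPU-specific errata entry might hook into it. The callback body and the names prefixed with "hypothetical" are placeholders, not from this commit; the int (*)(void *) enable-hook shape follows existing entries such as cpu_enable_trap_ctr_access().

#include <asm/cpufeature.h>
#include <asm/mmu.h>

/* Placeholder: a real callback would run a CPU-specific BP invalidation sequence. */
static void hypothetical_bp_inval(void)
{
}

/* Hypothetical .enable hook for an affected-MIDR arm64_cpu_capabilities entry. */
static int enable_hypothetical_bp_hardening(void *data)
{
	const struct arm64_cpu_capabilities *entry = data;

	install_bp_hardening_cb(entry, hypothetical_bp_inval,
				__bp_harden_hyp_vecs_start,
				__bp_harden_hyp_vecs_end);
	return 0;
}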
@@ -147,6 +147,7 @@ static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV3_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV2_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_SVE_SHIFT, 4, 0),
ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_GIC_SHIFT, 4, 0),
S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI),
......
@@ -721,12 +721,15 @@ el0_ia:
* Instruction abort handling
*/
mrs x26, far_el1
-enable_daif
+enable_da_f
#ifdef CONFIG_TRACE_IRQFLAGS
bl trace_hardirqs_off
#endif
ct_user_exit
mov x0, x26
mov x1, x25
mov x2, sp
-bl do_mem_abort
+bl do_el0_ia_bp_hardening
b ret_to_user
el0_fpsimd_acc:
/*
......
@@ -249,6 +249,8 @@ asmlinkage void post_ttbr_update_workaround(void)
"ic iallu; dsb nsh; isb",
ARM64_WORKAROUND_CAVIUM_27456,
CONFIG_CAVIUM_ERRATUM_27456));
arm64_apply_bp_hardening();
}
static int asids_init(void)
......
@@ -708,6 +708,23 @@ asmlinkage void __exception do_mem_abort(unsigned long addr, unsigned int esr,
arm64_notify_die("", regs, &info, esr);
}
asmlinkage void __exception do_el0_ia_bp_hardening(unsigned long addr,
unsigned int esr,
struct pt_regs *regs)
{
/*
* We've taken an instruction abort from userspace and not yet
* re-enabled IRQs. If the address is a kernel address, apply
* BP hardening prior to enabling IRQs and pre-emption.
*/
if (addr > TASK_SIZE)
arm64_apply_bp_hardening();
local_irq_enable();
do_mem_abort(addr, esr, regs);
}
asmlinkage void __exception do_sp_pc_abort(unsigned long addr,
unsigned int esr,
struct pt_regs *regs)
......