diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 1645e0d3a2c1408b95dc8f0b45bcb40a6cb05747..794fe81226024f6e3f2cbfc3232daa3a9e62c4a4 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -108,6 +108,13 @@
 	dmb	\opt
 	.endm
 
+/*
+ * RAS Error Synchronization barrier
+ */
+	.macro	esb
+	hint	#16
+	.endm
+
 /*
  * NOP sequence
  */
diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
index c367838700fa723994528559106bdd43216b1a82..803443d74926751da6b149d82e5f35a9f44cdadc 100644
--- a/arch/arm64/include/asm/esr.h
+++ b/arch/arm64/include/asm/esr.h
@@ -140,6 +140,13 @@
 #define ESR_ELx_WFx_ISS_WFE	(UL(1) << 0)
 #define ESR_ELx_xVC_IMM_MASK	((1UL << 16) - 1)
 
+#define DISR_EL1_IDS		(UL(1) << 24)
+/*
+ * DISR_EL1 and ESR_ELx share the bottom 13 bits, but the RES0 bits may mean
+ * different things in the future...
+ */
+#define DISR_EL1_ESR_MASK	(ESR_ELx_AET | ESR_ELx_EA | ESR_ELx_FSC)
+
 /* ESR value templates for specific events */
 
 /* BRK instruction trap from AArch64 state */
diff --git a/arch/arm64/include/asm/exception.h b/arch/arm64/include/asm/exception.h
index 0c2eec490abf65933b2d965270dfaf89523f3417..bc30429d8e91eb288bb81217380b67fc614e2db2 100644
--- a/arch/arm64/include/asm/exception.h
+++ b/arch/arm64/include/asm/exception.h
@@ -18,6 +18,8 @@
 #ifndef __ASM_EXCEPTION_H
 #define __ASM_EXCEPTION_H
 
+#include <asm/esr.h>
+
 #include <linux/interrupt.h>
 
 #define __exception	__attribute__((section(".exception.text")))
@@ -27,4 +29,16 @@
 #define __exception_irq_entry	__exception
 #endif
 
+static inline u32 disr_to_esr(u64 disr)
+{
+	unsigned int esr = ESR_ELx_EC_SERROR << ESR_ELx_EC_SHIFT;
+
+	if ((disr & DISR_EL1_IDS) == 0)
+		esr |= (disr & DISR_EL1_ESR_MASK);
+	else
+		esr |= (disr & ESR_ELx_ISS_MASK);
+
+	return esr;
+}
+
 #endif	/* __ASM_EXCEPTION_H */
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 023cacb946c3df99e1ea40f431ee5f2b2134d154..cee4ae25a5d18aed0309fdfad789dbccad63480b 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -216,6 +216,7 @@ static inline void spin_lock_prefetch(const void *ptr)
 
 int cpu_enable_pan(void *__unused);
 int cpu_enable_cache_maint_trap(void *__unused);
+int cpu_clear_disr(void *__unused);
 
 /* Userspace interface for PR_SVE_{SET,GET}_VL prctl()s: */
 #define SVE_SET_VL(arg)	sve_set_current_vl(arg)
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 1281bc8263c2e9ab47f14d4893da3beb4e8e33c4..b5d543fc677d007f6d6e556650a9aa459c4d83c0 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -279,6 +279,7 @@
 #define SYS_AMAIR_EL1			sys_reg(3, 0, 10, 3, 0)
 
 #define SYS_VBAR_EL1			sys_reg(3, 0, 12, 0, 0)
+#define SYS_DISR_EL1			sys_reg(3, 0, 12, 1, 1)
 
 #define SYS_ICC_IAR0_EL1		sys_reg(3, 0, 12, 8, 0)
 #define SYS_ICC_EOIR0_EL1		sys_reg(3, 0, 12, 8, 1)
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 116a5c9443a726073316c7c0ceb7820433828fa5..5612d6f46331cb7a15011f4f2ae5272ff40e6ecb 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -1039,6 +1039,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.sign = FTR_UNSIGNED,
 		.field_pos = ID_AA64PFR0_RAS_SHIFT,
 		.min_field_value = ID_AA64PFR0_RAS_V1,
+		.enable = cpu_clear_disr,
 	},
 #endif /* CONFIG_ARM64_RAS_EXTN */
 	{},
@@ -1470,3 +1471,11 @@ static int __init enable_mrs_emulation(void)
 }
 
 core_initcall(enable_mrs_emulation);
+
+int cpu_clear_disr(void *__unused)
+{
+	/* Firmware may have left a deferred SError in this register. */
+	write_sysreg_s(0, SYS_DISR_EL1);
+
+	return 0;
+}
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index ec4c3d82b4d6249e846b0788ce937386d52e2e9b..05e4ae934b23025d4c5cfc5bde4c147e18a0afeb 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -132,6 +132,11 @@ alternative_endif
 	ubfx	x11, x11, #1, #1
 	msr	oslar_el1, x11
 	reset_pmuserenr_el0 x0			// Disable PMU access from EL0
+
+alternative_if ARM64_HAS_RAS_EXTN
+	msr_s	SYS_DISR_EL1, xzr
+alternative_else_nop_endif
+
 	isb
 	ret
 ENDPROC(cpu_do_resume)
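
Note on the disr_to_esr() helper added above: it folds a deferred SError
syndrome recorded in DISR_EL1 into a synthetic ESR_ELx value by setting the
EC field to SError, then keeping either the architected AET/EA/FSC bits or,
when DISR_EL1.IDS is set, the whole IMPLEMENTATION DEFINED ISS. Below is a
minimal standalone sketch of that conversion that builds in userspace. It is
not kernel code: the ESR_ELx_* constants are inlined here with the values
from mainline asm/esr.h, and the sample DISR inputs in main() are made up
for illustration.

/* Standalone sketch of the DISR_EL1 -> ESR_ELx conversion; not kernel code. */
#include <stdint.h>
#include <stdio.h>

/* Constants inlined from mainline arch/arm64/include/asm/esr.h */
#define ESR_ELx_EC_SERROR	0x2fUL
#define ESR_ELx_EC_SHIFT	26
#define ESR_ELx_ISS_MASK	((1UL << 25) - 1)	/* ISS, bits [24:0] */
#define ESR_ELx_AET		(0x7UL << 10)
#define ESR_ELx_EA		(1UL << 9)
#define ESR_ELx_FSC		0x3fUL

#define DISR_EL1_IDS		(1UL << 24)
#define DISR_EL1_ESR_MASK	(ESR_ELx_AET | ESR_ELx_EA | ESR_ELx_FSC)

static uint32_t disr_to_esr(uint64_t disr)
{
	uint32_t esr = ESR_ELx_EC_SERROR << ESR_ELx_EC_SHIFT;

	if ((disr & DISR_EL1_IDS) == 0)
		/* Architected syndrome: only AET, EA and FSC are valid. */
		esr |= disr & DISR_EL1_ESR_MASK;
	else
		/* IMPLEMENTATION DEFINED syndrome: keep the whole ISS. */
		esr |= disr & ESR_ELx_ISS_MASK;

	return esr;
}

int main(void)
{
	/* Made-up architected record: AET and FSC survive, the rest is masked. */
	printf("%#x\n", disr_to_esr((0x2UL << 10) | 0x11));
	/* Made-up IMPDEF record (IDS set): the ISS passes through unmodified. */
	printf("%#x\n", disr_to_esr(DISR_EL1_IDS | 0xabcd));
	return 0;
}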