Commit 68ddbf09, authored by James Morse, committed by Catalin Marinas

arm64: kernel: Prepare for a DISR user

KVM would like to consume any pending SError (or RAS error) after guest
exit. Today it has to unmask SError and use dsb+isb to synchronise the
CPU. With the RAS extensions we can use ESB to synchronise any pending
SError.

Add the necessary macros to allow DISR to be read and converted to an
ESR.

We clear the DISR register when we enable the RAS cpufeature. While the
kernel has not executed any ESB instructions, any value we find in DISR
must have belonged to firmware. Executing an ESB instruction is the
only way to update DISR, so we can expect firmware to have handled
any deferred SError. By the same logic we clear DISR in the idle path.
Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Signed-off-by: James Morse <james.morse@arm.com>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Parent commit: f751daa4
...@@ -108,6 +108,13 @@ ...@@ -108,6 +108,13 @@
dmb \opt dmb \opt
.endm .endm
/*
* RAS Error Synchronization barrier
*/
.macro esb
hint #16
.endm
/* /*
* NOP sequence * NOP sequence
*/ */
......
...@@ -140,6 +140,13 @@ ...@@ -140,6 +140,13 @@
#define ESR_ELx_WFx_ISS_WFE (UL(1) << 0) #define ESR_ELx_WFx_ISS_WFE (UL(1) << 0)
#define ESR_ELx_xVC_IMM_MASK ((1UL << 16) - 1) #define ESR_ELx_xVC_IMM_MASK ((1UL << 16) - 1)
#define DISR_EL1_IDS (UL(1) << 24)
/*
* DISR_EL1 and ESR_ELx share the bottom 13 bits, but the RES0 bits may mean
* different things in the future...
*/
#define DISR_EL1_ESR_MASK (ESR_ELx_AET | ESR_ELx_EA | ESR_ELx_FSC)
/* ESR value templates for specific events */ /* ESR value templates for specific events */
/* BRK instruction trap from AArch64 state */ /* BRK instruction trap from AArch64 state */
......
...@@ -18,6 +18,8 @@ ...@@ -18,6 +18,8 @@
#ifndef __ASM_EXCEPTION_H #ifndef __ASM_EXCEPTION_H
#define __ASM_EXCEPTION_H #define __ASM_EXCEPTION_H
#include <asm/esr.h>
#include <linux/interrupt.h> #include <linux/interrupt.h>
#define __exception __attribute__((section(".exception.text"))) #define __exception __attribute__((section(".exception.text")))
...@@ -27,4 +29,16 @@ ...@@ -27,4 +29,16 @@
#define __exception_irq_entry __exception #define __exception_irq_entry __exception
#endif #endif
static inline u32 disr_to_esr(u64 disr)
{
unsigned int esr = ESR_ELx_EC_SERROR << ESR_ELx_EC_SHIFT;
if ((disr & DISR_EL1_IDS) == 0)
esr |= (disr & DISR_EL1_ESR_MASK);
else
esr |= (disr & ESR_ELx_ISS_MASK);
return esr;
}
#endif /* __ASM_EXCEPTION_H */ #endif /* __ASM_EXCEPTION_H */
...@@ -216,6 +216,7 @@ static inline void spin_lock_prefetch(const void *ptr) ...@@ -216,6 +216,7 @@ static inline void spin_lock_prefetch(const void *ptr)
int cpu_enable_pan(void *__unused); int cpu_enable_pan(void *__unused);
int cpu_enable_cache_maint_trap(void *__unused); int cpu_enable_cache_maint_trap(void *__unused);
int cpu_clear_disr(void *__unused);
/* Userspace interface for PR_SVE_{SET,GET}_VL prctl()s: */ /* Userspace interface for PR_SVE_{SET,GET}_VL prctl()s: */
#define SVE_SET_VL(arg) sve_set_current_vl(arg) #define SVE_SET_VL(arg) sve_set_current_vl(arg)
......
...@@ -279,6 +279,7 @@ ...@@ -279,6 +279,7 @@
#define SYS_AMAIR_EL1 sys_reg(3, 0, 10, 3, 0) #define SYS_AMAIR_EL1 sys_reg(3, 0, 10, 3, 0)
#define SYS_VBAR_EL1 sys_reg(3, 0, 12, 0, 0) #define SYS_VBAR_EL1 sys_reg(3, 0, 12, 0, 0)
#define SYS_DISR_EL1 sys_reg(3, 0, 12, 1, 1)
#define SYS_ICC_IAR0_EL1 sys_reg(3, 0, 12, 8, 0) #define SYS_ICC_IAR0_EL1 sys_reg(3, 0, 12, 8, 0)
#define SYS_ICC_EOIR0_EL1 sys_reg(3, 0, 12, 8, 1) #define SYS_ICC_EOIR0_EL1 sys_reg(3, 0, 12, 8, 1)
......
...@@ -1039,6 +1039,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = { ...@@ -1039,6 +1039,7 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
.sign = FTR_UNSIGNED, .sign = FTR_UNSIGNED,
.field_pos = ID_AA64PFR0_RAS_SHIFT, .field_pos = ID_AA64PFR0_RAS_SHIFT,
.min_field_value = ID_AA64PFR0_RAS_V1, .min_field_value = ID_AA64PFR0_RAS_V1,
.enable = cpu_clear_disr,
}, },
#endif /* CONFIG_ARM64_RAS_EXTN */ #endif /* CONFIG_ARM64_RAS_EXTN */
{}, {},
...@@ -1470,3 +1471,11 @@ static int __init enable_mrs_emulation(void) ...@@ -1470,3 +1471,11 @@ static int __init enable_mrs_emulation(void)
} }
core_initcall(enable_mrs_emulation); core_initcall(enable_mrs_emulation);
int cpu_clear_disr(void *__unused)
{
/* Firmware may have left a deferred SError in this register. */
write_sysreg_s(0, SYS_DISR_EL1);
return 0;
}
...@@ -132,6 +132,11 @@ alternative_endif ...@@ -132,6 +132,11 @@ alternative_endif
ubfx x11, x11, #1, #1 ubfx x11, x11, #1, #1
msr oslar_el1, x11 msr oslar_el1, x11
reset_pmuserenr_el0 x0 // Disable PMU access from EL0 reset_pmuserenr_el0 x0 // Disable PMU access from EL0
alternative_if ARM64_HAS_RAS_EXTN
msr_s SYS_DISR_EL1, xzr
alternative_else_nop_endif
isb isb
ret ret
ENDPROC(cpu_do_resume) ENDPROC(cpu_do_resume)
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册