Commit 0ce339a9 authored by Catalin Marinas

Merge branch 'arm64/common-esr-macros' of git://git.kernel.org/pub/scm/linux/kernel/git/mark/linux

ESR_ELx definitions clean-up from Mark Rutland.

* 'arm64/common-esr-macros' of git://git.kernel.org/pub/scm/linux/kernel/git/mark/linux:
  arm64: kvm: decode ESR_ELx.EC when reporting exceptions
  arm64: kvm: remove ESR_EL2_* macros
  arm64: remove ESR_EL1_* macros
  arm64: kvm: move to ESR_ELx macros
  arm64: decode ESR_ELx.EC when reporting exceptions
  arm64: move to ESR_ELx macros
  arm64: introduce common ESR_ELx_* definitions
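For orientation: the series works because the syndrome register has the same layout at every exception level, so one ESR_ELx_* namespace can replace the duplicated ESR_EL1_*/ESR_EL2_* copies. Bits [31:26] hold the exception class (EC), bit 25 the instruction-length flag (IL), and bits [24:0] the instruction-specific syndrome (ISS). The stand-alone sketch below is not part of the commit; its constants simply mirror the ESR_ELx_* definitions added in the diff, and the syndrome value is a hypothetical example.

```c
#include <stdint.h>
#include <stdio.h>

/* These constants mirror the ESR_ELx_* definitions introduced below. */
#define ESR_ELx_EC_SHIFT	26
#define ESR_ELx_EC_MASK		(0x3FUL << ESR_ELx_EC_SHIFT)
#define ESR_ELx_IL		(1UL << 25)
#define ESR_ELx_ISS_MASK	(ESR_ELx_IL - 1)

int main(void)
{
	/* Hypothetical syndrome: EC=0x24 (data abort from a lower EL),
	 * IL set (32-bit instruction), ISS=0x10 (external abort FSC). */
	uint32_t esr = (0x24u << ESR_ELx_EC_SHIFT) | (1u << 25) | 0x10;

	printf("EC  = %#lx\n", (esr & ESR_ELx_EC_MASK) >> ESR_ELx_EC_SHIFT);
	printf("IL  = %d\n", (esr & ESR_ELx_IL) ? 1 : 0);
	printf("ISS = %#lx\n", esr & ESR_ELx_ISS_MASK);
	return 0;
}
```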
@@ -18,40 +18,89 @@
 #ifndef __ASM_ESR_H
 #define __ASM_ESR_H
 
-#define ESR_EL1_WRITE		(1 << 6)
-#define ESR_EL1_CM		(1 << 8)
-#define ESR_EL1_IL		(1 << 25)
-
-#define ESR_EL1_EC_SHIFT	(26)
-#define ESR_EL1_EC_UNKNOWN	(0x00)
-#define ESR_EL1_EC_WFI		(0x01)
-#define ESR_EL1_EC_CP15_32	(0x03)
-#define ESR_EL1_EC_CP15_64	(0x04)
-#define ESR_EL1_EC_CP14_MR	(0x05)
-#define ESR_EL1_EC_CP14_LS	(0x06)
-#define ESR_EL1_EC_FP_ASIMD	(0x07)
-#define ESR_EL1_EC_CP10_ID	(0x08)
-#define ESR_EL1_EC_CP14_64	(0x0C)
-#define ESR_EL1_EC_ILL_ISS	(0x0E)
-#define ESR_EL1_EC_SVC32	(0x11)
-#define ESR_EL1_EC_SVC64	(0x15)
-#define ESR_EL1_EC_SYS64	(0x18)
-#define ESR_EL1_EC_IABT_EL0	(0x20)
-#define ESR_EL1_EC_IABT_EL1	(0x21)
-#define ESR_EL1_EC_PC_ALIGN	(0x22)
-#define ESR_EL1_EC_DABT_EL0	(0x24)
-#define ESR_EL1_EC_DABT_EL1	(0x25)
-#define ESR_EL1_EC_SP_ALIGN	(0x26)
-#define ESR_EL1_EC_FP_EXC32	(0x28)
-#define ESR_EL1_EC_FP_EXC64	(0x2C)
-#define ESR_EL1_EC_SERROR	(0x2F)
-#define ESR_EL1_EC_BREAKPT_EL0	(0x30)
-#define ESR_EL1_EC_BREAKPT_EL1	(0x31)
-#define ESR_EL1_EC_SOFTSTP_EL0	(0x32)
-#define ESR_EL1_EC_SOFTSTP_EL1	(0x33)
-#define ESR_EL1_EC_WATCHPT_EL0	(0x34)
-#define ESR_EL1_EC_WATCHPT_EL1	(0x35)
-#define ESR_EL1_EC_BKPT32	(0x38)
-#define ESR_EL1_EC_BRK64	(0x3C)
+#define ESR_ELx_EC_UNKNOWN	(0x00)
+#define ESR_ELx_EC_WFx		(0x01)
+/* Unallocated EC: 0x02 */
+#define ESR_ELx_EC_CP15_32	(0x03)
+#define ESR_ELx_EC_CP15_64	(0x04)
+#define ESR_ELx_EC_CP14_MR	(0x05)
+#define ESR_ELx_EC_CP14_LS	(0x06)
+#define ESR_ELx_EC_FP_ASIMD	(0x07)
+#define ESR_ELx_EC_CP10_ID	(0x08)
+/* Unallocated EC: 0x09 - 0x0B */
+#define ESR_ELx_EC_CP14_64	(0x0C)
+/* Unallocated EC: 0x0d */
+#define ESR_ELx_EC_ILL		(0x0E)
+/* Unallocated EC: 0x0F - 0x10 */
+#define ESR_ELx_EC_SVC32	(0x11)
+#define ESR_ELx_EC_HVC32	(0x12)
+#define ESR_ELx_EC_SMC32	(0x13)
+/* Unallocated EC: 0x14 */
+#define ESR_ELx_EC_SVC64	(0x15)
+#define ESR_ELx_EC_HVC64	(0x16)
+#define ESR_ELx_EC_SMC64	(0x17)
+#define ESR_ELx_EC_SYS64	(0x18)
+/* Unallocated EC: 0x19 - 0x1E */
+#define ESR_ELx_EC_IMP_DEF	(0x1f)
+#define ESR_ELx_EC_IABT_LOW	(0x20)
+#define ESR_ELx_EC_IABT_CUR	(0x21)
+#define ESR_ELx_EC_PC_ALIGN	(0x22)
+/* Unallocated EC: 0x23 */
+#define ESR_ELx_EC_DABT_LOW	(0x24)
+#define ESR_ELx_EC_DABT_CUR	(0x25)
+#define ESR_ELx_EC_SP_ALIGN	(0x26)
+/* Unallocated EC: 0x27 */
+#define ESR_ELx_EC_FP_EXC32	(0x28)
+/* Unallocated EC: 0x29 - 0x2B */
+#define ESR_ELx_EC_FP_EXC64	(0x2C)
+/* Unallocated EC: 0x2D - 0x2E */
+#define ESR_ELx_EC_SERROR	(0x2F)
+#define ESR_ELx_EC_BREAKPT_LOW	(0x30)
+#define ESR_ELx_EC_BREAKPT_CUR	(0x31)
+#define ESR_ELx_EC_SOFTSTP_LOW	(0x32)
+#define ESR_ELx_EC_SOFTSTP_CUR	(0x33)
+#define ESR_ELx_EC_WATCHPT_LOW	(0x34)
+#define ESR_ELx_EC_WATCHPT_CUR	(0x35)
+/* Unallocated EC: 0x36 - 0x37 */
+#define ESR_ELx_EC_BKPT32	(0x38)
+/* Unallocated EC: 0x39 */
+#define ESR_ELx_EC_VECTOR32	(0x3A)
+/* Unallocated EC: 0x3B */
+#define ESR_ELx_EC_BRK64	(0x3C)
+/* Unallocated EC: 0x3D - 0x3F */
+#define ESR_ELx_EC_MAX		(0x3F)
+
+#define ESR_ELx_EC_SHIFT	(26)
+#define ESR_ELx_EC_MASK		(UL(0x3F) << ESR_ELx_EC_SHIFT)
+
+#define ESR_ELx_IL		(UL(1) << 25)
+#define ESR_ELx_ISS_MASK	(ESR_ELx_IL - 1)
+#define ESR_ELx_ISV		(UL(1) << 24)
+#define ESR_ELx_SAS_SHIFT	(22)
+#define ESR_ELx_SAS		(UL(3) << ESR_ELx_SAS_SHIFT)
+#define ESR_ELx_SSE		(UL(1) << 21)
+#define ESR_ELx_SRT_SHIFT	(16)
+#define ESR_ELx_SRT_MASK	(UL(0x1F) << ESR_ELx_SRT_SHIFT)
+#define ESR_ELx_SF		(UL(1) << 15)
+#define ESR_ELx_AR		(UL(1) << 14)
+#define ESR_ELx_EA		(UL(1) << 9)
+#define ESR_ELx_CM		(UL(1) << 8)
+#define ESR_ELx_S1PTW		(UL(1) << 7)
+#define ESR_ELx_WNR		(UL(1) << 6)
+#define ESR_ELx_FSC		(0x3F)
+#define ESR_ELx_FSC_TYPE	(0x3C)
+#define ESR_ELx_FSC_EXTABT	(0x10)
+#define ESR_ELx_FSC_FAULT	(0x04)
+#define ESR_ELx_FSC_PERM	(0x0C)
+#define ESR_ELx_CV		(UL(1) << 24)
+#define ESR_ELx_COND_SHIFT	(20)
+#define ESR_ELx_COND_MASK	(UL(0xF) << ESR_ELx_COND_SHIFT)
+#define ESR_ELx_WFx_ISS_WFE	(UL(1) << 0)
+
+#ifndef __ASSEMBLY__
+#include <asm/types.h>
+
+const char *esr_get_class_string(u32 esr);
+#endif /* __ASSEMBLY */
 
 #endif /* __ASM_ESR_H */
@@ -18,6 +18,7 @@
 #ifndef __ARM64_KVM_ARM_H__
 #define __ARM64_KVM_ARM_H__
 
+#include <asm/esr.h>
 #include <asm/memory.h>
 #include <asm/types.h>
@@ -184,77 +185,11 @@
 #define MDCR_EL2_TPMCR		(1 << 5)
 #define MDCR_EL2_HPMN_MASK	(0x1F)
 
-/* Exception Syndrome Register (ESR) bits */
-#define ESR_EL2_EC_SHIFT	(26)
-#define ESR_EL2_EC		(UL(0x3f) << ESR_EL2_EC_SHIFT)
-#define ESR_EL2_IL		(UL(1) << 25)
-#define ESR_EL2_ISS		(ESR_EL2_IL - 1)
-#define ESR_EL2_ISV_SHIFT	(24)
-#define ESR_EL2_ISV		(UL(1) << ESR_EL2_ISV_SHIFT)
-#define ESR_EL2_SAS_SHIFT	(22)
-#define ESR_EL2_SAS		(UL(3) << ESR_EL2_SAS_SHIFT)
-#define ESR_EL2_SSE		(1 << 21)
-#define ESR_EL2_SRT_SHIFT	(16)
-#define ESR_EL2_SRT_MASK	(0x1f << ESR_EL2_SRT_SHIFT)
-#define ESR_EL2_SF		(1 << 15)
-#define ESR_EL2_AR		(1 << 14)
-#define ESR_EL2_EA		(1 << 9)
-#define ESR_EL2_CM		(1 << 8)
-#define ESR_EL2_S1PTW		(1 << 7)
-#define ESR_EL2_WNR		(1 << 6)
-#define ESR_EL2_FSC		(0x3f)
-#define ESR_EL2_FSC_TYPE	(0x3c)
-
-#define ESR_EL2_CV_SHIFT	(24)
-#define ESR_EL2_CV		(UL(1) << ESR_EL2_CV_SHIFT)
-#define ESR_EL2_COND_SHIFT	(20)
-#define ESR_EL2_COND		(UL(0xf) << ESR_EL2_COND_SHIFT)
-
-#define FSC_FAULT	(0x04)
-#define FSC_PERM	(0x0c)
+/* For compatibility with fault code shared with 32-bit */
+#define FSC_FAULT	ESR_ELx_FSC_FAULT
+#define FSC_PERM	ESR_ELx_FSC_PERM
 
 /* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */
 #define HPFAR_MASK	(~UL(0xf))
 
-#define ESR_EL2_EC_UNKNOWN	(0x00)
-#define ESR_EL2_EC_WFI		(0x01)
-#define ESR_EL2_EC_CP15_32	(0x03)
-#define ESR_EL2_EC_CP15_64	(0x04)
-#define ESR_EL2_EC_CP14_MR	(0x05)
-#define ESR_EL2_EC_CP14_LS	(0x06)
-#define ESR_EL2_EC_FP_ASIMD	(0x07)
-#define ESR_EL2_EC_CP10_ID	(0x08)
-#define ESR_EL2_EC_CP14_64	(0x0C)
-#define ESR_EL2_EC_ILL_ISS	(0x0E)
-#define ESR_EL2_EC_SVC32	(0x11)
-#define ESR_EL2_EC_HVC32	(0x12)
-#define ESR_EL2_EC_SMC32	(0x13)
-#define ESR_EL2_EC_SVC64	(0x15)
-#define ESR_EL2_EC_HVC64	(0x16)
-#define ESR_EL2_EC_SMC64	(0x17)
-#define ESR_EL2_EC_SYS64	(0x18)
-#define ESR_EL2_EC_IABT		(0x20)
-#define ESR_EL2_EC_IABT_HYP	(0x21)
-#define ESR_EL2_EC_PC_ALIGN	(0x22)
-#define ESR_EL2_EC_DABT		(0x24)
-#define ESR_EL2_EC_DABT_HYP	(0x25)
-#define ESR_EL2_EC_SP_ALIGN	(0x26)
-#define ESR_EL2_EC_FP_EXC32	(0x28)
-#define ESR_EL2_EC_FP_EXC64	(0x2C)
-#define ESR_EL2_EC_SERROR	(0x2F)
-#define ESR_EL2_EC_BREAKPT	(0x30)
-#define ESR_EL2_EC_BREAKPT_HYP	(0x31)
-#define ESR_EL2_EC_SOFTSTP	(0x32)
-#define ESR_EL2_EC_SOFTSTP_HYP	(0x33)
-#define ESR_EL2_EC_WATCHPT	(0x34)
-#define ESR_EL2_EC_WATCHPT_HYP	(0x35)
-#define ESR_EL2_EC_BKPT32	(0x38)
-#define ESR_EL2_EC_VECTOR32	(0x3A)
-#define ESR_EL2_EC_BRK64	(0x3C)
-
-#define ESR_EL2_EC_xABT_xFSR_EXTABT	0x10
-
-#define ESR_EL2_EC_WFI_ISS_WFE	(1 << 0)
-
 #endif /* __ARM64_KVM_ARM_H__ */
@@ -23,8 +23,10 @@
 #define __ARM64_KVM_EMULATE_H__
 
 #include <linux/kvm_host.h>
-#include <asm/kvm_asm.h>
+
+#include <asm/esr.h>
 #include <asm/kvm_arm.h>
+#include <asm/kvm_asm.h>
 #include <asm/kvm_mmio.h>
 #include <asm/ptrace.h>
@@ -128,63 +130,63 @@ static inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
 
 static inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_ISV);
+	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_ISV);
 }
 
 static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_WNR);
+	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR);
 }
 
 static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_SSE);
+	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SSE);
 }
 
 static inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
 {
-	return (kvm_vcpu_get_hsr(vcpu) & ESR_EL2_SRT_MASK) >> ESR_EL2_SRT_SHIFT;
+	return (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
 }
 
 static inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_EA);
+	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_EA);
 }
 
 static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_S1PTW);
+	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW);
 }
 
 static inline int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
 {
-	return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_EL2_SAS) >> ESR_EL2_SAS_SHIFT);
+	return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
 }
 
 /* This one is not specific to Data Abort */
 static inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
 {
-	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_EL2_IL);
+	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_IL);
 }
 
 static inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
 {
-	return kvm_vcpu_get_hsr(vcpu) >> ESR_EL2_EC_SHIFT;
+	return kvm_vcpu_get_hsr(vcpu) >> ESR_ELx_EC_SHIFT;
 }
 
 static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
 {
-	return kvm_vcpu_trap_get_class(vcpu) == ESR_EL2_EC_IABT;
+	return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
 }
 
 static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
 {
-	return kvm_vcpu_get_hsr(vcpu) & ESR_EL2_FSC;
+	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC;
 }
 
 static inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
 {
-	return kvm_vcpu_get_hsr(vcpu) & ESR_EL2_FSC_TYPE;
+	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC_TYPE;
 }
 
 static inline unsigned long kvm_vcpu_get_mpidr(struct kvm_vcpu *vcpu)
......
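A note on kvm_vcpu_dabt_get_as() above: SAS (Syndrome Access Size) is a two-bit field encoding the access size as log2 of the byte count (0 = byte, 1 = halfword, 2 = word, 3 = doubleword), which is why the accessor returns `1 << SAS`. A minimal sketch of that arithmetic, as a hypothetical helper outside the kernel with constants copied from the new esr.h:

```c
#include <stdint.h>

#define ESR_ELx_SAS_SHIFT	22
#define ESR_ELx_SAS		(3UL << ESR_ELx_SAS_SHIFT)

/* SAS encodes log2(bytes): SAS=2 -> 1 << 2 = 4-byte access. */
static unsigned int access_size_bytes(uint32_t esr)
{
	return 1u << ((esr & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
}
```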
@@ -269,18 +269,18 @@ ENDPROC(el1_error_invalid)
 el1_sync:
 	kernel_entry 1
 	mrs	x1, esr_el1			// read the syndrome register
-	lsr	x24, x1, #ESR_EL1_EC_SHIFT	// exception class
-	cmp	x24, #ESR_EL1_EC_DABT_EL1	// data abort in EL1
+	lsr	x24, x1, #ESR_ELx_EC_SHIFT	// exception class
+	cmp	x24, #ESR_ELx_EC_DABT_CUR	// data abort in EL1
 	b.eq	el1_da
-	cmp	x24, #ESR_EL1_EC_SYS64		// configurable trap
+	cmp	x24, #ESR_ELx_EC_SYS64		// configurable trap
 	b.eq	el1_undef
-	cmp	x24, #ESR_EL1_EC_SP_ALIGN	// stack alignment exception
+	cmp	x24, #ESR_ELx_EC_SP_ALIGN	// stack alignment exception
 	b.eq	el1_sp_pc
-	cmp	x24, #ESR_EL1_EC_PC_ALIGN	// pc alignment exception
+	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
 	b.eq	el1_sp_pc
-	cmp	x24, #ESR_EL1_EC_UNKNOWN	// unknown exception in EL1
+	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL1
 	b.eq	el1_undef
-	cmp	x24, #ESR_EL1_EC_BREAKPT_EL1	// debug exception in EL1
+	cmp	x24, #ESR_ELx_EC_BREAKPT_CUR	// debug exception in EL1
 	b.ge	el1_dbg
 	b	el1_inv
 el1_da:
@@ -318,7 +318,7 @@ el1_dbg:
 	/*
 	 * Debug exception handling
 	 */
-	cmp	x24, #ESR_EL1_EC_BRK64		// if BRK64
+	cmp	x24, #ESR_ELx_EC_BRK64		// if BRK64
 	cinc	x24, x24, eq			// set bit '0'
 	tbz	x24, #0, el1_inv		// EL1 only
 	mrs	x0, far_el1
@@ -375,26 +375,26 @@ el1_preempt:
 el0_sync:
 	kernel_entry 0
 	mrs	x25, esr_el1			// read the syndrome register
-	lsr	x24, x25, #ESR_EL1_EC_SHIFT	// exception class
-	cmp	x24, #ESR_EL1_EC_SVC64		// SVC in 64-bit state
+	lsr	x24, x25, #ESR_ELx_EC_SHIFT	// exception class
+	cmp	x24, #ESR_ELx_EC_SVC64		// SVC in 64-bit state
 	b.eq	el0_svc
-	cmp	x24, #ESR_EL1_EC_DABT_EL0	// data abort in EL0
+	cmp	x24, #ESR_ELx_EC_DABT_LOW	// data abort in EL0
 	b.eq	el0_da
-	cmp	x24, #ESR_EL1_EC_IABT_EL0	// instruction abort in EL0
+	cmp	x24, #ESR_ELx_EC_IABT_LOW	// instruction abort in EL0
 	b.eq	el0_ia
-	cmp	x24, #ESR_EL1_EC_FP_ASIMD	// FP/ASIMD access
+	cmp	x24, #ESR_ELx_EC_FP_ASIMD	// FP/ASIMD access
 	b.eq	el0_fpsimd_acc
-	cmp	x24, #ESR_EL1_EC_FP_EXC64	// FP/ASIMD exception
+	cmp	x24, #ESR_ELx_EC_FP_EXC64	// FP/ASIMD exception
 	b.eq	el0_fpsimd_exc
-	cmp	x24, #ESR_EL1_EC_SYS64		// configurable trap
+	cmp	x24, #ESR_ELx_EC_SYS64		// configurable trap
 	b.eq	el0_undef
-	cmp	x24, #ESR_EL1_EC_SP_ALIGN	// stack alignment exception
+	cmp	x24, #ESR_ELx_EC_SP_ALIGN	// stack alignment exception
 	b.eq	el0_sp_pc
-	cmp	x24, #ESR_EL1_EC_PC_ALIGN	// pc alignment exception
+	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
 	b.eq	el0_sp_pc
-	cmp	x24, #ESR_EL1_EC_UNKNOWN	// unknown exception in EL0
+	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL0
 	b.eq	el0_undef
-	cmp	x24, #ESR_EL1_EC_BREAKPT_EL0	// debug exception in EL0
+	cmp	x24, #ESR_ELx_EC_BREAKPT_LOW	// debug exception in EL0
 	b.ge	el0_dbg
 	b	el0_inv
@@ -403,30 +403,30 @@ el0_sync:
 el0_sync_compat:
 	kernel_entry 0, 32
 	mrs	x25, esr_el1			// read the syndrome register
-	lsr	x24, x25, #ESR_EL1_EC_SHIFT	// exception class
-	cmp	x24, #ESR_EL1_EC_SVC32		// SVC in 32-bit state
+	lsr	x24, x25, #ESR_ELx_EC_SHIFT	// exception class
+	cmp	x24, #ESR_ELx_EC_SVC32		// SVC in 32-bit state
 	b.eq	el0_svc_compat
-	cmp	x24, #ESR_EL1_EC_DABT_EL0	// data abort in EL0
+	cmp	x24, #ESR_ELx_EC_DABT_LOW	// data abort in EL0
 	b.eq	el0_da
-	cmp	x24, #ESR_EL1_EC_IABT_EL0	// instruction abort in EL0
+	cmp	x24, #ESR_ELx_EC_IABT_LOW	// instruction abort in EL0
 	b.eq	el0_ia
-	cmp	x24, #ESR_EL1_EC_FP_ASIMD	// FP/ASIMD access
+	cmp	x24, #ESR_ELx_EC_FP_ASIMD	// FP/ASIMD access
 	b.eq	el0_fpsimd_acc
-	cmp	x24, #ESR_EL1_EC_FP_EXC32	// FP/ASIMD exception
+	cmp	x24, #ESR_ELx_EC_FP_EXC32	// FP/ASIMD exception
 	b.eq	el0_fpsimd_exc
-	cmp	x24, #ESR_EL1_EC_UNKNOWN	// unknown exception in EL0
+	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL0
 	b.eq	el0_undef
-	cmp	x24, #ESR_EL1_EC_CP15_32	// CP15 MRC/MCR trap
+	cmp	x24, #ESR_ELx_EC_CP15_32	// CP15 MRC/MCR trap
 	b.eq	el0_undef
-	cmp	x24, #ESR_EL1_EC_CP15_64	// CP15 MRRC/MCRR trap
+	cmp	x24, #ESR_ELx_EC_CP15_64	// CP15 MRRC/MCRR trap
 	b.eq	el0_undef
-	cmp	x24, #ESR_EL1_EC_CP14_MR	// CP14 MRC/MCR trap
+	cmp	x24, #ESR_ELx_EC_CP14_MR	// CP14 MRC/MCR trap
 	b.eq	el0_undef
-	cmp	x24, #ESR_EL1_EC_CP14_LS	// CP14 LDC/STC trap
+	cmp	x24, #ESR_ELx_EC_CP14_LS	// CP14 LDC/STC trap
 	b.eq	el0_undef
-	cmp	x24, #ESR_EL1_EC_CP14_64	// CP14 MRRC/MCRR trap
+	cmp	x24, #ESR_ELx_EC_CP14_64	// CP14 MRRC/MCRR trap
 	b.eq	el0_undef
-	cmp	x24, #ESR_EL1_EC_BREAKPT_EL0	// debug exception in EL0
+	cmp	x24, #ESR_ELx_EC_BREAKPT_LOW	// debug exception in EL0
 	b.ge	el0_dbg
 	b	el0_inv
 el0_svc_compat:
......
@@ -501,7 +501,7 @@ static int compat_setup_sigframe(struct compat_sigframe __user *sf,
 	__put_user_error((compat_ulong_t)0, &sf->uc.uc_mcontext.trap_no, err);
 	/* set the compat FSR WnR */
-	__put_user_error(!!(current->thread.fault_code & ESR_EL1_WRITE) <<
+	__put_user_error(!!(current->thread.fault_code & ESR_ELx_WNR) <<
 			 FSR_WRITE_SHIFT, &sf->uc.uc_mcontext.error_code, err);
 	__put_user_error(current->thread.fault_address, &sf->uc.uc_mcontext.fault_address, err);
 	__put_user_error(set->sig[0], &sf->uc.uc_mcontext.oldmask, err);
......
@@ -33,6 +33,7 @@
 #include <asm/atomic.h>
 #include <asm/debug-monitors.h>
+#include <asm/esr.h>
 #include <asm/traps.h>
 #include <asm/stacktrace.h>
 #include <asm/exception.h>
@@ -373,6 +374,51 @@ asmlinkage long do_ni_syscall(struct pt_regs *regs)
 	return sys_ni_syscall();
 }
 
+static const char *esr_class_str[] = {
+	[0 ... ESR_ELx_EC_MAX]		= "UNRECOGNIZED EC",
+	[ESR_ELx_EC_UNKNOWN]		= "Unknown/Uncategorized",
+	[ESR_ELx_EC_WFx]		= "WFI/WFE",
+	[ESR_ELx_EC_CP15_32]		= "CP15 MCR/MRC",
+	[ESR_ELx_EC_CP15_64]		= "CP15 MCRR/MRRC",
+	[ESR_ELx_EC_CP14_MR]		= "CP14 MCR/MRC",
+	[ESR_ELx_EC_CP14_LS]		= "CP14 LDC/STC",
+	[ESR_ELx_EC_FP_ASIMD]		= "ASIMD",
+	[ESR_ELx_EC_CP10_ID]		= "CP10 MRC/VMRS",
+	[ESR_ELx_EC_CP14_64]		= "CP14 MCRR/MRRC",
+	[ESR_ELx_EC_ILL]		= "PSTATE.IL",
+	[ESR_ELx_EC_SVC32]		= "SVC (AArch32)",
+	[ESR_ELx_EC_HVC32]		= "HVC (AArch32)",
+	[ESR_ELx_EC_SMC32]		= "SMC (AArch32)",
+	[ESR_ELx_EC_SVC64]		= "SVC (AArch64)",
+	[ESR_ELx_EC_HVC64]		= "HVC (AArch64)",
+	[ESR_ELx_EC_SMC64]		= "SMC (AArch64)",
+	[ESR_ELx_EC_SYS64]		= "MSR/MRS (AArch64)",
+	[ESR_ELx_EC_IMP_DEF]		= "EL3 IMP DEF",
+	[ESR_ELx_EC_IABT_LOW]		= "IABT (lower EL)",
+	[ESR_ELx_EC_IABT_CUR]		= "IABT (current EL)",
+	[ESR_ELx_EC_PC_ALIGN]		= "PC Alignment",
+	[ESR_ELx_EC_DABT_LOW]		= "DABT (lower EL)",
+	[ESR_ELx_EC_DABT_CUR]		= "DABT (current EL)",
+	[ESR_ELx_EC_SP_ALIGN]		= "SP Alignment",
+	[ESR_ELx_EC_FP_EXC32]		= "FP (AArch32)",
+	[ESR_ELx_EC_FP_EXC64]		= "FP (AArch64)",
+	[ESR_ELx_EC_SERROR]		= "SError",
+	[ESR_ELx_EC_BREAKPT_LOW]	= "Breakpoint (lower EL)",
+	[ESR_ELx_EC_BREAKPT_CUR]	= "Breakpoint (current EL)",
+	[ESR_ELx_EC_SOFTSTP_LOW]	= "Software Step (lower EL)",
+	[ESR_ELx_EC_SOFTSTP_CUR]	= "Software Step (current EL)",
+	[ESR_ELx_EC_WATCHPT_LOW]	= "Watchpoint (lower EL)",
+	[ESR_ELx_EC_WATCHPT_CUR]	= "Watchpoint (current EL)",
+	[ESR_ELx_EC_BKPT32]		= "BKPT (AArch32)",
+	[ESR_ELx_EC_VECTOR32]		= "Vector catch (AArch32)",
+	[ESR_ELx_EC_BRK64]		= "BRK (AArch64)",
+};
+
+const char *esr_get_class_string(u32 esr)
+{
+	return esr_class_str[esr >> ESR_ELx_EC_SHIFT];
+}
+
 /*
  * bad_mode handles the impossible case in the exception vector.
  */
@@ -382,8 +428,8 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
 	void __user *pc = (void __user *)instruction_pointer(regs);
 	console_verbose();
 
-	pr_crit("Bad mode in %s handler detected, code 0x%08x\n",
-		handler[reason], esr);
+	pr_crit("Bad mode in %s handler detected, code 0x%08x -- %s\n",
+		handler[reason], esr, esr_get_class_string(esr));
 	__show_regs(regs);
 
 	info.si_signo = SIGILL;
......
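The `[0 ... ESR_ELx_EC_MAX]` entry in esr_class_str above is a GNU C range designator: the whole table is filled with the fallback string first, and later designated entries override their individual slots, so any unallocated EC still maps to "UNRECOGNIZED EC". A tiny illustration of the idiom, with hypothetical values (this is a GCC/Clang extension, not standard C):

```c
/* Slots 0..7 default to "unallocated"; slot 3 is then overridden. */
static const char *demo[8] = {
	[0 ... 7]	= "unallocated",
	[3]		= "allocated",
};
```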
@@ -22,6 +22,7 @@
  */
 
 #include <linux/kvm_host.h>
+#include <asm/esr.h>
 #include <asm/kvm_emulate.h>
 
 /*
@@ -55,8 +56,8 @@ static int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
 {
 	u32 esr = kvm_vcpu_get_hsr(vcpu);
 
-	if (esr & ESR_EL2_CV)
-		return (esr & ESR_EL2_COND) >> ESR_EL2_COND_SHIFT;
+	if (esr & ESR_ELx_CV)
+		return (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;
 
 	return -1;
 }
......
@@ -21,8 +21,10 @@
 #include <linux/kvm.h>
 #include <linux/kvm_host.h>
-#include <asm/kvm_emulate.h>
+
+#include <asm/esr.h>
 #include <asm/kvm_coproc.h>
+#include <asm/kvm_emulate.h>
 #include <asm/kvm_mmu.h>
 #include <asm/kvm_psci.h>
@@ -61,7 +63,7 @@ static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
  */
 static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
-	if (kvm_vcpu_get_hsr(vcpu) & ESR_EL2_EC_WFI_ISS_WFE)
+	if (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WFx_ISS_WFE)
 		kvm_vcpu_on_spin(vcpu);
 	else
 		kvm_vcpu_block(vcpu);
@@ -72,29 +74,30 @@ static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
 }
 
 static exit_handle_fn arm_exit_handlers[] = {
-	[ESR_EL2_EC_WFI]	= kvm_handle_wfx,
-	[ESR_EL2_EC_CP15_32]	= kvm_handle_cp15_32,
-	[ESR_EL2_EC_CP15_64]	= kvm_handle_cp15_64,
-	[ESR_EL2_EC_CP14_MR]	= kvm_handle_cp14_32,
-	[ESR_EL2_EC_CP14_LS]	= kvm_handle_cp14_load_store,
-	[ESR_EL2_EC_CP14_64]	= kvm_handle_cp14_64,
-	[ESR_EL2_EC_HVC32]	= handle_hvc,
-	[ESR_EL2_EC_SMC32]	= handle_smc,
-	[ESR_EL2_EC_HVC64]	= handle_hvc,
-	[ESR_EL2_EC_SMC64]	= handle_smc,
-	[ESR_EL2_EC_SYS64]	= kvm_handle_sys_reg,
-	[ESR_EL2_EC_IABT]	= kvm_handle_guest_abort,
-	[ESR_EL2_EC_DABT]	= kvm_handle_guest_abort,
+	[ESR_ELx_EC_WFx]	= kvm_handle_wfx,
+	[ESR_ELx_EC_CP15_32]	= kvm_handle_cp15_32,
+	[ESR_ELx_EC_CP15_64]	= kvm_handle_cp15_64,
+	[ESR_ELx_EC_CP14_MR]	= kvm_handle_cp14_32,
+	[ESR_ELx_EC_CP14_LS]	= kvm_handle_cp14_load_store,
+	[ESR_ELx_EC_CP14_64]	= kvm_handle_cp14_64,
+	[ESR_ELx_EC_HVC32]	= handle_hvc,
+	[ESR_ELx_EC_SMC32]	= handle_smc,
+	[ESR_ELx_EC_HVC64]	= handle_hvc,
+	[ESR_ELx_EC_SMC64]	= handle_smc,
+	[ESR_ELx_EC_SYS64]	= kvm_handle_sys_reg,
+	[ESR_ELx_EC_IABT_LOW]	= kvm_handle_guest_abort,
+	[ESR_ELx_EC_DABT_LOW]	= kvm_handle_guest_abort,
 };
 
 static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
 {
-	u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
+	u32 hsr = kvm_vcpu_get_hsr(vcpu);
+	u8 hsr_ec = hsr >> ESR_ELx_EC_SHIFT;
 
 	if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers) ||
 	    !arm_exit_handlers[hsr_ec]) {
-		kvm_err("Unknown exception class: hsr: %#08x\n",
-			(unsigned int)kvm_vcpu_get_hsr(vcpu));
+		kvm_err("Unknown exception class: hsr: %#08x -- %s\n",
+			hsr, esr_get_class_string(hsr));
 		BUG();
 	}
......
@@ -17,15 +17,16 @@
 
 #include <linux/linkage.h>
 
-#include <asm/assembler.h>
-#include <asm/memory.h>
 #include <asm/asm-offsets.h>
+#include <asm/assembler.h>
 #include <asm/debug-monitors.h>
+#include <asm/esr.h>
 #include <asm/fpsimdmacros.h>
 #include <asm/kvm.h>
-#include <asm/kvm_asm.h>
 #include <asm/kvm_arm.h>
+#include <asm/kvm_asm.h>
 #include <asm/kvm_mmu.h>
+#include <asm/memory.h>
 
 #define CPU_GP_REG_OFFSET(x)	(CPU_GP_REGS + x)
 #define CPU_XREG_OFFSET(x)	CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x)
@@ -1140,9 +1141,9 @@ el1_sync:					// Guest trapped into EL2
 	push	x2, x3
 
 	mrs	x1, esr_el2
-	lsr	x2, x1, #ESR_EL2_EC_SHIFT
+	lsr	x2, x1, #ESR_ELx_EC_SHIFT
 
-	cmp	x2, #ESR_EL2_EC_HVC64
+	cmp	x2, #ESR_ELx_EC_HVC64
 	b.ne	el1_trap
 
 	mrs	x3, vttbr_el2			// If vttbr is valid, the 64bit guest
@@ -1177,13 +1178,13 @@ el1_trap:
 	 * x1: ESR
 	 * x2: ESR_EC
 	 */
-	cmp	x2, #ESR_EL2_EC_DABT
-	mov	x0, #ESR_EL2_EC_IABT
+	cmp	x2, #ESR_ELx_EC_DABT_LOW
+	mov	x0, #ESR_ELx_EC_IABT_LOW
 	ccmp	x2, x0, #4, ne
 	b.ne	1f				// Not an abort we care about
 
 	/* This is an abort. Check for permission fault */
-	and	x2, x1, #ESR_EL2_FSC_TYPE
+	and	x2, x1, #ESR_ELx_FSC_TYPE
 	cmp	x2, #FSC_PERM
 	b.ne	1f				// Not a permission fault
......
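The permission-fault test in el1_trap above relies on the fault status code layout: the low six bits of the syndrome hold the FSC, and its bottom two bits encode the translation table level. Masking with ESR_ELx_FSC_TYPE (0x3C) discards the level, folding e.g. a level-1 permission fault (0x0D) onto FSC_PERM (0x0C). A small sketch of the same check in C, with constants copied from the new esr.h:

```c
#include <stdbool.h>
#include <stdint.h>

#define ESR_ELx_FSC_TYPE	0x3C
#define FSC_PERM		0x0C	/* permission fault, level bits masked off */

static bool is_permission_fault(uint32_t esr)
{
	/* FSC 0x0C..0x0F (levels 0-3) all compare equal once the level is masked. */
	return (esr & ESR_ELx_FSC_TYPE) == FSC_PERM;
}
```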
@@ -118,27 +118,27 @@ static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr
 	 * instruction set. Report an external synchronous abort.
 	 */
 	if (kvm_vcpu_trap_il_is32bit(vcpu))
-		esr |= ESR_EL1_IL;
+		esr |= ESR_ELx_IL;
 
 	/*
 	 * Here, the guest runs in AArch64 mode when in EL1. If we get
 	 * an AArch32 fault, it means we managed to trap an EL0 fault.
 	 */
 	if (is_aarch32 || (cpsr & PSR_MODE_MASK) == PSR_MODE_EL0t)
-		esr |= (ESR_EL1_EC_IABT_EL0 << ESR_EL1_EC_SHIFT);
+		esr |= (ESR_ELx_EC_IABT_LOW << ESR_ELx_EC_SHIFT);
 	else
-		esr |= (ESR_EL1_EC_IABT_EL1 << ESR_EL1_EC_SHIFT);
+		esr |= (ESR_ELx_EC_IABT_CUR << ESR_ELx_EC_SHIFT);
 
 	if (!is_iabt)
-		esr |= ESR_EL1_EC_DABT_EL0;
+		esr |= ESR_ELx_EC_DABT_LOW;
 
-	vcpu_sys_reg(vcpu, ESR_EL1) = esr | ESR_EL2_EC_xABT_xFSR_EXTABT;
+	vcpu_sys_reg(vcpu, ESR_EL1) = esr | ESR_ELx_FSC_EXTABT;
 }
 
 static void inject_undef64(struct kvm_vcpu *vcpu)
 {
 	unsigned long cpsr = *vcpu_cpsr(vcpu);
-	u32 esr = (ESR_EL1_EC_UNKNOWN << ESR_EL1_EC_SHIFT);
+	u32 esr = (ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT);
 
 	*vcpu_spsr(vcpu) = cpsr;
 	*vcpu_elr_el1(vcpu) = *vcpu_pc(vcpu);
@@ -151,7 +151,7 @@ static void inject_undef64(struct kvm_vcpu *vcpu)
 	 * set.
 	 */
 	if (kvm_vcpu_trap_il_is32bit(vcpu))
-		esr |= ESR_EL1_IL;
+		esr |= ESR_ELx_IL;
 
 	vcpu_sys_reg(vcpu, ESR_EL1) = esr;
 }
......
@@ -20,17 +20,20 @@
  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
-#include <linux/mm.h>
 #include <linux/kvm_host.h>
+#include <linux/mm.h>
 #include <linux/uaccess.h>
-#include <asm/kvm_arm.h>
-#include <asm/kvm_host.h>
-#include <asm/kvm_emulate.h>
-#include <asm/kvm_coproc.h>
-#include <asm/kvm_mmu.h>
+
 #include <asm/cacheflush.h>
 #include <asm/cputype.h>
 #include <asm/debug-monitors.h>
+#include <asm/esr.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_coproc.h>
+#include <asm/kvm_emulate.h>
+#include <asm/kvm_host.h>
+#include <asm/kvm_mmu.h>
+
 #include <trace/events/kvm.h>
 
 #include "sys_regs.h"
@@ -815,12 +818,12 @@ static void unhandled_cp_access(struct kvm_vcpu *vcpu,
 	int cp;
 
 	switch(hsr_ec) {
-	case ESR_EL2_EC_CP15_32:
-	case ESR_EL2_EC_CP15_64:
+	case ESR_ELx_EC_CP15_32:
+	case ESR_ELx_EC_CP15_64:
 		cp = 15;
 		break;
-	case ESR_EL2_EC_CP14_MR:
-	case ESR_EL2_EC_CP14_64:
+	case ESR_ELx_EC_CP14_MR:
+	case ESR_ELx_EC_CP14_64:
 		cp = 14;
 		break;
 	default:
......
@@ -219,7 +219,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
 
 	if (esr & ESR_LNX_EXEC) {
 		vm_flags = VM_EXEC;
-	} else if ((esr & ESR_EL1_WRITE) && !(esr & ESR_EL1_CM)) {
+	} else if ((esr & ESR_ELx_WNR) && !(esr & ESR_ELx_CM)) {
 		vm_flags = VM_WRITE;
 		mm_flags |= FAULT_FLAG_WRITE;
 	}
......
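The write-fault test in do_page_fault() above checks WNR (write-not-read) but ignores it when CM is set: cache maintenance operations report as writes in WNR even though they only need read permission, so treating them as write faults would be wrong. A hedged sketch of the predicate, with constants as in the new esr.h:

```c
#include <stdbool.h>
#include <stdint.h>

#define ESR_ELx_CM	(1UL << 8)
#define ESR_ELx_WNR	(1UL << 6)

/* A data abort counts as a write fault only if it is not a cache
 * maintenance operation, which sets WNR without requiring write access. */
static bool is_write_fault(uint32_t esr)
{
	return (esr & ESR_ELx_WNR) && !(esr & ESR_ELx_CM);
}
```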