Commit 9dbbc748 authored by Greg Bellows, committed by Peter Maydell

target-arm: Extend FP checks to use an EL

Extend the ARM disassemble context to take a target exception EL instead of a
boolean enable. This change reverses the polarity of the check, making a value
of 0 indicate that floating point is enabled (no exception).
Signed-off-by: Greg Bellows <greg.bellows@linaro.org>
[PMM: Use a common TB flag field for AArch32 and AArch64;
 CPTR_EL2 exists in v7; CPTR_EL2 should trap for EL2 accesses;
 CPTR_EL2 should not trap for secure accesses; CPTR_EL3
 should trap for EL3 accesses; CPACR traps for secure
 accesses should trap to EL3 if EL3 is AArch32]
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Edgar E. Iglesias <edgar.iglesias@xilinx.com>
Parent 3cf6a0fc
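For orientation before the diff: a minimal sketch (not part of the commit) contrasting the old and new checks inside a translator function. The names match those used in the diff below; under the new scheme, 0 means FP is enabled and a non-zero value is the exception level to which the FP-disabled trap is routed.

    /* Old scheme: boolean flag, true means FP access is enabled */
    if (!s->cpacr_fpen) {
        /* ... generate the FP-disabled exception; target EL chosen separately ... */
    }

    /* New scheme: 0 means FP enabled, non-zero is the target exception level */
    if (s->fp_excp_el) {
        /* ... generate the FP-disabled exception, routed to s->fp_excp_el ... */
    }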
@@ -1741,6 +1741,9 @@ static inline bool arm_singlestep_active(CPUARMState *env)
 #define ARM_TBFLAG_SS_ACTIVE_MASK (1 << ARM_TBFLAG_SS_ACTIVE_SHIFT)
 #define ARM_TBFLAG_PSTATE_SS_SHIFT 26
 #define ARM_TBFLAG_PSTATE_SS_MASK (1 << ARM_TBFLAG_PSTATE_SS_SHIFT)
+/* Target EL if we take a floating-point-disabled exception */
+#define ARM_TBFLAG_FPEXC_EL_SHIFT 24
+#define ARM_TBFLAG_FPEXC_EL_MASK (0x3 << ARM_TBFLAG_FPEXC_EL_SHIFT)
 
 /* Bit usage when in AArch32 state: */
 #define ARM_TBFLAG_THUMB_SHIFT 0
@@ -1755,8 +1758,6 @@ static inline bool arm_singlestep_active(CPUARMState *env)
 #define ARM_TBFLAG_CONDEXEC_MASK (0xff << ARM_TBFLAG_CONDEXEC_SHIFT)
 #define ARM_TBFLAG_BSWAP_CODE_SHIFT 16
 #define ARM_TBFLAG_BSWAP_CODE_MASK (1 << ARM_TBFLAG_BSWAP_CODE_SHIFT)
-#define ARM_TBFLAG_CPACR_FPEN_SHIFT 17
-#define ARM_TBFLAG_CPACR_FPEN_MASK (1 << ARM_TBFLAG_CPACR_FPEN_SHIFT)
 /* We store the bottom two bits of the CPAR as TB flags and handle
  * checks on the other bits at runtime
  */
@@ -1769,9 +1770,7 @@ static inline bool arm_singlestep_active(CPUARMState *env)
 #define ARM_TBFLAG_NS_SHIFT 22
 #define ARM_TBFLAG_NS_MASK (1 << ARM_TBFLAG_NS_SHIFT)
 
-/* Bit usage when in AArch64 state */
-#define ARM_TBFLAG_AA64_FPEN_SHIFT 2
-#define ARM_TBFLAG_AA64_FPEN_MASK (1 << ARM_TBFLAG_AA64_FPEN_SHIFT)
+/* Bit usage when in AArch64 state: currently we have no A64 specific bits */
 
 /* some convenience accessor macros */
 #define ARM_TBFLAG_AARCH64_STATE(F) \
@@ -1782,6 +1781,8 @@ static inline bool arm_singlestep_active(CPUARMState *env)
     (((F) & ARM_TBFLAG_SS_ACTIVE_MASK) >> ARM_TBFLAG_SS_ACTIVE_SHIFT)
 #define ARM_TBFLAG_PSTATE_SS(F) \
     (((F) & ARM_TBFLAG_PSTATE_SS_MASK) >> ARM_TBFLAG_PSTATE_SS_SHIFT)
+#define ARM_TBFLAG_FPEXC_EL(F) \
+    (((F) & ARM_TBFLAG_FPEXC_EL_MASK) >> ARM_TBFLAG_FPEXC_EL_SHIFT)
 #define ARM_TBFLAG_THUMB(F) \
     (((F) & ARM_TBFLAG_THUMB_MASK) >> ARM_TBFLAG_THUMB_SHIFT)
 #define ARM_TBFLAG_VECLEN(F) \
@@ -1794,33 +1795,82 @@ static inline bool arm_singlestep_active(CPUARMState *env)
     (((F) & ARM_TBFLAG_CONDEXEC_MASK) >> ARM_TBFLAG_CONDEXEC_SHIFT)
 #define ARM_TBFLAG_BSWAP_CODE(F) \
     (((F) & ARM_TBFLAG_BSWAP_CODE_MASK) >> ARM_TBFLAG_BSWAP_CODE_SHIFT)
-#define ARM_TBFLAG_CPACR_FPEN(F) \
-    (((F) & ARM_TBFLAG_CPACR_FPEN_MASK) >> ARM_TBFLAG_CPACR_FPEN_SHIFT)
 #define ARM_TBFLAG_XSCALE_CPAR(F) \
     (((F) & ARM_TBFLAG_XSCALE_CPAR_MASK) >> ARM_TBFLAG_XSCALE_CPAR_SHIFT)
-#define ARM_TBFLAG_AA64_FPEN(F) \
-    (((F) & ARM_TBFLAG_AA64_FPEN_MASK) >> ARM_TBFLAG_AA64_FPEN_SHIFT)
 #define ARM_TBFLAG_NS(F) \
     (((F) & ARM_TBFLAG_NS_MASK) >> ARM_TBFLAG_NS_SHIFT)
 
-static inline void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
-                                        target_ulong *cs_base, int *flags)
+/* Return the exception level to which FP-disabled exceptions should
+ * be taken, or 0 if FP is enabled.
+ */
+static inline int fp_exception_el(CPUARMState *env)
 {
     int fpen;
+    int cur_el = arm_current_el(env);
 
-    if (arm_feature(env, ARM_FEATURE_V6)) {
-        fpen = extract32(env->cp15.cpacr_el1, 20, 2);
-    } else {
-        /* CPACR doesn't exist before v6, so VFP is always accessible */
-        fpen = 3;
+    /* CPACR and the CPTR registers don't exist before v6, so FP is
+     * always accessible
+     */
+    if (!arm_feature(env, ARM_FEATURE_V6)) {
+        return 0;
+    }
+
+    /* The CPACR controls traps to EL1, or PL1 if we're 32 bit:
+     * 0, 2 : trap EL0 and EL1/PL1 accesses
+     * 1    : trap only EL0 accesses
+     * 3    : trap no accesses
+     */
+    fpen = extract32(env->cp15.cpacr_el1, 20, 2);
+    switch (fpen) {
+    case 0:
+    case 2:
+        if (cur_el == 0 || cur_el == 1) {
+            /* Trap to PL1, which might be EL1 or EL3 */
+            if (arm_is_secure(env) && !arm_el_is_aa64(env, 3)) {
+                return 3;
+            }
+            return 1;
+        }
+        if (cur_el == 3 && !is_a64(env)) {
+            /* Secure PL1 running at EL3 */
+            return 3;
+        }
+        break;
+    case 1:
+        if (cur_el == 0) {
+            return 1;
+        }
+        break;
+    case 3:
+        break;
+    }
+
+    /* For the CPTR registers we don't need to guard with an ARM_FEATURE
+     * check because zero bits in the registers mean "don't trap".
+     */
+
+    /* CPTR_EL2 : present in v7VE or v8 */
+    if (cur_el <= 2 && extract32(env->cp15.cptr_el[2], 10, 1)
+        && !arm_is_secure_below_el3(env)) {
+        /* Trap FP ops at EL2, NS-EL1 or NS-EL0 to EL2 */
+        return 2;
+    }
+
+    /* CPTR_EL3 : present in v8 */
+    if (extract32(env->cp15.cptr_el[3], 10, 1)) {
+        /* Trap all FP ops to EL3 */
+        return 3;
     }
 
+    return 0;
+}
+
+static inline void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
+                                        target_ulong *cs_base, int *flags)
+{
     if (is_a64(env)) {
         *pc = env->pc;
         *flags = ARM_TBFLAG_AARCH64_STATE_MASK;
-        if (fpen == 3 || (fpen == 1 && arm_current_el(env) != 0)) {
-            *flags |= ARM_TBFLAG_AA64_FPEN_MASK;
-        }
     } else {
         *pc = env->regs[15];
         *flags = (env->thumb << ARM_TBFLAG_THUMB_SHIFT)
@@ -1835,9 +1885,6 @@ static inline void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
             || arm_el_is_aa64(env, 1)) {
             *flags |= ARM_TBFLAG_VFPEN_MASK;
         }
-        if (fpen == 3 || (fpen == 1 && arm_current_el(env) != 0)) {
-            *flags |= ARM_TBFLAG_CPACR_FPEN_MASK;
-        }
         *flags |= (extract32(env->cp15.c15_cpar, 0, 2)
                    << ARM_TBFLAG_XSCALE_CPAR_SHIFT);
     }
@@ -1862,6 +1909,7 @@ static inline void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
             }
         }
     }
 
+    *flags |= fp_exception_el(env) << ARM_TBFLAG_FPEXC_EL_SHIFT;
     *cs_base = 0;
 }
......
@@ -412,7 +412,7 @@ static TCGv_i64 read_cpu_reg_sp(DisasContext *s, int reg, int sf)
 static inline void assert_fp_access_checked(DisasContext *s)
 {
 #ifdef CONFIG_DEBUG_TCG
-    if (unlikely(!s->fp_access_checked || !s->cpacr_fpen)) {
+    if (unlikely(!s->fp_access_checked || s->fp_excp_el)) {
         fprintf(stderr, "target-arm: FP access check missing for "
                 "instruction 0x%08x\n", s->insn);
         abort();
@@ -972,12 +972,12 @@ static inline bool fp_access_check(DisasContext *s)
     assert(!s->fp_access_checked);
     s->fp_access_checked = true;
 
-    if (s->cpacr_fpen) {
+    if (!s->fp_excp_el) {
         return true;
     }
 
     gen_exception_insn(s, 4, EXCP_UDEF, syn_fp_access_trap(1, 0xe, false),
-                       default_exception_el(s));
+                       s->fp_excp_el);
     return false;
 }
@@ -10954,7 +10954,7 @@ void gen_intermediate_code_internal_a64(ARMCPU *cpu,
 #if !defined(CONFIG_USER_ONLY)
     dc->user = (dc->current_el == 0);
 #endif
-    dc->cpacr_fpen = ARM_TBFLAG_AA64_FPEN(tb->flags);
+    dc->fp_excp_el = ARM_TBFLAG_FPEXC_EL(tb->flags);
     dc->vec_len = 0;
     dc->vec_stride = 0;
     dc->cp_regs = cpu->cp_regs;
......
@@ -3044,10 +3044,9 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
      * for invalid encodings; we will generate incorrect syndrome information
      * for attempts to execute invalid vfp/neon encodings with FP disabled.
      */
-    if (!s->cpacr_fpen) {
+    if (s->fp_excp_el) {
         gen_exception_insn(s, 4, EXCP_UDEF,
-                           syn_fp_access_trap(1, 0xe, s->thumb),
-                           default_exception_el(s));
+                           syn_fp_access_trap(1, 0xe, s->thumb), s->fp_excp_el);
         return 0;
     }
@@ -4363,10 +4362,9 @@ static int disas_neon_ls_insn(DisasContext *s, uint32_t insn)
      * for invalid encodings; we will generate incorrect syndrome information
      * for attempts to execute invalid vfp/neon encodings with FP disabled.
      */
-    if (!s->cpacr_fpen) {
+    if (s->fp_excp_el) {
         gen_exception_insn(s, 4, EXCP_UDEF,
-                           syn_fp_access_trap(1, 0xe, s->thumb),
-                           default_exception_el(s));
+                           syn_fp_access_trap(1, 0xe, s->thumb), s->fp_excp_el);
         return 0;
     }
@@ -5102,10 +5100,9 @@ static int disas_neon_data_insn(DisasContext *s, uint32_t insn)
      * for invalid encodings; we will generate incorrect syndrome information
      * for attempts to execute invalid vfp/neon encodings with FP disabled.
      */
-    if (!s->cpacr_fpen) {
+    if (s->fp_excp_el) {
         gen_exception_insn(s, 4, EXCP_UDEF,
-                           syn_fp_access_trap(1, 0xe, s->thumb),
-                           default_exception_el(s));
+                           syn_fp_access_trap(1, 0xe, s->thumb), s->fp_excp_el);
         return 0;
     }
@@ -11082,7 +11079,7 @@ static inline void gen_intermediate_code_internal(ARMCPU *cpu,
     dc->user = (dc->current_el == 0);
 #endif
     dc->ns = ARM_TBFLAG_NS(tb->flags);
-    dc->cpacr_fpen = ARM_TBFLAG_CPACR_FPEN(tb->flags);
+    dc->fp_excp_el = ARM_TBFLAG_FPEXC_EL(tb->flags);
     dc->vfp_enabled = ARM_TBFLAG_VFPEN(tb->flags);
     dc->vec_len = ARM_TBFLAG_VECLEN(tb->flags);
     dc->vec_stride = ARM_TBFLAG_VECSTRIDE(tb->flags);
......
@@ -22,7 +22,7 @@ typedef struct DisasContext {
 #endif
     ARMMMUIdx mmu_idx; /* MMU index to use for normal loads/stores */
     bool ns;        /* Use non-secure CPREG bank on access */
-    bool cpacr_fpen; /* FP enabled via CPACR.FPEN */
+    int fp_excp_el; /* FP exception EL or 0 if enabled */
    bool el3_is_aa64;   /* Flag indicating whether EL3 is AArch64 or not */
     bool vfp_enabled; /* FP enabled via FPSCR.EN */
     int vec_len;
......