Commit 06db8196 authored by Peter Maydell

target/arm: Factor out VFP access checking code

Factor out the VFP access checking code so that we can use it in the
leaf functions of the decodetree decoder.

We call the function full_vfp_access_check() so we can keep
the more natural vfp_access_check() for a version which doesn't
have the 'ignore_vfp_enabled' flag -- that way almost all VFP
insns will be able to use vfp_access_check(s) and only the
special-register access function will have to use
full_vfp_access_check(s, ignore_vfp_enabled).

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>

Parent 78e138bc
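For reference, a minimal sketch of the plain vfp_access_check() helper the message describes; this wrapper is an assumption (it is added by later patches in the series, not by this diff) and simply fixes ignore_vfp_enabled to false for the common case, so ordinary VFP insns never need to call full_vfp_access_check() directly:

/*
 * Hedged sketch, not part of this commit: the common-case wrapper.
 * It relies on the full_vfp_access_check() definition added below.
 */
static bool vfp_access_check(DisasContext *s)
{
    return full_vfp_access_check(s, false);
}
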
@@ -29,3 +29,103 @@
 /* Include the generated VFP decoder */
 #include "decode-vfp.inc.c"
 #include "decode-vfp-uncond.inc.c"
+
+/*
+ * Check that VFP access is enabled. If it is, do the necessary
+ * M-profile lazy-FP handling and then return true.
+ * If not, emit code to generate an appropriate exception and
+ * return false.
+ * The ignore_vfp_enabled argument specifies that we should ignore
+ * whether VFP is enabled via FPEXC[EN]: this should be true for FMXR/FMRX
+ * accesses to FPSID, FPEXC, MVFR0, MVFR1, MVFR2, and false for all other insns.
+ */
+static bool full_vfp_access_check(DisasContext *s, bool ignore_vfp_enabled)
+{
+    if (s->fp_excp_el) {
+        if (arm_dc_feature(s, ARM_FEATURE_M)) {
+            gen_exception_insn(s, 4, EXCP_NOCP, syn_uncategorized(),
+                               s->fp_excp_el);
+        } else {
+            gen_exception_insn(s, 4, EXCP_UDEF,
+                               syn_fp_access_trap(1, 0xe, false),
+                               s->fp_excp_el);
+        }
+        return false;
+    }
+
+    if (!s->vfp_enabled && !ignore_vfp_enabled) {
+        assert(!arm_dc_feature(s, ARM_FEATURE_M));
+        gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
+                           default_exception_el(s));
+        return false;
+    }
+
+    if (arm_dc_feature(s, ARM_FEATURE_M)) {
+        /* Handle M-profile lazy FP state mechanics */
+
+        /* Trigger lazy-state preservation if necessary */
+        if (s->v7m_lspact) {
+            /*
+             * Lazy state saving affects external memory and also the NVIC,
+             * so we must mark it as an IO operation for icount.
+             */
+            if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
+                gen_io_start();
+            }
+            gen_helper_v7m_preserve_fp_state(cpu_env);
+            if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
+                gen_io_end();
+            }
+            /*
+             * If the preserve_fp_state helper doesn't throw an exception
+             * then it will clear LSPACT; we don't need to repeat this for
+             * any further FP insns in this TB.
+             */
+            s->v7m_lspact = false;
+        }
+
+        /* Update ownership of FP context: set FPCCR.S to match current state */
+        if (s->v8m_fpccr_s_wrong) {
+            TCGv_i32 tmp;
+
+            tmp = load_cpu_field(v7m.fpccr[M_REG_S]);
+            if (s->v8m_secure) {
+                tcg_gen_ori_i32(tmp, tmp, R_V7M_FPCCR_S_MASK);
+            } else {
+                tcg_gen_andi_i32(tmp, tmp, ~R_V7M_FPCCR_S_MASK);
+            }
+            store_cpu_field(tmp, v7m.fpccr[M_REG_S]);
+            /* Don't need to do this for any further FP insns in this TB */
+            s->v8m_fpccr_s_wrong = false;
+        }
+
+        if (s->v7m_new_fp_ctxt_needed) {
+            /*
+             * Create new FP context by updating CONTROL.FPCA, CONTROL.SFPA
+             * and the FPSCR.
+             */
+            TCGv_i32 control, fpscr;
+            uint32_t bits = R_V7M_CONTROL_FPCA_MASK;
+
+            fpscr = load_cpu_field(v7m.fpdscr[s->v8m_secure]);
+            gen_helper_vfp_set_fpscr(cpu_env, fpscr);
+            tcg_temp_free_i32(fpscr);
+            /*
+             * We don't need to arrange to end the TB, because the only
+             * parts of FPSCR which we cache in the TB flags are the VECLEN
+             * and VECSTRIDE, and those don't exist for M-profile.
+             */
+            if (s->v8m_secure) {
+                bits |= R_V7M_CONTROL_SFPA_MASK;
+            }
+            control = load_cpu_field(v7m.control[M_REG_S]);
+            tcg_gen_ori_i32(control, control, bits);
+            store_cpu_field(control, v7m.control[M_REG_S]);
+            /* Don't need to do this for any further FP insns in this TB */
+            s->v7m_new_fp_ctxt_needed = false;
+        }
+    }
+
+    return true;
+}
@@ -3373,8 +3373,10 @@ static int disas_vfp_misc_insn(DisasContext *s, uint32_t insn)
     return 1;
 }
 
-/* Disassemble a VFP instruction. Returns nonzero if an error occurred
-   (ie. an undefined instruction). */
+/*
+ * Disassemble a VFP instruction. Returns nonzero if an error occurred
+ * (ie. an undefined instruction).
+ */
 static int disas_vfp_insn(DisasContext *s, uint32_t insn)
 {
     uint32_t rd, rn, rm, op, i, n, offset, delta_d, delta_m, bank_mask;
@@ -3382,6 +3384,7 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
     TCGv_i32 addr;
     TCGv_i32 tmp;
     TCGv_i32 tmp2;
+    bool ignore_vfp_enabled = false;
 
     if (!arm_dc_feature(s, ARM_FEATURE_VFP)) {
         return 1;
@@ -3403,98 +3406,20 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
         }
     }
 
-    /* FIXME: this access check should not take precedence over UNDEF
+    /*
+     * FIXME: this access check should not take precedence over UNDEF
      * for invalid encodings; we will generate incorrect syndrome information
      * for attempts to execute invalid vfp/neon encodings with FP disabled.
      */
-    if (s->fp_excp_el) {
-        if (arm_dc_feature(s, ARM_FEATURE_M)) {
-            gen_exception_insn(s, 4, EXCP_NOCP, syn_uncategorized(),
-                               s->fp_excp_el);
-        } else {
-            gen_exception_insn(s, 4, EXCP_UDEF,
-                               syn_fp_access_trap(1, 0xe, false),
-                               s->fp_excp_el);
-        }
-        return 0;
-    }
-
-    if (!s->vfp_enabled) {
-        /* VFP disabled. Only allow fmxr/fmrx to/from some control regs. */
-        if ((insn & 0x0fe00fff) != 0x0ee00a10)
-            return 1;
+    if ((insn & 0x0fe00fff) == 0x0ee00a10) {
         rn = (insn >> 16) & 0xf;
-        if (rn != ARM_VFP_FPSID && rn != ARM_VFP_FPEXC && rn != ARM_VFP_MVFR2
-            && rn != ARM_VFP_MVFR1 && rn != ARM_VFP_MVFR0) {
-            return 1;
+        if (rn == ARM_VFP_FPSID || rn == ARM_VFP_FPEXC || rn == ARM_VFP_MVFR2
+            || rn == ARM_VFP_MVFR1 || rn == ARM_VFP_MVFR0) {
+            ignore_vfp_enabled = true;
         }
     }
 
-    if (arm_dc_feature(s, ARM_FEATURE_M)) {
-        /* Handle M-profile lazy FP state mechanics */
-
-        /* Trigger lazy-state preservation if necessary */
-        if (s->v7m_lspact) {
-            /*
-             * Lazy state saving affects external memory and also the NVIC,
-             * so we must mark it as an IO operation for icount.
-             */
-            if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
-                gen_io_start();
-            }
-            gen_helper_v7m_preserve_fp_state(cpu_env);
-            if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
-                gen_io_end();
-            }
-            /*
-             * If the preserve_fp_state helper doesn't throw an exception
-             * then it will clear LSPACT; we don't need to repeat this for
-             * any further FP insns in this TB.
-             */
-            s->v7m_lspact = false;
-        }
-
-        /* Update ownership of FP context: set FPCCR.S to match current state */
-        if (s->v8m_fpccr_s_wrong) {
-            TCGv_i32 tmp;
-
-            tmp = load_cpu_field(v7m.fpccr[M_REG_S]);
-            if (s->v8m_secure) {
-                tcg_gen_ori_i32(tmp, tmp, R_V7M_FPCCR_S_MASK);
-            } else {
-                tcg_gen_andi_i32(tmp, tmp, ~R_V7M_FPCCR_S_MASK);
-            }
-            store_cpu_field(tmp, v7m.fpccr[M_REG_S]);
-            /* Don't need to do this for any further FP insns in this TB */
-            s->v8m_fpccr_s_wrong = false;
-        }
-
-        if (s->v7m_new_fp_ctxt_needed) {
-            /*
-             * Create new FP context by updating CONTROL.FPCA, CONTROL.SFPA
-             * and the FPSCR.
-             */
-            TCGv_i32 control, fpscr;
-            uint32_t bits = R_V7M_CONTROL_FPCA_MASK;
-
-            fpscr = load_cpu_field(v7m.fpdscr[s->v8m_secure]);
-            gen_helper_vfp_set_fpscr(cpu_env, fpscr);
-            tcg_temp_free_i32(fpscr);
-            /*
-             * We don't need to arrange to end the TB, because the only
-             * parts of FPSCR which we cache in the TB flags are the VECLEN
-             * and VECSTRIDE, and those don't exist for M-profile.
-             */
-            if (s->v8m_secure) {
-                bits |= R_V7M_CONTROL_SFPA_MASK;
-            }
-            control = load_cpu_field(v7m.control[M_REG_S]);
-            tcg_gen_ori_i32(control, control, bits);
-            store_cpu_field(control, v7m.control[M_REG_S]);
-            /* Don't need to do this for any further FP insns in this TB */
-            s->v7m_new_fp_ctxt_needed = false;
-        }
-    }
-
+    if (!full_vfp_access_check(s, ignore_vfp_enabled)) {
+        return 0;
+    }
 
     if (extract32(insn, 28, 4) == 0xf) {