Commit 023261ef authored by Richard Henderson

tcg-aarch64: Update to helper_ret_*_mmu routines

A minimal update to use the new helpers with the return address argument.
Tested-by: Claudio Fontana <claudio.fontana@linaro.org>
Reviewed-by: Claudio Fontana <claudio.fontana@linaro.org>
Signed-off-by: Richard Henderson <rth@twiddle.net>
Parent 84fd9dd3
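
The shape of the change, as a sketch: the old helpers took three arguments and had to infer their caller via GETRA()/GETPC(), while the new helper_ret_* variants take the caller's "logical" return address as an explicit fourth argument. Illustrative prototypes only; the stand-in typedefs are mine, and the real declarations live in QEMU's softmmu helper headers:

#include <stdint.h>

/* Stand-ins so the sketch is self-contained; QEMU's real types differ. */
typedef struct CPUState CPUState;
typedef uint64_t target_ulong;

/* Old style: the helper must recover its return address itself. */
uint64_t helper_ldq_mmu(CPUState *env, target_ulong addr, int mmu_idx);

/* New style: the generated slow path passes its "logical" return
 * address explicitly. */
uint64_t helper_ret_ldq_mmu(CPUState *env, target_ulong addr,
                            int mmu_idx, uintptr_t ra);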
@@ -320,24 +320,6 @@ extern uintptr_t tci_tb_ptr;
 
 #define GETPC()  (GETRA() - GETPC_ADJ)
 
-/* The LDST optimizations splits code generation into fast and slow path.
-   In some implementations, we pass the "logical" return address manually;
-   in others, we must infer the logical return from the true return. */
-#if defined(CONFIG_QEMU_LDST_OPTIMIZATION) && defined(CONFIG_SOFTMMU)
-# if defined(__aarch64__)
-#  define GETRA_LDST(RA)  tcg_getra_ldst(RA)
-static inline uintptr_t tcg_getra_ldst(uintptr_t ra)
-{
-    int32_t b;
-    ra += 4;                    /* skip one instruction */
-    b = *(int32_t *)ra;         /* load the branch insn */
-    b = (b << 6) >> (6 - 2);    /* extract the displacement */
-    ra += b;                    /* apply the displacement */
-    return ra;
-}
-# endif
-#endif /* CONFIG_QEMU_LDST_OPTIMIZATION */
-
 /* ??? Delete these once they are no longer used. */
 bool is_tcg_gen_code(uintptr_t pc_ptr);
 #ifdef GETRA_LDST
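
For reference, the block deleted above recovered that return address the hard way: step one instruction past the hardware return address, load the unconditional B instruction found there, and follow its displacement to the logical continuation. A standalone illustration of the extraction arithmetic (the encoding value below is a made-up example, not from the patch):

#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as the deleted tcg_getra_ldst(): the imm26 field of
 * an AArch64 "B" instruction sits in bits [25:0]; << 6 moves it to the
 * top of the 32-bit word, and the arithmetic >> (6 - 2) sign-extends
 * it while scaling by 4 (one AArch64 instruction is 4 bytes). */
static int32_t b_displacement(int32_t b)
{
    return (b << 6) >> (6 - 2);
}

int main(void)
{
    int32_t b = 0x14000010;            /* B .+0x40: opcode 000101, imm26 = 16 */
    printf("%d\n", b_displacement(b)); /* prints 64, i.e. 16 * 4 */
    return 0;
}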
@@ -778,22 +778,24 @@ static inline void tcg_out_nop(TCGContext *s)
 }
 
 #ifdef CONFIG_SOFTMMU
-/* helper signature: helper_ld_mmu(CPUState *env, target_ulong addr,
-   int mmu_idx) */
+/* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
+ *                                     int mmu_idx, uintptr_t ra)
+ */
 static const void * const qemu_ld_helpers[4] = {
-    helper_ldb_mmu,
-    helper_ldw_mmu,
-    helper_ldl_mmu,
-    helper_ldq_mmu,
+    helper_ret_ldub_mmu,
+    helper_ret_lduw_mmu,
+    helper_ret_ldul_mmu,
+    helper_ret_ldq_mmu,
 };
 
-/* helper signature: helper_st_mmu(CPUState *env, target_ulong addr,
-   uintxx_t val, int mmu_idx) */
+/* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr,
+ *                                     uintxx_t val, int mmu_idx, uintptr_t ra)
+ */
 static const void * const qemu_st_helpers[4] = {
-    helper_stb_mmu,
-    helper_stw_mmu,
-    helper_stl_mmu,
-    helper_stq_mmu,
+    helper_ret_stb_mmu,
+    helper_ret_stw_mmu,
+    helper_ret_stl_mmu,
+    helper_ret_stq_mmu,
 };
 
 static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
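
The two hunks below are the heart of the patch: each slow path gains exactly one tcg_out_movi to materialize lb->raddr in the next free argument register before the call. A sketch of the resulting argument layout (AAPCS64 passes the first integer arguments in registers x0..x7; the asterisk stands for the size suffix chosen by lb->opc & 3):

/* load slow path:
 *     helper_ret_ld*_mmu(x0 = env, x1 = addr, x2 = mmu_idx, x3 = ra)
 *
 * store slow path: the data value takes a register of its own, so the
 * return address slides from x3 to x4:
 *     helper_ret_st*_mmu(x0 = env, x1 = addr, x2 = val,
 *                        x3 = mmu_idx, x4 = ra)
 */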
@@ -802,6 +804,7 @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
     tcg_out_movr(s, 1, TCG_REG_X0, TCG_AREG0);
     tcg_out_movr(s, (TARGET_LONG_BITS == 64), TCG_REG_X1, lb->addrlo_reg);
     tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_X2, lb->mem_index);
+    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_X3, (tcg_target_long)lb->raddr);
     tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP,
                  (tcg_target_long)qemu_ld_helpers[lb->opc & 3]);
     tcg_out_callr(s, TCG_REG_TMP);
@@ -822,6 +825,7 @@ static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *lb)
     tcg_out_movr(s, (TARGET_LONG_BITS == 64), TCG_REG_X1, lb->addrlo_reg);
     tcg_out_movr(s, 1, TCG_REG_X2, lb->datalo_reg);
     tcg_out_movi(s, TCG_TYPE_I32, TCG_REG_X3, lb->mem_index);
+    tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_X4, (tcg_target_long)lb->raddr);
     tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP,
                  (tcg_target_long)qemu_st_helpers[lb->opc & 3]);
     tcg_out_callr(s, TCG_REG_TMP);