Commit 9deb041c authored by Richard Henderson, committed by David Gibson

target/ppc: Split out gen_st_atomic

Move the guts of ST_ATOMIC to a function.  Use foo_tl for the operations
instead of foo_i32 or foo_i64 specifically.  Use MO_ALIGN instead of an
explicit call to gen_check_align.
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
Parent 20ba8504
@@ -3151,54 +3151,55 @@ static void gen_ldat(DisasContext *ctx)
 }
 #endif
 
-#define ST_ATOMIC(name, memop, tp, op) \
-static void gen_##name(DisasContext *ctx) \
-{ \
-    int len = MEMOP_GET_SIZE(memop); \
-    uint32_t gpr_FC = FC(ctx->opcode); \
-    TCGv EA = tcg_temp_local_new(); \
-    TCGv_##tp t0, t1; \
-    \
-    gen_addr_register(ctx, EA); \
-    if (len > 1) { \
-        gen_check_align(ctx, EA, len - 1); \
-    } \
-    t0 = tcg_temp_new_##tp(); \
-    t1 = tcg_temp_new_##tp(); \
-    tcg_gen_##op(t0, cpu_gpr[rD(ctx->opcode) + 1]); \
-    \
-    switch (gpr_FC) { \
-    case 0: /* add and Store */ \
-        tcg_gen_atomic_add_fetch_##tp(t1, EA, t0, ctx->mem_idx, memop); \
-        break; \
-    case 1: /* xor and Store */ \
-        tcg_gen_atomic_xor_fetch_##tp(t1, EA, t0, ctx->mem_idx, memop); \
-        break; \
-    case 2: /* Or and Store */ \
-        tcg_gen_atomic_or_fetch_##tp(t1, EA, t0, ctx->mem_idx, memop); \
-        break; \
-    case 3: /* 'and' and Store */ \
-        tcg_gen_atomic_and_fetch_##tp(t1, EA, t0, ctx->mem_idx, memop); \
-        break; \
-    case 4:  /* Store max unsigned */ \
-    case 5:  /* Store max signed */ \
-    case 6:  /* Store min unsigned */ \
-    case 7:  /* Store min signed */ \
-    case 24: /* Store twin */ \
-        gen_invalid(ctx); \
-        break; \
-    default: \
-        /* invoke data storage error handler */ \
-        gen_exception_err(ctx, POWERPC_EXCP_DSI, POWERPC_EXCP_INVAL); \
-    } \
-    tcg_temp_free_##tp(t0); \
-    tcg_temp_free_##tp(t1); \
-    tcg_temp_free(EA); \
-}
-
-ST_ATOMIC(stwat, DEF_MEMOP(MO_UL), i32, trunc_tl_i32)
-#if defined(TARGET_PPC64)
-ST_ATOMIC(stdat, DEF_MEMOP(MO_Q), i64, mov_i64)
+static void gen_st_atomic(DisasContext *ctx, TCGMemOp memop)
+{
+    uint32_t gpr_FC = FC(ctx->opcode);
+    TCGv EA = tcg_temp_new();
+    TCGv src, discard;
+
+    gen_addr_register(ctx, EA);
+    src = cpu_gpr[rD(ctx->opcode)];
+    discard = tcg_temp_new();
+
+    memop |= MO_ALIGN;
+    switch (gpr_FC) {
+    case 0: /* add and Store */
+        tcg_gen_atomic_add_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
+        break;
+    case 1: /* xor and Store */
+        tcg_gen_atomic_xor_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
+        break;
+    case 2: /* Or and Store */
+        tcg_gen_atomic_or_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
+        break;
+    case 3: /* 'and' and Store */
+        tcg_gen_atomic_and_fetch_tl(discard, EA, src, ctx->mem_idx, memop);
+        break;
+    case 4:  /* Store max unsigned */
+    case 5:  /* Store max signed */
+    case 6:  /* Store min unsigned */
+    case 7:  /* Store min signed */
+    case 24: /* Store twin */
+        gen_invalid(ctx);
+        break;
+    default:
+        /* invoke data storage error handler */
+        gen_exception_err(ctx, POWERPC_EXCP_DSI, POWERPC_EXCP_INVAL);
+    }
+    tcg_temp_free(discard);
+    tcg_temp_free(EA);
+}
+
+static void gen_stwat(DisasContext *ctx)
+{
+    gen_st_atomic(ctx, DEF_MEMOP(MO_UL));
+}
+
+#ifdef TARGET_PPC64
+static void gen_stdat(DisasContext *ctx)
+{
+    gen_st_atomic(ctx, DEF_MEMOP(MO_Q));
+}
 #endif
 
 static void gen_conditional_store(DisasContext *ctx, TCGMemOp memop)
...
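
Editor's note on the two points in the commit message. The explicit gen_check_align call can go away because OR-ing MO_ALIGN into a TCGMemOp makes the generated memory access itself fault on an unaligned effective address. A minimal sketch of that idea, using the generic tcg_gen_qemu_ld_tl helper rather than the atomic ops in the diff; the variable names (EA, val) and their setup are assumed, not part of the commit:

    /* Sketch only: alignment is enforced by the memop flag rather than a
     * separate check.  Assumes ctx, EA and val already exist as in the
     * surrounding translator code. */
    TCGMemOp memop = DEF_MEMOP(MO_UL) | MO_ALIGN;   /* unaligned EA now faults */
    tcg_gen_qemu_ld_tl(val, EA, ctx->mem_idx, memop);

Likewise, the tcg_gen_atomic_*_fetch_tl forms expand to the _i32 or _i64 variants according to TARGET_LONG_BITS, which is what lets a single gen_st_atomic serve both the 32-bit stwat and the 64-bit stdat wrappers.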