提交 396e467c 编写于 作者: F Filip Navara 提交者: Aurelien Jarno

target-arm: replace thumb usage of cpu_T registers by proper register allocations

The goal is eventually to get rid of all cpu_T register usage and to use
just short-lived tmp/tmp2 registers. This patch converts all the places where
cpu_T was used in the Thumb code and replaces it with explicit TCG register
allocation.
Signed-off-by: Filip Navara <filip.navara@gmail.com>
Signed-off-by: Aurelien Jarno <aurelien@aurel32.net>
上级 15bb4eac
...@@ -190,19 +190,11 @@ static void store_reg(DisasContext *s, int reg, TCGv var) ...@@ -190,19 +190,11 @@ static void store_reg(DisasContext *s, int reg, TCGv var)
#define gen_op_subl_T0_T1() tcg_gen_sub_i32(cpu_T[0], cpu_T[0], cpu_T[1]) #define gen_op_subl_T0_T1() tcg_gen_sub_i32(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_rsbl_T0_T1() tcg_gen_sub_i32(cpu_T[0], cpu_T[1], cpu_T[0]) #define gen_op_rsbl_T0_T1() tcg_gen_sub_i32(cpu_T[0], cpu_T[1], cpu_T[0])
#define gen_op_addl_T0_T1_cc() gen_helper_add_cc(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_adcl_T0_T1_cc() gen_helper_adc_cc(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_subl_T0_T1_cc() gen_helper_sub_cc(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_sbcl_T0_T1_cc() gen_helper_sbc_cc(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_rsbl_T0_T1_cc() gen_helper_sub_cc(cpu_T[0], cpu_T[1], cpu_T[0])
#define gen_op_andl_T0_T1() tcg_gen_and_i32(cpu_T[0], cpu_T[0], cpu_T[1]) #define gen_op_andl_T0_T1() tcg_gen_and_i32(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_xorl_T0_T1() tcg_gen_xor_i32(cpu_T[0], cpu_T[0], cpu_T[1]) #define gen_op_xorl_T0_T1() tcg_gen_xor_i32(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_orl_T0_T1() tcg_gen_or_i32(cpu_T[0], cpu_T[0], cpu_T[1]) #define gen_op_orl_T0_T1() tcg_gen_or_i32(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_op_notl_T0() tcg_gen_not_i32(cpu_T[0], cpu_T[0]) #define gen_op_notl_T0() tcg_gen_not_i32(cpu_T[0], cpu_T[0])
#define gen_op_notl_T1() tcg_gen_not_i32(cpu_T[1], cpu_T[1]) #define gen_op_notl_T1() tcg_gen_not_i32(cpu_T[1], cpu_T[1])
#define gen_op_logic_T0_cc() gen_logic_CC(cpu_T[0]);
#define gen_op_logic_T1_cc() gen_logic_CC(cpu_T[1]);
#define gen_op_shll_T1_im(im) tcg_gen_shli_i32(cpu_T[1], cpu_T[1], im) #define gen_op_shll_T1_im(im) tcg_gen_shli_i32(cpu_T[1], cpu_T[1], im)
#define gen_op_shrl_T1_im(im) tcg_gen_shri_i32(cpu_T[1], cpu_T[1], im) #define gen_op_shrl_T1_im(im) tcg_gen_shri_i32(cpu_T[1], cpu_T[1], im)
...@@ -338,17 +330,17 @@ static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b) ...@@ -338,17 +330,17 @@ static TCGv_i64 gen_muls_i64_i32(TCGv a, TCGv b)
} }
/* Unsigned 32x32->64 multiply. */ /* Unsigned 32x32->64 multiply. */
static void gen_op_mull_T0_T1(void) static void gen_mull(TCGv a, TCGv b)
{ {
TCGv_i64 tmp1 = tcg_temp_new_i64(); TCGv_i64 tmp1 = tcg_temp_new_i64();
TCGv_i64 tmp2 = tcg_temp_new_i64(); TCGv_i64 tmp2 = tcg_temp_new_i64();
tcg_gen_extu_i32_i64(tmp1, cpu_T[0]); tcg_gen_extu_i32_i64(tmp1, a);
tcg_gen_extu_i32_i64(tmp2, cpu_T[1]); tcg_gen_extu_i32_i64(tmp2, b);
tcg_gen_mul_i64(tmp1, tmp1, tmp2); tcg_gen_mul_i64(tmp1, tmp1, tmp2);
tcg_gen_trunc_i64_i32(cpu_T[0], tmp1); tcg_gen_trunc_i64_i32(a, tmp1);
tcg_gen_shri_i64(tmp1, tmp1, 32); tcg_gen_shri_i64(tmp1, tmp1, 32);
tcg_gen_trunc_i64_i32(cpu_T[1], tmp1); tcg_gen_trunc_i64_i32(b, tmp1);
} }
/* Signed 32x32->64 multiply. */ /* Signed 32x32->64 multiply. */
...@@ -414,12 +406,12 @@ static inline void gen_logic_CC(TCGv var) ...@@ -414,12 +406,12 @@ static inline void gen_logic_CC(TCGv var)
} }
/* T0 += T1 + CF. */ /* T0 += T1 + CF. */
static void gen_adc_T0_T1(void) static void gen_adc(TCGv t0, TCGv t1)
{ {
TCGv tmp; TCGv tmp;
gen_op_addl_T0_T1(); tcg_gen_add_i32(t0, t0, t1);
tmp = load_cpu_field(CF); tmp = load_cpu_field(CF);
tcg_gen_add_i32(cpu_T[0], cpu_T[0], tmp); tcg_gen_add_i32(t0, t0, tmp);
dead_tmp(tmp); dead_tmp(tmp);
} }
...@@ -444,9 +436,6 @@ static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1) ...@@ -444,9 +436,6 @@ static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
dead_tmp(tmp); dead_tmp(tmp);
} }
#define gen_sbc_T0_T1() gen_sub_carry(cpu_T[0], cpu_T[0], cpu_T[1])
#define gen_rsc_T0_T1() gen_sub_carry(cpu_T[0], cpu_T[1], cpu_T[0])
/* T0 &= ~T1. Clobbers T1. */ /* T0 &= ~T1. Clobbers T1. */
/* FIXME: Implement bic natively. */ /* FIXME: Implement bic natively. */
static inline void tcg_gen_bic_i32(TCGv dest, TCGv t0, TCGv t1) static inline void tcg_gen_bic_i32(TCGv dest, TCGv t0, TCGv t1)
...@@ -7064,70 +7053,70 @@ thumb2_logic_op(int op) ...@@ -7064,70 +7053,70 @@ thumb2_logic_op(int op)
Returns zero if the opcode is valid. */ Returns zero if the opcode is valid. */
static int static int
gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out) gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out, TCGv t0, TCGv t1)
{ {
int logic_cc; int logic_cc;
logic_cc = 0; logic_cc = 0;
switch (op) { switch (op) {
case 0: /* and */ case 0: /* and */
gen_op_andl_T0_T1(); tcg_gen_and_i32(t0, t0, t1);
logic_cc = conds; logic_cc = conds;
break; break;
case 1: /* bic */ case 1: /* bic */
gen_op_bicl_T0_T1(); tcg_gen_bic_i32(t0, t0, t1);
logic_cc = conds; logic_cc = conds;
break; break;
case 2: /* orr */ case 2: /* orr */
gen_op_orl_T0_T1(); tcg_gen_or_i32(t0, t0, t1);
logic_cc = conds; logic_cc = conds;
break; break;
case 3: /* orn */ case 3: /* orn */
gen_op_notl_T1(); tcg_gen_not_i32(t1, t1);
gen_op_orl_T0_T1(); tcg_gen_or_i32(t0, t0, t1);
logic_cc = conds; logic_cc = conds;
break; break;
case 4: /* eor */ case 4: /* eor */
gen_op_xorl_T0_T1(); tcg_gen_xor_i32(t0, t0, t1);
logic_cc = conds; logic_cc = conds;
break; break;
case 8: /* add */ case 8: /* add */
if (conds) if (conds)
gen_op_addl_T0_T1_cc(); gen_helper_add_cc(t0, t0, t1);
else else
gen_op_addl_T0_T1(); tcg_gen_add_i32(t0, t0, t1);
break; break;
case 10: /* adc */ case 10: /* adc */
if (conds) if (conds)
gen_op_adcl_T0_T1_cc(); gen_helper_adc_cc(t0, t0, t1);
else else
gen_adc_T0_T1(); gen_adc(t0, t1);
break; break;
case 11: /* sbc */ case 11: /* sbc */
if (conds) if (conds)
gen_op_sbcl_T0_T1_cc(); gen_helper_sbc_cc(t0, t0, t1);
else else
gen_sbc_T0_T1(); gen_sub_carry(t0, t0, t1);
break; break;
case 13: /* sub */ case 13: /* sub */
if (conds) if (conds)
gen_op_subl_T0_T1_cc(); gen_helper_sub_cc(t0, t0, t1);
else else
gen_op_subl_T0_T1(); tcg_gen_sub_i32(t0, t0, t1);
break; break;
case 14: /* rsb */ case 14: /* rsb */
if (conds) if (conds)
gen_op_rsbl_T0_T1_cc(); gen_helper_sub_cc(t0, t1, t0);
else else
gen_op_rsbl_T0_T1(); tcg_gen_sub_i32(t0, t1, t0);
break; break;
default: /* 5, 6, 7, 9, 12, 15. */ default: /* 5, 6, 7, 9, 12, 15. */
return 1; return 1;
} }
if (logic_cc) { if (logic_cc) {
gen_op_logic_T0_cc(); gen_logic_CC(t0);
if (shifter_out) if (shifter_out)
gen_set_CF_bit31(cpu_T[1]); gen_set_CF_bit31(t1);
} }
return 0; return 0;
} }
...@@ -7183,8 +7172,7 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1) ...@@ -7183,8 +7172,7 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
16-bit instructions in case the second half causes an 16-bit instructions in case the second half causes an
prefetch abort. */ prefetch abort. */
offset = ((int32_t)insn << 21) >> 9; offset = ((int32_t)insn << 21) >> 9;
gen_op_movl_T0_im(s->pc + 2 + offset); tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
gen_movl_reg_T0(s, 14);
return 0; return 0;
} }
/* Fall through to 32-bit decode. */ /* Fall through to 32-bit decode. */
...@@ -7463,7 +7451,7 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1) ...@@ -7463,7 +7451,7 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
conds = (insn & (1 << 20)) != 0; conds = (insn & (1 << 20)) != 0;
logic_cc = (conds && thumb2_logic_op(op)); logic_cc = (conds && thumb2_logic_op(op));
gen_arm_shift_im(cpu_T[1], shiftop, shift, logic_cc); gen_arm_shift_im(cpu_T[1], shiftop, shift, logic_cc);
if (gen_thumb2_data_op(s, op, conds, 0)) if (gen_thumb2_data_op(s, op, conds, 0, cpu_T[0], cpu_T[1]))
goto illegal_op; goto illegal_op;
if (rd != 15) if (rd != 15)
gen_movl_reg_T0(s, rd); gen_movl_reg_T0(s, rd);
...@@ -8025,7 +8013,7 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1) ...@@ -8025,7 +8013,7 @@ static int disas_thumb2_insn(CPUState *env, DisasContext *s, uint16_t insn_hw1)
gen_movl_T0_reg(s, rn); gen_movl_T0_reg(s, rn);
op = (insn >> 21) & 0xf; op = (insn >> 21) & 0xf;
if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0, if (gen_thumb2_data_op(s, op, (insn & (1 << 20)) != 0,
shifter_out)) shifter_out, cpu_T[0], cpu_T[1]))
goto illegal_op; goto illegal_op;
rd = (insn >> 8) & 0xf; rd = (insn >> 8) & 0xf;
if (rd != 15) { if (rd != 15) {
...@@ -8173,32 +8161,35 @@ static void disas_thumb_insn(CPUState *env, DisasContext *s) ...@@ -8173,32 +8161,35 @@ static void disas_thumb_insn(CPUState *env, DisasContext *s)
switch (insn >> 12) { switch (insn >> 12) {
case 0: case 1: case 0: case 1:
rd = insn & 7; rd = insn & 7;
op = (insn >> 11) & 3; op = (insn >> 11) & 3;
if (op == 3) { if (op == 3) {
/* add/subtract */ /* add/subtract */
rn = (insn >> 3) & 7; rn = (insn >> 3) & 7;
gen_movl_T0_reg(s, rn); tmp = load_reg(s, rn);
if (insn & (1 << 10)) { if (insn & (1 << 10)) {
/* immediate */ /* immediate */
gen_op_movl_T1_im((insn >> 6) & 7); tmp2 = new_tmp();
tcg_gen_movi_i32(tmp2, (insn >> 6) & 7);
} else { } else {
/* reg */ /* reg */
rm = (insn >> 6) & 7; rm = (insn >> 6) & 7;
gen_movl_T1_reg(s, rm); tmp2 = load_reg(s, rm);
} }
if (insn & (1 << 9)) { if (insn & (1 << 9)) {
if (s->condexec_mask) if (s->condexec_mask)
gen_op_subl_T0_T1(); tcg_gen_sub_i32(tmp, tmp, tmp2);
else else
gen_op_subl_T0_T1_cc(); gen_helper_sub_cc(tmp, tmp, tmp2);
} else { } else {
if (s->condexec_mask) if (s->condexec_mask)
gen_op_addl_T0_T1(); tcg_gen_add_i32(tmp, tmp, tmp2);
else else
gen_op_addl_T0_T1_cc(); gen_helper_add_cc(tmp, tmp, tmp2);
} }
gen_movl_reg_T0(s, rd); dead_tmp(tmp2);
store_reg(s, rd, tmp);
} else { } else {
/* shift immediate */ /* shift immediate */
rm = (insn >> 3) & 7; rm = (insn >> 3) & 7;
...@@ -8214,35 +8205,40 @@ static void disas_thumb_insn(CPUState *env, DisasContext *s) ...@@ -8214,35 +8205,40 @@ static void disas_thumb_insn(CPUState *env, DisasContext *s)
/* arithmetic large immediate */ /* arithmetic large immediate */
op = (insn >> 11) & 3; op = (insn >> 11) & 3;
rd = (insn >> 8) & 0x7; rd = (insn >> 8) & 0x7;
if (op == 0) { if (op == 0) { /* mov */
gen_op_movl_T0_im(insn & 0xff); tmp = new_tmp();
} else { tcg_gen_movi_i32(tmp, insn & 0xff);
gen_movl_T0_reg(s, rd);
gen_op_movl_T1_im(insn & 0xff);
}
switch (op) {
case 0: /* mov */
if (!s->condexec_mask) if (!s->condexec_mask)
gen_op_logic_T0_cc(); gen_logic_CC(tmp);
break; store_reg(s, rd, tmp);
case 1: /* cmp */ } else {
gen_op_subl_T0_T1_cc(); tmp = load_reg(s, rd);
break; tmp2 = new_tmp();
case 2: /* add */ tcg_gen_movi_i32(tmp2, insn & 0xff);
if (s->condexec_mask) switch (op) {
gen_op_addl_T0_T1(); case 1: /* cmp */
else gen_helper_sub_cc(tmp, tmp, tmp2);
gen_op_addl_T0_T1_cc(); dead_tmp(tmp);
break; dead_tmp(tmp2);
case 3: /* sub */ break;
if (s->condexec_mask) case 2: /* add */
gen_op_subl_T0_T1(); if (s->condexec_mask)
else tcg_gen_add_i32(tmp, tmp, tmp2);
gen_op_subl_T0_T1_cc(); else
break; gen_helper_add_cc(tmp, tmp, tmp2);
dead_tmp(tmp2);
store_reg(s, rd, tmp);
break;
case 3: /* sub */
if (s->condexec_mask)
tcg_gen_sub_i32(tmp, tmp, tmp2);
else
gen_helper_sub_cc(tmp, tmp, tmp2);
dead_tmp(tmp2);
store_reg(s, rd, tmp);
break;
}
} }
if (op != 1)
gen_movl_reg_T0(s, rd);
break; break;
case 4: case 4:
if (insn & (1 << 11)) { if (insn & (1 << 11)) {
...@@ -8264,19 +8260,22 @@ static void disas_thumb_insn(CPUState *env, DisasContext *s) ...@@ -8264,19 +8260,22 @@ static void disas_thumb_insn(CPUState *env, DisasContext *s)
op = (insn >> 8) & 3; op = (insn >> 8) & 3;
switch (op) { switch (op) {
case 0: /* add */ case 0: /* add */
gen_movl_T0_reg(s, rd); tmp = load_reg(s, rd);
gen_movl_T1_reg(s, rm); tmp2 = load_reg(s, rm);
gen_op_addl_T0_T1(); tcg_gen_add_i32(tmp, tmp, tmp2);
gen_movl_reg_T0(s, rd); dead_tmp(tmp2);
store_reg(s, rd, tmp);
break; break;
case 1: /* cmp */ case 1: /* cmp */
gen_movl_T0_reg(s, rd); tmp = load_reg(s, rd);
gen_movl_T1_reg(s, rm); tmp2 = load_reg(s, rm);
gen_op_subl_T0_T1_cc(); gen_helper_sub_cc(tmp, tmp, tmp2);
dead_tmp(tmp2);
dead_tmp(tmp);
break; break;
case 2: /* mov/cpy */ case 2: /* mov/cpy */
gen_movl_T0_reg(s, rm); tmp = load_reg(s, rm);
gen_movl_reg_T0(s, rd); store_reg(s, rd, tmp);
break; break;
case 3:/* branch [and link] exchange thumb register */ case 3:/* branch [and link] exchange thumb register */
tmp = load_reg(s, rm); tmp = load_reg(s, rm);
...@@ -8306,114 +8305,125 @@ static void disas_thumb_insn(CPUState *env, DisasContext *s) ...@@ -8306,114 +8305,125 @@ static void disas_thumb_insn(CPUState *env, DisasContext *s)
val = 0; val = 0;
} }
if (op == 9) /* neg */ if (op == 9) { /* neg */
gen_op_movl_T0_im(0); tmp = new_tmp();
else if (op != 0xf) /* mvn doesn't read its first operand */ tcg_gen_movi_i32(tmp, 0);
gen_movl_T0_reg(s, rd); } else if (op != 0xf) { /* mvn doesn't read its first operand */
tmp = load_reg(s, rd);
} else {
TCGV_UNUSED(tmp);
}
gen_movl_T1_reg(s, rm); tmp2 = load_reg(s, rm);
switch (op) { switch (op) {
case 0x0: /* and */ case 0x0: /* and */
gen_op_andl_T0_T1(); tcg_gen_and_i32(tmp, tmp, tmp2);
if (!s->condexec_mask) if (!s->condexec_mask)
gen_op_logic_T0_cc(); gen_logic_CC(tmp);
break; break;
case 0x1: /* eor */ case 0x1: /* eor */
gen_op_xorl_T0_T1(); tcg_gen_xor_i32(tmp, tmp, tmp2);
if (!s->condexec_mask) if (!s->condexec_mask)
gen_op_logic_T0_cc(); gen_logic_CC(tmp);
break; break;
case 0x2: /* lsl */ case 0x2: /* lsl */
if (s->condexec_mask) { if (s->condexec_mask) {
gen_helper_shl(cpu_T[1], cpu_T[1], cpu_T[0]); gen_helper_shl(tmp2, tmp2, tmp);
} else { } else {
gen_helper_shl_cc(cpu_T[1], cpu_T[1], cpu_T[0]); gen_helper_shl_cc(tmp2, tmp2, tmp);
gen_op_logic_T1_cc(); gen_logic_CC(tmp2);
} }
break; break;
case 0x3: /* lsr */ case 0x3: /* lsr */
if (s->condexec_mask) { if (s->condexec_mask) {
gen_helper_shr(cpu_T[1], cpu_T[1], cpu_T[0]); gen_helper_shr(tmp2, tmp2, tmp);
} else { } else {
gen_helper_shr_cc(cpu_T[1], cpu_T[1], cpu_T[0]); gen_helper_shr_cc(tmp2, tmp2, tmp);
gen_op_logic_T1_cc(); gen_logic_CC(tmp2);
} }
break; break;
case 0x4: /* asr */ case 0x4: /* asr */
if (s->condexec_mask) { if (s->condexec_mask) {
gen_helper_sar(cpu_T[1], cpu_T[1], cpu_T[0]); gen_helper_sar(tmp2, tmp2, tmp);
} else { } else {
gen_helper_sar_cc(cpu_T[1], cpu_T[1], cpu_T[0]); gen_helper_sar_cc(tmp2, tmp2, tmp);
gen_op_logic_T1_cc(); gen_logic_CC(tmp2);
} }
break; break;
case 0x5: /* adc */ case 0x5: /* adc */
if (s->condexec_mask) if (s->condexec_mask)
gen_adc_T0_T1(); gen_adc(tmp, tmp2);
else else
gen_op_adcl_T0_T1_cc(); gen_helper_adc_cc(tmp, tmp, tmp2);
break; break;
case 0x6: /* sbc */ case 0x6: /* sbc */
if (s->condexec_mask) if (s->condexec_mask)
gen_sbc_T0_T1(); gen_sub_carry(tmp, tmp, tmp2);
else else
gen_op_sbcl_T0_T1_cc(); gen_helper_sbc_cc(tmp, tmp, tmp2);
break; break;
case 0x7: /* ror */ case 0x7: /* ror */
if (s->condexec_mask) { if (s->condexec_mask) {
gen_helper_ror(cpu_T[1], cpu_T[1], cpu_T[0]); gen_helper_ror(tmp2, tmp2, tmp);
} else { } else {
gen_helper_ror_cc(cpu_T[1], cpu_T[1], cpu_T[0]); gen_helper_ror_cc(tmp2, tmp2, tmp);
gen_op_logic_T1_cc(); gen_logic_CC(tmp2);
} }
break; break;
case 0x8: /* tst */ case 0x8: /* tst */
gen_op_andl_T0_T1(); tcg_gen_and_i32(tmp, tmp, tmp2);
gen_op_logic_T0_cc(); gen_logic_CC(tmp);
rd = 16; rd = 16;
break; break;
case 0x9: /* neg */ case 0x9: /* neg */
if (s->condexec_mask) if (s->condexec_mask)
tcg_gen_neg_i32(cpu_T[0], cpu_T[1]); tcg_gen_neg_i32(tmp, tmp2);
else else
gen_op_subl_T0_T1_cc(); gen_helper_sub_cc(tmp, tmp, tmp2);
break; break;
case 0xa: /* cmp */ case 0xa: /* cmp */
gen_op_subl_T0_T1_cc(); gen_helper_sub_cc(tmp, tmp, tmp2);
rd = 16; rd = 16;
break; break;
case 0xb: /* cmn */ case 0xb: /* cmn */
gen_op_addl_T0_T1_cc(); gen_helper_add_cc(tmp, tmp, tmp2);
rd = 16; rd = 16;
break; break;
case 0xc: /* orr */ case 0xc: /* orr */
gen_op_orl_T0_T1(); tcg_gen_or_i32(tmp, tmp, tmp2);
if (!s->condexec_mask) if (!s->condexec_mask)
gen_op_logic_T0_cc(); gen_logic_CC(tmp);
break; break;
case 0xd: /* mul */ case 0xd: /* mul */
gen_op_mull_T0_T1(); gen_mull(tmp, tmp2);
if (!s->condexec_mask) if (!s->condexec_mask)
gen_op_logic_T0_cc(); gen_logic_CC(tmp);
break; break;
case 0xe: /* bic */ case 0xe: /* bic */
gen_op_bicl_T0_T1(); tcg_gen_bic_i32(tmp, tmp, tmp2);
if (!s->condexec_mask) if (!s->condexec_mask)
gen_op_logic_T0_cc(); gen_logic_CC(tmp);
break; break;
case 0xf: /* mvn */ case 0xf: /* mvn */
gen_op_notl_T1(); tcg_gen_not_i32(tmp2, tmp2);
if (!s->condexec_mask) if (!s->condexec_mask)
gen_op_logic_T1_cc(); gen_logic_CC(tmp2);
val = 1; val = 1;
rm = rd; rm = rd;
break; break;
} }
if (rd != 16) { if (rd != 16) {
if (val) if (val) {
gen_movl_reg_T1(s, rm); store_reg(s, rm, tmp2);
else if (op != 0xf)
gen_movl_reg_T0(s, rd); dead_tmp(tmp);
} else {
store_reg(s, rd, tmp);
dead_tmp(tmp2);
}
} else {
dead_tmp(tmp);
dead_tmp(tmp2);
} }
break; break;
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册