Commit c6f29ff0 authored by Richard Henderson

tcg-i386: Tidy qemu_ld/st slow path

Use existing stack space for arguments; don't push/pop.
Use less ifdefs and more C ifs.
Signed-off-by: Richard Henderson <rth@twiddle.net>
Parent 8023ccda
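Why this helps, in brief: on 32-bit x86 the MMU helpers are called with the cdecl convention, so every argument is passed on the stack, and the TCG prologue already reserves static stack space for helper calls (TCG_STATIC_CALL_ARGS_SIZE). The slow path can therefore write its arguments at fixed offsets from ESP instead of pushing them and repairing ESP after the call. A minimal before/after sketch of the 32-bit load path, assuming TARGET_LONG_BITS == 32 (names and offsets taken from the diff below; not a standalone program):

    /* Before: push arguments right-to-left, then undo the ESP
       adjustment after the helper returns.  The resulting layout is
       env at [ESP], addr at [ESP+4], mem_index at [ESP+8].  */
    tcg_out_pushi(s, mem_index);              /* arg 2: mem_index         */
    tcg_out_push(s, addrlo_reg);              /* arg 1: guest address     */
    tcg_out_push(s, TCG_AREG0);               /* arg 0: CPUArchState *env */
    /* ... call the MMU helper ... */
    tcg_out_addi(s, TCG_REG_CALL_STACK, 12);  /* discard the three words  */

    /* After: store the same three words into the reserved area; ESP
       never moves, so no post-call cleanup is needed.  */
    int ofs = 0;
    tcg_out_st(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP, ofs);      /* env  */
    ofs += 4;
    tcg_out_st(s, TCG_TYPE_I32, l->addrlo_reg, TCG_REG_ESP, ofs);  /* addr */
    ofs += 4;
    tcg_out_sti(s, TCG_TYPE_I32, TCG_REG_ESP, ofs, l->mem_index);  /* index */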
@@ -608,6 +608,14 @@ static inline void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
     tcg_out_modrm_offset(s, opc, arg, arg1, arg2);
 }
 
+static inline void tcg_out_sti(TCGContext *s, TCGType type, TCGReg base,
+                               tcg_target_long ofs, tcg_target_long val)
+{
+    int opc = OPC_MOVL_EvIz + (type == TCG_TYPE_I64 ? P_REXW : 0);
+    tcg_out_modrm_offset(s, opc, 0, base, ofs);
+    tcg_out32(s, val);
+}
+
 static void tcg_out_shifti(TCGContext *s, int subopc, int reg, int count)
 {
     /* Propagate an opcode prefix, such as P_DATA16. */
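A note on the helper added above (a reading aid, not part of the commit): OPC_MOVL_EvIz is the x86 "mov r/m32, imm32" encoding (opcode 0xC7, /0), so tcg_out_sti stores a 32-bit constant directly to [base+ofs] without clobbering a register; with P_REXW it becomes the REX.W form with a sign-extended immediate. The slow paths below use it to drop mem_index into an argument slot, e.g.:

    tcg_out_sti(s, TCG_TYPE_I32, TCG_REG_ESP, ofs, l->mem_index);
    /* emits: movl $mem_index, ofs(%esp) */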
@@ -1463,22 +1471,12 @@ static void add_qemu_ldst_label(TCGContext *s,
 /*
  * Generate code for the slow path for a load at the end of block
  */
-static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *label)
+static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
 {
-    int s_bits;
-    int opc = label->opc;
-    int mem_index = label->mem_index;
-#if TCG_TARGET_REG_BITS == 32
-    int stack_adjust;
-    int addrlo_reg = label->addrlo_reg;
-    int addrhi_reg = label->addrhi_reg;
-#endif
-    int data_reg = label->datalo_reg;
-    int data_reg2 = label->datahi_reg;
-    uint8_t *raddr = label->raddr;
-    uint8_t **label_ptr = &label->label_ptr[0];
-
-    s_bits = opc & 3;
+    int opc = l->opc;
+    int s_bits = opc & 3;
+    TCGReg data_reg;
+    uint8_t **label_ptr = &l->label_ptr[0];
 
     /* resolve label address */
     *(uint32_t *)label_ptr[0] = (uint32_t)(s->code_ptr - label_ptr[0] - 4);
@@ -1486,22 +1484,27 @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *label)
         *(uint32_t *)label_ptr[1] = (uint32_t)(s->code_ptr - label_ptr[1] - 4);
     }
 
-#if TCG_TARGET_REG_BITS == 32
-    tcg_out_pushi(s, mem_index);
-    stack_adjust = 4;
-    if (TARGET_LONG_BITS == 64) {
-        tcg_out_push(s, addrhi_reg);
-        stack_adjust += 4;
+    if (TCG_TARGET_REG_BITS == 32) {
+        int ofs = 0;
+
+        tcg_out_st(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP, ofs);
+        ofs += 4;
+
+        tcg_out_st(s, TCG_TYPE_I32, l->addrlo_reg, TCG_REG_ESP, ofs);
+        ofs += 4;
+
+        if (TARGET_LONG_BITS == 64) {
+            tcg_out_st(s, TCG_TYPE_I32, l->addrhi_reg, TCG_REG_ESP, ofs);
+            ofs += 4;
+        }
+
+        tcg_out_sti(s, TCG_TYPE_I32, TCG_REG_ESP, ofs, l->mem_index);
+    } else {
+        tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[0], TCG_AREG0);
+        /* The second argument is already loaded with addrlo. */
+        tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[2],
+                     l->mem_index);
     }
-    tcg_out_push(s, addrlo_reg);
-    stack_adjust += 4;
-    tcg_out_push(s, TCG_AREG0);
-    stack_adjust += 4;
-#else
-    tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[0], TCG_AREG0);
-    /* The second argument is already loaded with addrlo. */
-    tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[2], mem_index);
-#endif
 
     /* Code generation of qemu_ld/st's slow path calling MMU helper
@@ -1520,18 +1523,10 @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *label)
     tcg_out8(s, 5);
     /* Dummy backward jump having information of fast path'pc for MMU helpers */
     tcg_out8(s, OPC_JMP_long);
-    *(int32_t *)s->code_ptr = (int32_t)(raddr - s->code_ptr - 4);
+    *(int32_t *)s->code_ptr = (int32_t)(l->raddr - s->code_ptr - 4);
     s->code_ptr += 4;
 
-#if TCG_TARGET_REG_BITS == 32
-    if (stack_adjust == (TCG_TARGET_REG_BITS / 8)) {
-        /* Pop and discard. This is 2 bytes smaller than the add. */
-        tcg_out_pop(s, TCG_REG_ECX);
-    } else if (stack_adjust != 0) {
-        tcg_out_addi(s, TCG_REG_CALL_STACK, stack_adjust);
-    }
-#endif
-
+    data_reg = l->datalo_reg;
     switch(opc) {
     case 0 | 4:
         tcg_out_ext8s(s, data_reg, TCG_REG_EAX, P_REXW);
@@ -1559,10 +1554,10 @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *label)
         } else if (data_reg == TCG_REG_EDX) {
             /* xchg %edx, %eax */
             tcg_out_opc(s, OPC_XCHG_ax_r32 + TCG_REG_EDX, 0, 0, 0);
-            tcg_out_mov(s, TCG_TYPE_I32, data_reg2, TCG_REG_EAX);
+            tcg_out_mov(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_EAX);
         } else {
             tcg_out_mov(s, TCG_TYPE_I32, data_reg, TCG_REG_EAX);
-            tcg_out_mov(s, TCG_TYPE_I32, data_reg2, TCG_REG_EDX);
+            tcg_out_mov(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_EDX);
         }
         break;
     default:
@@ -1570,28 +1565,17 @@ static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *label)
     }
 
     /* Jump to the code corresponding to next IR of qemu_st */
-    tcg_out_jmp(s, (tcg_target_long)raddr);
+    tcg_out_jmp(s, (tcg_target_long)l->raddr);
 }
 
 /*
  * Generate code for the slow path for a store at the end of block
  */
-static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *label)
+static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
 {
-    int s_bits;
-    int stack_adjust;
-    int opc = label->opc;
-    int mem_index = label->mem_index;
-    int data_reg = label->datalo_reg;
-#if TCG_TARGET_REG_BITS == 32
-    int data_reg2 = label->datahi_reg;
-    int addrlo_reg = label->addrlo_reg;
-    int addrhi_reg = label->addrhi_reg;
-#endif
-    uint8_t *raddr = label->raddr;
-    uint8_t **label_ptr = &label->label_ptr[0];
-
-    s_bits = opc & 3;
+    int opc = l->opc;
+    int s_bits = opc & 3;
+    uint8_t **label_ptr = &l->label_ptr[0];
 
     /* resolve label address */
     *(uint32_t *)label_ptr[0] = (uint32_t)(s->code_ptr - label_ptr[0] - 4);
@@ -1599,31 +1583,37 @@ static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *label)
         *(uint32_t *)label_ptr[1] = (uint32_t)(s->code_ptr - label_ptr[1] - 4);
     }
 
-#if TCG_TARGET_REG_BITS == 32
-    tcg_out_pushi(s, mem_index);
-    stack_adjust = 4;
-    if (opc == 3) {
-        tcg_out_push(s, data_reg2);
-        stack_adjust += 4;
-    }
-    tcg_out_push(s, data_reg);
-    stack_adjust += 4;
-    if (TARGET_LONG_BITS == 64) {
-        tcg_out_push(s, addrhi_reg);
-        stack_adjust += 4;
+    if (TCG_TARGET_REG_BITS == 32) {
+        int ofs = 0;
+
+        tcg_out_st(s, TCG_TYPE_PTR, TCG_AREG0, TCG_REG_ESP, ofs);
+        ofs += 4;
+
+        tcg_out_st(s, TCG_TYPE_I32, l->addrlo_reg, TCG_REG_ESP, ofs);
+        ofs += 4;
+
+        if (TARGET_LONG_BITS == 64) {
+            tcg_out_st(s, TCG_TYPE_I32, l->addrhi_reg, TCG_REG_ESP, ofs);
+            ofs += 4;
+        }
+
+        tcg_out_st(s, TCG_TYPE_I32, l->datalo_reg, TCG_REG_ESP, ofs);
+        ofs += 4;
+
+        if (opc == 3) {
+            tcg_out_st(s, TCG_TYPE_I32, l->datahi_reg, TCG_REG_ESP, ofs);
+            ofs += 4;
+        }
+
+        tcg_out_sti(s, TCG_TYPE_I32, TCG_REG_ESP, ofs, l->mem_index);
+    } else {
+        tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[0], TCG_AREG0);
+        /* The second argument is already loaded with addrlo. */
+        tcg_out_mov(s, (opc == 3 ? TCG_TYPE_I64 : TCG_TYPE_I32),
+                    tcg_target_call_iarg_regs[2], l->datalo_reg);
+        tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[3],
+                     l->mem_index);
     }
-    tcg_out_push(s, addrlo_reg);
-    stack_adjust += 4;
-    tcg_out_push(s, TCG_AREG0);
-    stack_adjust += 4;
-#else
-    tcg_out_mov(s, TCG_TYPE_I64, tcg_target_call_iarg_regs[0], TCG_AREG0);
-    /* The second argument is already loaded with addrlo. */
-    tcg_out_mov(s, (opc == 3 ? TCG_TYPE_I64 : TCG_TYPE_I32),
-                tcg_target_call_iarg_regs[2], data_reg);
-    tcg_out_movi(s, TCG_TYPE_I32, tcg_target_call_iarg_regs[3], mem_index);
-    stack_adjust = 0;
-#endif
 
     /* Code generation of qemu_ld/st's slow path calling MMU helper
@@ -1642,18 +1632,11 @@ static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *label)
     tcg_out8(s, 5);
     /* Dummy backward jump having information of fast path'pc for MMU helpers */
     tcg_out8(s, OPC_JMP_long);
-    *(int32_t *)s->code_ptr = (int32_t)(raddr - s->code_ptr - 4);
+    *(int32_t *)s->code_ptr = (int32_t)(l->raddr - s->code_ptr - 4);
     s->code_ptr += 4;
 
-    if (stack_adjust == (TCG_TARGET_REG_BITS / 8)) {
-        /* Pop and discard. This is 2 bytes smaller than the add. */
-        tcg_out_pop(s, TCG_REG_ECX);
-    } else if (stack_adjust != 0) {
-        tcg_out_addi(s, TCG_REG_CALL_STACK, stack_adjust);
-    }
-
     /* Jump to the code corresponding to next IR of qemu_st */
-    tcg_out_jmp(s, (tcg_target_long)raddr);
+    tcg_out_jmp(s, (tcg_target_long)l->raddr);
 }
 
 /*
...