Commit e1dcf352 authored by Richard Henderson

tcg: Add TCG_TARGET_HAS_MEMORY_BSWAP

For now, defined universally as true, since we previously required
backends to implement swapped memory operations.  Future patches
may now remove that support where it is onerous.
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Parent 6498594c
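
What the new macro buys is visible in the tcg-op.c hunk further down: when a backend defines TCG_TARGET_HAS_MEMORY_BSWAP as 0, the expanders strip MO_BSWAP from the memory op, emit a plain load or store, and wrap it in an explicit bswap; for narrow signed loads the sign extension is also deferred until after the swap, because the bswap primitive requires zero-extended input. A minimal standalone C sketch of why that ordering matters (plain C for illustration, not QEMU APIs; bswap16_like_tcg is a made-up helper mirroring the kind of shift/or expansion a backend without a native bswap16 falls back to):

#include <stdint.h>
#include <stdio.h>

/* Swap the two low bytes; correct only when bits 16..31 are clear. */
static uint32_t bswap16_like_tcg(uint32_t x)
{
    return ((x & 0xff) << 8) | (x >> 8);
}

int main(void)
{
    /* Guest memory holds the big-endian int16 value -2, i.e. bytes ff fe;
     * a little-endian host reads those two bytes as 0xfeff. */
    uint32_t zext_load = 0x0000feffu;                        /* loaded as MO_UW */
    uint32_t sext_load = (uint32_t)(int32_t)(int16_t)0xfeff; /* loaded as MO_SW: 0xfffffeff */

    /* Order generated by this patch: zero-extended load, swap, then sign-extend. */
    int good = (int16_t)bswap16_like_tcg(zext_load);

    /* Swapping a sign-extended value violates the primitive's zero-extension
     * requirement: with this expansion the register holds 0x00fffffe instead
     * of the expected 0x0000fffe. */
    unsigned bad = bswap16_like_tcg(sext_load);

    printf("swap then sign-extend:       %d\n", good);     /* -2 */
    printf("swap of sign-extended input: 0x%08x\n", bad);  /* 0x00fffffe */
    return 0;
}
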
@@ -137,6 +137,7 @@ typedef enum {
#define TCG_TARGET_HAS_mul_vec 1
#define TCG_TARGET_DEFAULT_MO (0)
#define TCG_TARGET_HAS_MEMORY_BSWAP 1
static inline void flush_icache_range(uintptr_t start, uintptr_t stop)
{
......
@@ -131,6 +131,7 @@ enum {
};
#define TCG_TARGET_DEFAULT_MO (0)
#define TCG_TARGET_HAS_MEMORY_BSWAP 1
static inline void flush_icache_range(uintptr_t start, uintptr_t stop)
{
......
@@ -220,6 +220,8 @@ static inline void tb_target_set_jmp_target(uintptr_t tc_ptr,
#define TCG_TARGET_DEFAULT_MO (TCG_MO_ALL & ~TCG_MO_ST_LD)
#define TCG_TARGET_HAS_MEMORY_BSWAP 1
#ifdef CONFIG_SOFTMMU
#define TCG_TARGET_NEED_LDST_LABELS
#endif
......
@@ -203,6 +203,7 @@ extern bool use_mips32r2_instructions;
#endif
#define TCG_TARGET_DEFAULT_MO (0)
#define TCG_TARGET_HAS_MEMORY_BSWAP 1
static inline void flush_icache_range(uintptr_t start, uintptr_t stop)
{
......
@@ -128,6 +128,7 @@ void flush_icache_range(uintptr_t start, uintptr_t stop);
void tb_target_set_jmp_target(uintptr_t, uintptr_t, uintptr_t);
#define TCG_TARGET_DEFAULT_MO (0)
#define TCG_TARGET_HAS_MEMORY_BSWAP 1
#ifdef CONFIG_SOFTMMU
#define TCG_TARGET_NEED_LDST_LABELS
......
@@ -135,6 +135,7 @@ extern uint64_t s390_facilities;
#define TCG_TARGET_CALL_STACK_OFFSET 160
#define TCG_TARGET_EXTEND_ARGS 1
#define TCG_TARGET_HAS_MEMORY_BSWAP 1
#define TCG_TARGET_DEFAULT_MO (TCG_MO_ALL & ~TCG_MO_ST_LD)
......
@@ -164,6 +164,7 @@ extern bool use_vis3_instructions;
#define TCG_AREG0 TCG_REG_I0
#define TCG_TARGET_DEFAULT_MO (0)
#define TCG_TARGET_HAS_MEMORY_BSWAP 1
static inline void flush_icache_range(uintptr_t start, uintptr_t stop)
{
......
@@ -2694,25 +2694,78 @@ static void tcg_gen_req_mo(TCGBar type)
void tcg_gen_qemu_ld_i32(TCGv_i32 val, TCGv addr, TCGArg idx, TCGMemOp memop)
{
TCGMemOp orig_memop;
tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
memop = tcg_canonicalize_memop(memop, 0, 0);
trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env,
addr, trace_mem_get_info(memop, 0));
orig_memop = memop;
if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) {
memop &= ~MO_BSWAP;
/* The bswap primitive requires zero-extended input. */
if ((memop & MO_SSIZE) == MO_SW) {
memop &= ~MO_SIGN;
}
}
gen_ldst_i32(INDEX_op_qemu_ld_i32, val, addr, memop, idx);
if ((orig_memop ^ memop) & MO_BSWAP) {
switch (orig_memop & MO_SIZE) {
case MO_16:
tcg_gen_bswap16_i32(val, val);
if (orig_memop & MO_SIGN) {
tcg_gen_ext16s_i32(val, val);
}
break;
case MO_32:
tcg_gen_bswap32_i32(val, val);
break;
default:
g_assert_not_reached();
}
}
}
void tcg_gen_qemu_st_i32(TCGv_i32 val, TCGv addr, TCGArg idx, TCGMemOp memop)
{
TCGv_i32 swap = NULL;
tcg_gen_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
memop = tcg_canonicalize_memop(memop, 0, 1);
trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env,
addr, trace_mem_get_info(memop, 1));
if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) {
swap = tcg_temp_new_i32();
switch (memop & MO_SIZE) {
case MO_16:
tcg_gen_ext16u_i32(swap, val);
tcg_gen_bswap16_i32(swap, swap);
break;
case MO_32:
tcg_gen_bswap32_i32(swap, val);
break;
default:
g_assert_not_reached();
}
val = swap;
memop &= ~MO_BSWAP;
}
gen_ldst_i32(INDEX_op_qemu_st_i32, val, addr, memop, idx);
if (swap) {
tcg_temp_free_i32(swap);
}
}
void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, TCGMemOp memop)
{
tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
TCGMemOp orig_memop;
if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) {
tcg_gen_qemu_ld_i32(TCGV_LOW(val), addr, idx, memop);
if (memop & MO_SIGN) {
@@ -2723,24 +2776,85 @@ void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, TCGMemOp memop)
return;
}
tcg_gen_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
memop = tcg_canonicalize_memop(memop, 1, 0);
trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env,
addr, trace_mem_get_info(memop, 0));
orig_memop = memop;
if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) {
memop &= ~MO_BSWAP;
/* The bswap primitive requires zero-extended input. */
if ((memop & MO_SIGN) && (memop & MO_SIZE) < MO_64) {
memop &= ~MO_SIGN;
}
}
gen_ldst_i64(INDEX_op_qemu_ld_i64, val, addr, memop, idx);
if ((orig_memop ^ memop) & MO_BSWAP) {
switch (orig_memop & MO_SIZE) {
case MO_16:
tcg_gen_bswap16_i64(val, val);
if (orig_memop & MO_SIGN) {
tcg_gen_ext16s_i64(val, val);
}
break;
case MO_32:
tcg_gen_bswap32_i64(val, val);
if (orig_memop & MO_SIGN) {
tcg_gen_ext32s_i64(val, val);
}
break;
case MO_64:
tcg_gen_bswap64_i64(val, val);
break;
default:
g_assert_not_reached();
}
}
}
void tcg_gen_qemu_st_i64(TCGv_i64 val, TCGv addr, TCGArg idx, TCGMemOp memop)
{
tcg_gen_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
TCGv_i64 swap = NULL;
if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) {
tcg_gen_qemu_st_i32(TCGV_LOW(val), addr, idx, memop);
return;
}
tcg_gen_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
memop = tcg_canonicalize_memop(memop, 1, 1);
trace_guest_mem_before_tcg(tcg_ctx->cpu, cpu_env,
addr, trace_mem_get_info(memop, 1));
if (!TCG_TARGET_HAS_MEMORY_BSWAP && (memop & MO_BSWAP)) {
swap = tcg_temp_new_i64();
switch (memop & MO_SIZE) {
case MO_16:
tcg_gen_ext16u_i64(swap, val);
tcg_gen_bswap16_i64(swap, swap);
break;
case MO_32:
tcg_gen_ext32u_i64(swap, val);
tcg_gen_bswap32_i64(swap, swap);
break;
case MO_64:
tcg_gen_bswap64_i64(swap, val);
break;
default:
g_assert_not_reached();
}
val = swap;
memop &= ~MO_BSWAP;
}
gen_ldst_i64(INDEX_op_qemu_st_i64, val, addr, memop, idx);
if (swap) {
tcg_temp_free_i64(swap);
}
}
static void tcg_gen_ext_i32(TCGv_i32 ret, TCGv_i32 val, TCGMemOp opc)
......
@@ -198,6 +198,8 @@ static inline void flush_icache_range(uintptr_t start, uintptr_t stop)
We prefer consistency across hosts on this. */
#define TCG_TARGET_DEFAULT_MO (0)
#define TCG_TARGET_HAS_MEMORY_BSWAP 1
static inline void tb_target_set_jmp_target(uintptr_t tc_ptr,
uintptr_t jmp_addr, uintptr_t addr)
{
......
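
With the generic fallback in tcg-op.c above, MO_BSWAP never has to reach a backend that declares it cannot handle it: the expander clears the flag and emits the swap itself. A hypothetical opt-out would then be a one-line change in that backend's tcg-target.h (illustration only; this patch keeps the macro at 1 for every in-tree backend, and only later patches are expected to flip it where the swapped paths are onerous):

/* Hypothetical tcg-target.h fragment: this backend cannot issue
 * byte-swapped loads/stores directly, so tcg_gen_qemu_ld_* and
 * tcg_gen_qemu_st_* will strip MO_BSWAP and surround the plain access
 * with explicit bswap (and re-extension) ops instead. */
#define TCG_TARGET_HAS_MEMORY_BSWAP 0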