Commit dfbc799d authored by aurel32

target-ppc: convert load/store string instructions to TCG

Signed-off-by: Aurelien Jarno <aurelien@aurel32.net>

git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@5828 c046a42c-6fe2-441c-8c8c-71466251a162
Parent 37d269df
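This commit drops the dyngen-style micro-ops (op_lswi, op_lswx, op_stsw and their per-MMU-mode gen_op dispatch tables) and replaces them with C helpers declared in helper.h and called from the translator through gen_helper_* functions. As rough orientation for the diff below, a minimal sketch of that pattern, with a hypothetical helper_foo standing in for the real helpers and with nb, reg and ctx assumed to be already decoded inside a GEN_HANDLER body:

    /* helper.h: declaring the helper makes TCG emit a matching gen_helper_foo() */
    DEF_HELPER_3(foo, void, tl, i32, i32)

    /* op_helper.c: the helper runs at execution time against the CPU state in env */
    void helper_foo(target_ulong addr, uint32_t nb, uint32_t reg)
    {
        /* ... operate on guest memory and env->gpr[reg] ... */
    }

    /* translate.c: at translation time, materialise the arguments in TCG
     * temporaries, emit a call to the helper, then free the temporaries */
    TCGv t0 = tcg_temp_new();
    TCGv_i32 t1, t2;
    gen_addr_register(t0, ctx);
    t1 = tcg_const_i32(nb);
    t2 = tcg_const_i32(reg);
    gen_helper_foo(t0, t1, t2);
    tcg_temp_free(t0);
    tcg_temp_free_i32(t1);
    tcg_temp_free_i32(t2);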
@@ -9,6 +9,9 @@ DEF_HELPER_3(td, void, tl, tl, i32)
DEF_HELPER_2(lmw, void, tl, i32)
DEF_HELPER_2(stmw, void, tl, i32)
DEF_HELPER_3(lsw, void, tl, i32, i32)
DEF_HELPER_4(lswx, void, tl, i32, i32, i32)
DEF_HELPER_3(stsw, void, tl, i32, i32)
DEF_HELPER_1(dcbz, void, tl)
DEF_HELPER_1(dcbz_970, void, tl)
DEF_HELPER_1(icbi, void, tl)
@@ -170,6 +170,99 @@ void helper_stmw (target_ulong addr, uint32_t reg)
}
}
void helper_lsw(target_ulong addr, uint32_t nb, uint32_t reg)
{
int sh;
#ifdef CONFIG_USER_ONLY
#define ldfunl ldl_raw
#define ldfunb ldub_raw
#else
int (*ldfunl)(target_ulong);
int (*ldfunb)(target_ulong);
switch (env->mmu_idx) {
default:
case 0:
ldfunl = ldl_user;
ldfunb = ldub_user;
break;
case 1:
ldfunl = ldl_kernel;
ldfunb = ldub_kernel;
break;
case 2:
ldfunl = ldl_hypv;
ldfunb = ldub_hypv;
break;
}
#endif
for (; nb > 3; nb -= 4, addr += 4) {
env->gpr[reg] = ldfunl(get_addr(addr));
reg = (reg + 1) % 32;
}
if (unlikely(nb > 0)) {
env->gpr[reg] = 0;
for (sh = 24; nb > 0; nb--, addr++, sh -= 8) {
env->gpr[reg] |= ldfunb(get_addr(addr)) << sh;
}
}
}
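When the byte count is not a multiple of four, helper_lsw above packs the trailing one to three bytes into the most significant byte positions of the last register, leaving the remaining bytes zero. A self-contained sketch of just that packing step (illustrative only; pack_tail is a made-up name and a plain byte buffer stands in for guest memory):

    #include <stdint.h>
    #include <stdio.h>

    /* Pack up to 3 leftover bytes into the high-order byte positions of a
     * 32-bit register image, mirroring the sh = 24 ... sh -= 8 loop above. */
    static uint32_t pack_tail(const uint8_t *mem, int nb)
    {
        uint32_t reg = 0;
        int sh;

        for (sh = 24; nb > 0; nb--, mem++, sh -= 8)
            reg |= (uint32_t)*mem << sh;
        return reg;
    }

    int main(void)
    {
        const uint8_t bytes[2] = { 0xAA, 0xBB };

        /* Two leftover bytes land in bits 31..16: prints 0xAABB0000. */
        printf("0x%08X\n", (unsigned)pack_tail(bytes, 2));
        return 0;
    }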
/* PPC32 specification says we must generate an exception if
* rA is in the range of registers to be loaded.
* On the other hand, IBM says this is valid, but rA won't be loaded.
* For now, I'll follow the spec...
*/
void helper_lswx(target_ulong addr, uint32_t reg, uint32_t ra, uint32_t rb)
{
if (likely(xer_bc != 0)) {
if (unlikely((ra != 0 && reg < ra && (reg + xer_bc) > ra) ||
(reg < rb && (reg + xer_bc) > rb))) {
raise_exception_err(env, POWERPC_EXCP_PROGRAM,
POWERPC_EXCP_INVAL |
POWERPC_EXCP_INVAL_LSWX);
} else {
helper_lsw(addr, xer_bc, reg);
}
}
}
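helper_lswx rejects the form where the loaded register range would cover rA (when rA is not 0) or rB, raising a program interrupt with POWERPC_EXCP_INVAL_LSWX, which matches the comment above about following the PPC32 specification. A standalone sketch of the same range test (lswx_invalid is a hypothetical name; the comparison mirrors the helper, which adds the XER byte count to the starting register number):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Mirror of the validity test in helper_lswx. */
    static bool lswx_invalid(uint32_t reg, uint32_t bc, uint32_t ra, uint32_t rb)
    {
        return (ra != 0 && reg < ra && reg + bc > ra) ||
               (reg < rb && reg + bc > rb);
    }

    int main(void)
    {
        /* 8 bytes starting at r30 reach into r31; with rB = r31 the
         * form is flagged invalid (prints 1). */
        printf("%d\n", lswx_invalid(30, 8, 0, 31));

        /* 8 bytes starting at r3 never touch rA = r1 or rB = r2 (prints 0). */
        printf("%d\n", lswx_invalid(3, 8, 1, 2));
        return 0;
    }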
void helper_stsw(target_ulong addr, uint32_t nb, uint32_t reg)
{
int sh;
#ifdef CONFIG_USER_ONLY
#define stfunl stl_raw
#define stfunb stb_raw
#else
void (*stfunl)(target_ulong, int);
void (*stfunb)(target_ulong, int);
switch (env->mmu_idx) {
default:
case 0:
stfunl = stl_user;
stfunb = stb_user;
break;
case 1:
stfunl = stl_kernel;
stfunb = stb_kernel;
break;
case 2:
stfunl = stl_hypv;
stfunb = stb_hypv;
break;
}
#endif
for (; nb > 3; nb -= 4, addr += 4) {
stfunl(get_addr(addr), env->gpr[reg]);
reg = (reg + 1) % 32;
}
if (unlikely(nb > 0)) {
for (sh = 24; nb > 0; nb--, addr++, sh -= 8)
stfunb(get_addr(addr), (env->gpr[reg] >> sh) & 0xFF);
}
}
static void do_dcbz(target_ulong addr, int dcache_line_size)
{
target_long mask = get_addr(~(dcache_line_size - 1));
@@ -21,19 +21,12 @@
#if defined(MEMSUFFIX)
/* Memory load/store helpers */
void glue(do_lsw, MEMSUFFIX) (int dst);
void glue(do_stsw, MEMSUFFIX) (int src);
void glue(do_POWER_lscbx, MEMSUFFIX) (int dest, int ra, int rb);
void glue(do_POWER2_lfq, MEMSUFFIX) (void);
void glue(do_POWER2_lfq_le, MEMSUFFIX) (void);
void glue(do_POWER2_stfq, MEMSUFFIX) (void);
void glue(do_POWER2_stfq_le, MEMSUFFIX) (void);
#if defined(TARGET_PPC64)
void glue(do_lsw_64, MEMSUFFIX) (int dst);
void glue(do_stsw_64, MEMSUFFIX) (int src);
#endif
#else
void do_print_mem_EA (target_ulong EA);
@@ -20,78 +20,6 @@
#include "op_mem_access.h"
void glue(do_lsw, MEMSUFFIX) (int dst)
{
uint32_t tmp;
int sh;
for (; T1 > 3; T1 -= 4, T0 += 4) {
env->gpr[dst++] = glue(ldu32, MEMSUFFIX)((uint32_t)T0);
if (unlikely(dst == 32))
dst = 0;
}
if (unlikely(T1 != 0)) {
tmp = 0;
for (sh = 24; T1 > 0; T1--, T0++, sh -= 8) {
tmp |= glue(ldu8, MEMSUFFIX)((uint32_t)T0) << sh;
}
env->gpr[dst] = tmp;
}
}
#if defined(TARGET_PPC64)
void glue(do_lsw_64, MEMSUFFIX) (int dst)
{
uint32_t tmp;
int sh;
for (; T1 > 3; T1 -= 4, T0 += 4) {
env->gpr[dst++] = glue(ldu32, MEMSUFFIX)((uint64_t)T0);
if (unlikely(dst == 32))
dst = 0;
}
if (unlikely(T1 != 0)) {
tmp = 0;
for (sh = 24; T1 > 0; T1--, T0++, sh -= 8) {
tmp |= glue(ldu8, MEMSUFFIX)((uint64_t)T0) << sh;
}
env->gpr[dst] = tmp;
}
}
#endif
void glue(do_stsw, MEMSUFFIX) (int src)
{
int sh;
for (; T1 > 3; T1 -= 4, T0 += 4) {
glue(st32, MEMSUFFIX)((uint32_t)T0, env->gpr[src++]);
if (unlikely(src == 32))
src = 0;
}
if (unlikely(T1 != 0)) {
for (sh = 24; T1 > 0; T1--, T0++, sh -= 8)
glue(st8, MEMSUFFIX)((uint32_t)T0, (env->gpr[src] >> sh) & 0xFF);
}
}
#if defined(TARGET_PPC64)
void glue(do_stsw_64, MEMSUFFIX) (int src)
{
int sh;
for (; T1 > 3; T1 -= 4, T0 += 4) {
glue(st32, MEMSUFFIX)((uint64_t)T0, env->gpr[src++]);
if (unlikely(src == 32))
src = 0;
}
if (unlikely(T1 != 0)) {
for (sh = 24; T1 > 0; T1--, T0++, sh -= 8)
glue(st8, MEMSUFFIX)((uint64_t)T0, (env->gpr[src] >> sh) & 0xFF);
}
}
#endif
/* PowerPC 601 specific instructions (POWER bridge) */
// XXX: to be tested
void glue(do_POWER_lscbx, MEMSUFFIX) (int dest, int ra, int rb)
@@ -20,74 +20,6 @@
#include "op_mem_access.h"
/*** Integer load and store strings ***/
void OPPROTO glue(op_lswi, MEMSUFFIX) (void)
{
glue(do_lsw, MEMSUFFIX)(PARAM1);
RETURN();
}
#if defined(TARGET_PPC64)
void OPPROTO glue(op_lswi_64, MEMSUFFIX) (void)
{
glue(do_lsw_64, MEMSUFFIX)(PARAM1);
RETURN();
}
#endif
/* PPC32 specification says we must generate an exception if
* rA is in the range of registers to be loaded.
* In an other hand, IBM says this is valid, but rA won't be loaded.
* For now, I'll follow the spec...
*/
void OPPROTO glue(op_lswx, MEMSUFFIX) (void)
{
/* Note: T1 comes from xer_bc then no cast is needed */
if (likely(T1 != 0)) {
if (unlikely((PARAM1 < PARAM2 && (PARAM1 + T1) > PARAM2) ||
(PARAM1 < PARAM3 && (PARAM1 + T1) > PARAM3))) {
raise_exception_err(env, POWERPC_EXCP_PROGRAM,
POWERPC_EXCP_INVAL |
POWERPC_EXCP_INVAL_LSWX);
} else {
glue(do_lsw, MEMSUFFIX)(PARAM1);
}
}
RETURN();
}
#if defined(TARGET_PPC64)
void OPPROTO glue(op_lswx_64, MEMSUFFIX) (void)
{
/* Note: T1 comes from xer_bc then no cast is needed */
if (likely(T1 != 0)) {
if (unlikely((PARAM1 < PARAM2 && (PARAM1 + T1) > PARAM2) ||
(PARAM1 < PARAM3 && (PARAM1 + T1) > PARAM3))) {
raise_exception_err(env, POWERPC_EXCP_PROGRAM,
POWERPC_EXCP_INVAL |
POWERPC_EXCP_INVAL_LSWX);
} else {
glue(do_lsw_64, MEMSUFFIX)(PARAM1);
}
}
RETURN();
}
#endif
void OPPROTO glue(op_stsw, MEMSUFFIX) (void)
{
glue(do_stsw, MEMSUFFIX)(PARAM1);
RETURN();
}
#if defined(TARGET_PPC64)
void OPPROTO glue(op_stsw_64, MEMSUFFIX) (void)
{
glue(do_stsw_64, MEMSUFFIX)(PARAM1);
RETURN();
}
#endif
/* Load and set reservation */
void OPPROTO glue(op_lwarx, MEMSUFFIX) (void)
{
@@ -3118,43 +3118,6 @@ GEN_HANDLER(stmw, 0x2F, 0xFF, 0xFF, 0x00000000, PPC_INTEGER)
}
/*** Integer load and store strings ***/
#define op_ldsts(name, start) (*gen_op_##name[ctx->mem_idx])(start)
#define op_ldstsx(name, rd, ra, rb) (*gen_op_##name[ctx->mem_idx])(rd, ra, rb)
/* string load & stores are by definition endian-safe */
#define gen_op_lswi_le_raw gen_op_lswi_raw
#define gen_op_lswi_le_user gen_op_lswi_user
#define gen_op_lswi_le_kernel gen_op_lswi_kernel
#define gen_op_lswi_le_hypv gen_op_lswi_hypv
#define gen_op_lswi_le_64_raw gen_op_lswi_raw
#define gen_op_lswi_le_64_user gen_op_lswi_user
#define gen_op_lswi_le_64_kernel gen_op_lswi_kernel
#define gen_op_lswi_le_64_hypv gen_op_lswi_hypv
static GenOpFunc1 *gen_op_lswi[NB_MEM_FUNCS] = {
GEN_MEM_FUNCS(lswi),
};
#define gen_op_lswx_le_raw gen_op_lswx_raw
#define gen_op_lswx_le_user gen_op_lswx_user
#define gen_op_lswx_le_kernel gen_op_lswx_kernel
#define gen_op_lswx_le_hypv gen_op_lswx_hypv
#define gen_op_lswx_le_64_raw gen_op_lswx_raw
#define gen_op_lswx_le_64_user gen_op_lswx_user
#define gen_op_lswx_le_64_kernel gen_op_lswx_kernel
#define gen_op_lswx_le_64_hypv gen_op_lswx_hypv
static GenOpFunc3 *gen_op_lswx[NB_MEM_FUNCS] = {
GEN_MEM_FUNCS(lswx),
};
#define gen_op_stsw_le_raw gen_op_stsw_raw
#define gen_op_stsw_le_user gen_op_stsw_user
#define gen_op_stsw_le_kernel gen_op_stsw_kernel
#define gen_op_stsw_le_hypv gen_op_stsw_hypv
#define gen_op_stsw_le_64_raw gen_op_stsw_raw
#define gen_op_stsw_le_64_user gen_op_stsw_user
#define gen_op_stsw_le_64_kernel gen_op_stsw_kernel
#define gen_op_stsw_le_64_hypv gen_op_stsw_hypv
static GenOpFunc1 *gen_op_stsw[NB_MEM_FUNCS] = {
GEN_MEM_FUNCS(stsw),
};
/* lswi */
/* PowerPC32 specification says we must generate an exception if
* rA is in the range of registers to be loaded.
@@ -3163,6 +3126,8 @@ static GenOpFunc1 *gen_op_stsw[NB_MEM_FUNCS] = {
*/
GEN_HANDLER(lswi, 0x1F, 0x15, 0x12, 0x00000001, PPC_STRING)
{
TCGv t0;
TCGv_i32 t1, t2;
int nb = NB(ctx->opcode);
int start = rD(ctx->opcode);
int ra = rA(ctx->opcode);
@@ -3180,49 +3145,67 @@ GEN_HANDLER(lswi, 0x1F, 0x15, 0x12, 0x00000001, PPC_STRING)
}
/* NIP cannot be restored if the memory exception comes from an helper */
gen_update_nip(ctx, ctx->nip - 4);
gen_addr_register(cpu_T[0], ctx);
tcg_gen_movi_tl(cpu_T[1], nb);
op_ldsts(lswi, start);
t0 = tcg_temp_new();
gen_addr_register(t0, ctx);
t1 = tcg_const_i32(nb);
t2 = tcg_const_i32(start);
gen_helper_lsw(t0, t1, t2);
tcg_temp_free(t0);
tcg_temp_free_i32(t1);
tcg_temp_free_i32(t2);
}
/* lswx */
GEN_HANDLER(lswx, 0x1F, 0x15, 0x10, 0x00000001, PPC_STRING)
{
int ra = rA(ctx->opcode);
int rb = rB(ctx->opcode);
TCGv t0 = tcg_temp_new();
TCGv_i32 t1 = tcg_const_i32(rD(ctx->opcode));
TCGv_i32 t2 = tcg_const_i32(rA(ctx->opcode));
TCGv_i32 t3 = tcg_const_i32(rB(ctx->opcode));
/* NIP cannot be restored if the memory exception comes from an helper */
gen_update_nip(ctx, ctx->nip - 4);
gen_addr_reg_index(cpu_T[0], ctx);
if (ra == 0) {
ra = rb;
}
tcg_gen_andi_tl(cpu_T[1], cpu_xer, 0x7F);
op_ldstsx(lswx, rD(ctx->opcode), ra, rb);
gen_addr_reg_index(t0, ctx);
gen_helper_lswx(t0, t1, t2, t3);
tcg_temp_free(t0);
tcg_temp_free_i32(t1);
tcg_temp_free_i32(t2);
tcg_temp_free_i32(t3);
}
/* stswi */
GEN_HANDLER(stswi, 0x1F, 0x15, 0x16, 0x00000001, PPC_STRING)
{
int nb = NB(ctx->opcode);
TCGv t0 = tcg_temp_new();
TCGv_i32 t1;
TCGv_i32 t2 = tcg_const_i32(rS(ctx->opcode));
/* NIP cannot be restored if the memory exception comes from an helper */
gen_update_nip(ctx, ctx->nip - 4);
gen_addr_register(cpu_T[0], ctx);
gen_addr_register(t0, ctx);
if (nb == 0)
nb = 32;
tcg_gen_movi_tl(cpu_T[1], nb);
op_ldsts(stsw, rS(ctx->opcode));
t1 = tcg_const_i32(nb);
gen_helper_stsw(t0, t1, t2);
tcg_temp_free(t0);
tcg_temp_free_i32(t1);
tcg_temp_free_i32(t2);
}
/* stswx */
GEN_HANDLER(stswx, 0x1F, 0x15, 0x14, 0x00000001, PPC_STRING)
{
TCGv t0 = tcg_temp_new();
TCGv_i32 t1 = tcg_temp_new_i32();
TCGv_i32 t2 = tcg_const_i32(rS(ctx->opcode));
/* NIP cannot be restored if the memory exception comes from an helper */
gen_update_nip(ctx, ctx->nip - 4);
gen_addr_reg_index(cpu_T[0], ctx);
tcg_gen_andi_tl(cpu_T[1], cpu_xer, 0x7F);
op_ldsts(stsw, rS(ctx->opcode));
gen_addr_reg_index(t0, ctx);
tcg_gen_trunc_tl_i32(t1, cpu_xer);
tcg_gen_andi_i32(t1, t1, 0x7F);
gen_helper_stsw(t0, t1, t2);
tcg_temp_free(t0);
tcg_temp_free_i32(t1);
tcg_temp_free_i32(t2);
}
/*** Memory synchronisation ***/
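For the indexed string forms (lswx/stswx) the byte count is not an immediate: it comes from the low seven bits of XER, which the stswx handler above extracts with the tcg_gen_trunc_tl_i32 / tcg_gen_andi_i32 pair before calling the helper, and which helper_lswx reads as xer_bc. A minimal standalone illustration of that extraction (xer_string_count is a made-up name):

    #include <stdint.h>
    #include <stdio.h>

    /* The string byte count is the low 7 bits of XER, i.e. the same
     * 0x7F mask applied in the stswx handler. */
    static uint32_t xer_string_count(uint64_t xer)
    {
        return (uint32_t)(xer & 0x7F);
    }

    int main(void)
    {
        printf("%u\n", (unsigned)xer_string_count(0x19));  /* prints 25 */
        return 0;
    }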