Commit 6a6ae23f authored by aurel32

target-ppc: convert SPE load/store to TCG

Signed-off-by: Aurelien Jarno <aurelien@aurel32.net>

git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@5804 c046a42c-6fe2-441c-8c8c-71466251a162
Parent 38d14952
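The replacement TCG code lives in the collapsed part of this diff (presumably translate.c). For orientation, a conversion of this kind replaces each dyngen micro-op chain with TCG IR emitted directly at translation time. A minimal sketch of what an SPE 64-bit load can look like under TCG follows; the helper gen_addr_reg_index, the cpu_gpr/cpu_gprh register split, and the rD macro are illustrative assumptions, not taken from this diff:

/* Hedged sketch: translating evldd (vector load doubleword) with TCG.
 * A single guest access replaces the op_spe_ldd_* micro-ops, and the
 * 64-bit result is split across the 32-bit GPR pair SPE uses. */
static void gen_evldd(DisasContext *ctx)
{
    TCGv ea = tcg_temp_new();
    TCGv_i64 val = tcg_temp_new_i64();

    gen_addr_reg_index(ctx, ea);              /* EA = (rA|0) + rB (assumed helper) */
    tcg_gen_qemu_ld64(val, ea, ctx->mem_idx); /* no fixed env scratch slots needed */
    tcg_gen_trunc_i64_tl(cpu_gpr[rD(ctx->opcode)], val);   /* low word */
    tcg_gen_shri_i64(val, val, 32);
    tcg_gen_trunc_i64_tl(cpu_gprh[rD(ctx->opcode)], val);  /* high word */

    tcg_temp_free(ea);
    tcg_temp_free_i64(val);
}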
@@ -536,12 +536,6 @@ struct CPUPPCState {
#if (TARGET_LONG_BITS > HOST_LONG_BITS) || defined(HOST_I386)
    target_ulong t2;
#endif
#if !defined(TARGET_PPC64)
/* temporary fixed-point registers
* used to emulate 64 bits registers on 32 bits targets
*/
uint64_t t0_64, t1_64, t2_64;
#endif
    /* general purpose registers */
    target_ulong gpr[32];
...
@@ -44,16 +44,6 @@ register target_ulong T1 asm(AREG2);
register target_ulong T2 asm(AREG3);
#define TDX "%016lx"
#endif
/* We may, sometime, need 64 bits registers on 32 bits targets */
#if !defined(TARGET_PPC64)
#define T0_64 (env->t0_64)
#define T1_64 (env->t1_64)
#define T2_64 (env->t2_64)
#else
#define T0_64 T0
#define T1_64 T1
#define T2_64 T2
#endif
#define FT0 (env->ft0)
#define FT1 (env->ft1)
...
@@ -914,35 +914,3 @@ void OPPROTO op_store_booke_tsr (void)
}
#endif /* !defined(CONFIG_USER_ONLY) */
/* SPE extension */
void OPPROTO op_splatw_T1_64 (void)
{
T1_64 = (T1_64 << 32) | (T1_64 & 0x00000000FFFFFFFFULL);
RETURN();
}
void OPPROTO op_extsh_T1_64 (void)
{
T1_64 = (int32_t)((int16_t)T1_64);
RETURN();
}
void OPPROTO op_sli16_T1_64 (void)
{
T1_64 = T1_64 << 16;
RETURN();
}
void OPPROTO op_sli32_T1_64 (void)
{
T1_64 = T1_64 << 32;
RETURN();
}
void OPPROTO op_srli32_T1_64 (void)
{
T1_64 = T1_64 >> 32;
RETURN();
}
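Each micro-op removed above corresponds to one or two TCG 64-bit IR operations on a translation-time temporary. A sketch, assuming a TCGv_i64 temporary t1 standing in for the old T1_64 slot (names are illustrative, not from this diff):

/* op_splatw_T1_64: copy the low word into the high word */
TCGv_i64 lo = tcg_temp_new_i64();
tcg_gen_ext32u_i64(lo, t1);    /* lo = t1 & 0xffffffff */
tcg_gen_shli_i64(t1, lo, 32);  /* t1 = lo << 32 */
tcg_gen_or_i64(t1, t1, lo);    /* t1 = (lo << 32) | lo */
tcg_temp_free_i64(lo);

/* op_extsh_T1_64: sign-extend bits 15..0 through bit 63 */
tcg_gen_ext16s_i64(t1, t1);

/* op_sli16 / op_sli32 / op_srli32: plain 64-bit shifts */
tcg_gen_shli_i64(t1, t1, 16);
tcg_gen_shli_i64(t1, t1, 32);
tcg_gen_shri_i64(t1, t1, 32);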
@@ -642,262 +642,4 @@ void OPPROTO glue(op_POWER2_stfq_le, MEMSUFFIX) (void)
    RETURN();
}
/* SPE extension */
#define _PPC_SPE_LD_OP(name, op) \
void OPPROTO glue(glue(op_spe_l, name), MEMSUFFIX) (void) \
{ \
T1_64 = glue(op, MEMSUFFIX)((uint32_t)T0); \
RETURN(); \
}
#if defined(TARGET_PPC64)
#define _PPC_SPE_LD_OP_64(name, op) \
void OPPROTO glue(glue(glue(op_spe_l, name), _64), MEMSUFFIX) (void) \
{ \
T1_64 = glue(op, MEMSUFFIX)((uint64_t)T0); \
RETURN(); \
}
#define PPC_SPE_LD_OP(name, op) \
_PPC_SPE_LD_OP(name, op); \
_PPC_SPE_LD_OP_64(name, op)
#else
#define PPC_SPE_LD_OP(name, op) \
_PPC_SPE_LD_OP(name, op)
#endif
#define _PPC_SPE_ST_OP(name, op) \
void OPPROTO glue(glue(op_spe_st, name), MEMSUFFIX) (void) \
{ \
glue(op, MEMSUFFIX)((uint32_t)T0, T1_64); \
RETURN(); \
}
#if defined(TARGET_PPC64)
#define _PPC_SPE_ST_OP_64(name, op) \
void OPPROTO glue(glue(glue(op_spe_st, name), _64), MEMSUFFIX) (void) \
{ \
glue(op, MEMSUFFIX)((uint64_t)T0, T1_64); \
RETURN(); \
}
#define PPC_SPE_ST_OP(name, op) \
_PPC_SPE_ST_OP(name, op); \
_PPC_SPE_ST_OP_64(name, op)
#else
#define PPC_SPE_ST_OP(name, op) \
_PPC_SPE_ST_OP(name, op)
#endif
PPC_SPE_LD_OP(dd, ldu64);
PPC_SPE_ST_OP(dd, st64);
PPC_SPE_LD_OP(dd_le, ldu64r);
PPC_SPE_ST_OP(dd_le, st64r);
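To make the glue plumbing concrete: with MEMSUFFIX defined as _raw (one of the access variants this file is compiled under), PPC_SPE_LD_OP(dd, ldu64) above expands to roughly:

void OPPROTO op_spe_ldd_raw (void)
{
    T1_64 = ldu64_raw((uint32_t)T0);
    RETURN();
}

That is one micro-op per (operation, MMU mode) pair, each routing the effective address through the fixed T0 register and the result through the T1_64 scratch slot; removing that fixed-register plumbing is the point of the TCG conversion.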
static always_inline uint64_t glue(spe_ldw, MEMSUFFIX) (target_ulong EA)
{
uint64_t ret;
ret = (uint64_t)glue(ldu32, MEMSUFFIX)(EA) << 32;
ret |= (uint64_t)glue(ldu32, MEMSUFFIX)(EA + 4);
return ret;
}
PPC_SPE_LD_OP(dw, spe_ldw);
static always_inline void glue(spe_stdw, MEMSUFFIX) (target_ulong EA,
uint64_t data)
{
glue(st32, MEMSUFFIX)(EA, data >> 32);
glue(st32, MEMSUFFIX)(EA + 4, data);
}
PPC_SPE_ST_OP(dw, spe_stdw);
static always_inline uint64_t glue(spe_ldw_le, MEMSUFFIX) (target_ulong EA)
{
uint64_t ret;
ret = (uint64_t)glue(ldu32r, MEMSUFFIX)(EA) << 32;
ret |= (uint64_t)glue(ldu32r, MEMSUFFIX)(EA + 4);
return ret;
}
PPC_SPE_LD_OP(dw_le, spe_ldw_le);
static always_inline void glue(spe_stdw_le, MEMSUFFIX) (target_ulong EA,
uint64_t data)
{
glue(st32r, MEMSUFFIX)(EA, data >> 32);
glue(st32r, MEMSUFFIX)(EA + 4, data);
}
PPC_SPE_ST_OP(dw_le, spe_stdw_le);
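The _le variants differ from the plain ones only in using the byte-reversed primitives (ldu32r/st32r) for each 32-bit access. A worked example, assuming guest memory holds the bytes 00 11 22 33 44 55 66 77 starting at EA on this big-endian target:

/* spe_ldw:    ldu32(EA)  = 0x00112233, ldu32(EA+4)  = 0x44556677
 *             -> T1_64 = 0x0011223344556677
 * spe_ldw_le: ldu32r(EA) = 0x33221100, ldu32r(EA+4) = 0x77665544
 *             -> T1_64 = 0x3322110077665544 */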
static always_inline uint64_t glue(spe_ldh, MEMSUFFIX) (target_ulong EA)
{
uint64_t ret;
ret = (uint64_t)glue(ldu16, MEMSUFFIX)(EA) << 48;
ret |= (uint64_t)glue(ldu16, MEMSUFFIX)(EA + 2) << 32;
ret |= (uint64_t)glue(ldu16, MEMSUFFIX)(EA + 4) << 16;
ret |= (uint64_t)glue(ldu16, MEMSUFFIX)(EA + 6);
return ret;
}
PPC_SPE_LD_OP(dh, spe_ldh);
static always_inline void glue(spe_stdh, MEMSUFFIX) (target_ulong EA,
uint64_t data)
{
glue(st16, MEMSUFFIX)(EA, data >> 48);
glue(st16, MEMSUFFIX)(EA + 2, data >> 32);
glue(st16, MEMSUFFIX)(EA + 4, data >> 16);
glue(st16, MEMSUFFIX)(EA + 6, data);
}
PPC_SPE_ST_OP(dh, spe_stdh);
static always_inline uint64_t glue(spe_ldh_le, MEMSUFFIX) (target_ulong EA)
{
uint64_t ret;
ret = (uint64_t)glue(ldu16r, MEMSUFFIX)(EA) << 48;
ret |= (uint64_t)glue(ldu16r, MEMSUFFIX)(EA + 2) << 32;
ret |= (uint64_t)glue(ldu16r, MEMSUFFIX)(EA + 4) << 16;
ret |= (uint64_t)glue(ldu16r, MEMSUFFIX)(EA + 6);
return ret;
}
PPC_SPE_LD_OP(dh_le, spe_ldh_le);
static always_inline void glue(spe_stdh_le, MEMSUFFIX) (target_ulong EA,
uint64_t data)
{
glue(st16r, MEMSUFFIX)(EA, data >> 48);
glue(st16r, MEMSUFFIX)(EA + 2, data >> 32);
glue(st16r, MEMSUFFIX)(EA + 4, data >> 16);
glue(st16r, MEMSUFFIX)(EA + 6, data);
}
PPC_SPE_ST_OP(dh_le, spe_stdh_le);
static always_inline uint64_t glue(spe_lwhe, MEMSUFFIX) (target_ulong EA)
{
uint64_t ret;
ret = (uint64_t)glue(ldu16, MEMSUFFIX)(EA) << 48;
ret |= (uint64_t)glue(ldu16, MEMSUFFIX)(EA + 2) << 16;
return ret;
}
PPC_SPE_LD_OP(whe, spe_lwhe);
static always_inline void glue(spe_stwhe, MEMSUFFIX) (target_ulong EA,
uint64_t data)
{
glue(st16, MEMSUFFIX)(EA, data >> 48);
glue(st16, MEMSUFFIX)(EA + 2, data >> 16);
}
PPC_SPE_ST_OP(whe, spe_stwhe);
static always_inline uint64_t glue(spe_lwhe_le, MEMSUFFIX) (target_ulong EA)
{
uint64_t ret;
ret = (uint64_t)glue(ldu16r, MEMSUFFIX)(EA) << 48;
ret |= (uint64_t)glue(ldu16r, MEMSUFFIX)(EA + 2) << 16;
return ret;
}
PPC_SPE_LD_OP(whe_le, spe_lwhe_le);
static always_inline void glue(spe_stwhe_le, MEMSUFFIX) (target_ulong EA,
uint64_t data)
{
glue(st16r, MEMSUFFIX)(EA, data >> 48);
glue(st16r, MEMSUFFIX)(EA + 2, data >> 16);
}
PPC_SPE_ST_OP(whe_le, spe_stwhe_le);
static always_inline uint64_t glue(spe_lwhou, MEMSUFFIX) (target_ulong EA)
{
uint64_t ret;
ret = (uint64_t)glue(ldu16, MEMSUFFIX)(EA) << 32;
ret |= (uint64_t)glue(ldu16, MEMSUFFIX)(EA + 2);
return ret;
}
PPC_SPE_LD_OP(whou, spe_lwhou);
static always_inline uint64_t glue(spe_lwhos, MEMSUFFIX) (target_ulong EA)
{
uint64_t ret;
ret = ((uint64_t)((int32_t)glue(lds16, MEMSUFFIX)(EA))) << 32;
ret |= (uint64_t)((int32_t)glue(lds16, MEMSUFFIX)(EA + 2));
return ret;
}
PPC_SPE_LD_OP(whos, spe_lwhos);
static always_inline void glue(spe_stwho, MEMSUFFIX) (target_ulong EA,
uint64_t data)
{
glue(st16, MEMSUFFIX)(EA, data >> 32);
glue(st16, MEMSUFFIX)(EA + 2, data);
}
PPC_SPE_ST_OP(who, spe_stwho);
static always_inline uint64_t glue(spe_lwhou_le, MEMSUFFIX) (target_ulong EA)
{
uint64_t ret;
ret = (uint64_t)glue(ldu16r, MEMSUFFIX)(EA) << 32;
ret |= (uint64_t)glue(ldu16r, MEMSUFFIX)(EA + 2);
return ret;
}
PPC_SPE_LD_OP(whou_le, spe_lwhou_le);
static always_inline uint64_t glue(spe_lwhos_le, MEMSUFFIX) (target_ulong EA)
{
uint64_t ret;
ret = ((uint64_t)((int32_t)glue(lds16r, MEMSUFFIX)(EA))) << 32;
ret |= (uint64_t)((int32_t)glue(lds16r, MEMSUFFIX)(EA + 2));
return ret;
}
PPC_SPE_LD_OP(whos_le, spe_lwhos_le);
static always_inline void glue(spe_stwho_le, MEMSUFFIX) (target_ulong EA,
uint64_t data)
{
glue(st16r, MEMSUFFIX)(EA, data >> 32);
glue(st16r, MEMSUFFIX)(EA + 2, data);
}
PPC_SPE_ST_OP(who_le, spe_stwho_le);
static always_inline void glue(spe_stwwo, MEMSUFFIX) (target_ulong EA,
uint64_t data)
{
glue(st32, MEMSUFFIX)(EA, data);
}
PPC_SPE_ST_OP(wwo, spe_stwwo);
static always_inline void glue(spe_stwwo_le, MEMSUFFIX) (target_ulong EA,
uint64_t data)
{
glue(st32r, MEMSUFFIX)(EA, data);
}
PPC_SPE_ST_OP(wwo_le, spe_stwwo_le);
static always_inline uint64_t glue(spe_lh, MEMSUFFIX) (target_ulong EA)
{
uint16_t tmp;
tmp = glue(ldu16, MEMSUFFIX)(EA);
return ((uint64_t)tmp << 48) | ((uint64_t)tmp << 16);
}
PPC_SPE_LD_OP(h, spe_lh);
static always_inline uint64_t glue(spe_lh_le, MEMSUFFIX) (target_ulong EA)
{
uint16_t tmp;
tmp = glue(ldu16r, MEMSUFFIX)(EA);
return ((uint64_t)tmp << 48) | ((uint64_t)tmp << 16);
}
PPC_SPE_LD_OP(h_le, spe_lh_le);
static always_inline uint64_t glue(spe_lwwsplat, MEMSUFFIX) (target_ulong EA)
{
uint32_t tmp;
tmp = glue(ldu32, MEMSUFFIX)(EA);
return ((uint64_t)tmp << 32) | (uint64_t)tmp;
}
PPC_SPE_LD_OP(wwsplat, spe_lwwsplat);
static always_inline
uint64_t glue(spe_lwwsplat_le, MEMSUFFIX) (target_ulong EA)
{
uint32_t tmp;
tmp = glue(ldu32r, MEMSUFFIX)(EA);
return ((uint64_t)tmp << 32) | (uint64_t)tmp;
}
PPC_SPE_LD_OP(wwsplat_le, spe_lwwsplat_le);
static always_inline uint64_t glue(spe_lwhsplat, MEMSUFFIX) (target_ulong EA)
{
uint64_t ret;
uint16_t tmp;
tmp = glue(ldu16, MEMSUFFIX)(EA);
ret = ((uint64_t)tmp << 48) | ((uint64_t)tmp << 32);
tmp = glue(ldu16, MEMSUFFIX)(EA + 2);
ret |= ((uint64_t)tmp << 16) | (uint64_t)tmp;
return ret;
}
PPC_SPE_LD_OP(whsplat, spe_lwhsplat);
static always_inline
uint64_t glue(spe_lwhsplat_le, MEMSUFFIX) (target_ulong EA)
{
uint64_t ret;
uint16_t tmp;
tmp = glue(ldu16r, MEMSUFFIX)(EA);
ret = ((uint64_t)tmp << 48) | ((uint64_t)tmp << 32);
tmp = glue(ldu16r, MEMSUFFIX)(EA + 2);
ret |= ((uint64_t)tmp << 16) | (uint64_t)tmp;
return ret;
}
PPC_SPE_LD_OP(whsplat_le, spe_lwhsplat_le);
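As a worked example of the splat loads, assume the halfword 0xAAAA at EA, 0xBBBB at EA + 2, and (for wwsplat) the word 0x12345678 at EA:

/* spe_lh       -> 0xAAAA0000AAAA0000  (one halfword, even lane of each word)
 * spe_lwwsplat -> 0x1234567812345678  (one word, duplicated into both words)
 * spe_lwhsplat -> 0xAAAAAAAABBBBBBBB  (two halfwords, each doubled in its word) */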
#undef MEMSUFFIX
This diff is collapsed.