Commit 57951c27 authored by aurel32

target-ppc: convert most SPE integer instructions to TCG

Signed-off-by: Aurelien Jarno <aurelien@aurel32.net>

git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@5668 c046a42c-6fe2-441c-8c8c-71466251a162
Parent 970d622e
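
The conversion follows the usual dyngen-to-TCG pattern: the per-lane SPE integer operations that used to be run-time micro-ops in op.c (removed below) are now either emitted inline as TCG ops by the translator, or routed through the few C helpers newly declared in helper.h. As a rough illustration only, here is what an inline form of evextsh (sign-extend the low 16 bits of each 32-bit element) could look like; the TCG op names are generic ones, the gen_evextsh_sketch/rt/ra names are invented for this sketch, and the commit's actual translator changes are in the collapsed part of the diff (presumably translate.c) and may differ in detail:

static void gen_evextsh_sketch(TCGv_i64 rt, TCGv_i64 ra)
{
    TCGv_i64 hi = tcg_temp_new_i64();
    TCGv_i64 lo = tcg_temp_new_i64();

    /* Sign-extend the low 16 bits of each 32-bit element. */
    tcg_gen_shri_i64(hi, ra, 32);
    tcg_gen_ext16s_i64(hi, hi);
    tcg_gen_ext16s_i64(lo, ra);

    /* Reassemble the two 32-bit elements into one 64-bit value. */
    tcg_gen_shli_i64(hi, hi, 32);
    tcg_gen_ext32u_i64(lo, lo);
    tcg_gen_or_i64(rt, hi, lo);

    tcg_temp_free_i64(hi);
    tcg_temp_free_i64(lo);
}

The benefit is that simple lane arithmetic becomes visible to the TCG code generator and is compiled to host code directly, instead of going through an indirect call into a C micro-op on every execution.
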
@@ -832,10 +832,14 @@ static inline void cpu_clone_regs(CPUState *env, target_ulong newsp)
/*****************************************************************************/
/* CRF definitions */
#define CRF_LT 3
#define CRF_GT 2
#define CRF_EQ 1
#define CRF_SO 0
#define CRF_LT 3
#define CRF_GT 2
#define CRF_EQ 1
#define CRF_SO 0
#define CRF_CH (1 << 4)
#define CRF_CL (1 << 3)
#define CRF_CH_OR_CL (1 << 2)
#define CRF_CH_AND_CL (1 << 1)
/* XER definitions */
#define XER_SO 31
......
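
The four new CRF_CH* masks are added for the SPE vector compares, whose result is a 4-bit CR field combining the two per-element outcomes; how the translator consumes them is not visible here (that file's diff is collapsed below). The packing itself is unchanged and can be seen in _do_evcmp_merge further down in op_helper.c: bit 3 is the high-element result, bit 2 the low-element result, bit 1 their OR and bit 0 their AND. A self-contained check of that packing (the evcmp_merge/main names and sample values are only for this demo):

#include <assert.h>
#include <stdint.h>

/* Same packing as _do_evcmp_merge in op_helper.c. */
static uint32_t evcmp_merge(int t0, int t1)
{
    return (t0 << 3) | (t1 << 2) | ((t0 | t1) << 1) | (t0 & t1);
}

int main(void)
{
    assert(evcmp_merge(1, 0) == 0xA); /* only the high element compared true */
    assert(evcmp_merge(0, 1) == 0x6); /* only the low element compared true */
    assert(evcmp_merge(1, 1) == 0xF); /* both true */
    assert(evcmp_merge(0, 0) == 0x0); /* both false */
    return 0;
}
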
@@ -23,3 +23,6 @@ DEF_HELPER(target_ulong, helper_popcntb_64, (target_ulong val))
DEF_HELPER(target_ulong, helper_srad, (target_ulong, target_ulong))
#endif
DEF_HELPER(uint32_t, helper_cntlsw32, (uint32_t))
DEF_HELPER(uint32_t, helper_cntlzw32, (uint32_t))
DEF_HELPER(uint32_t, helper_brinc, (uint32_t, uint32_t))
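
The two counting helpers declared here keep the parts that are awkward to express as straight-line TCG ops; their bodies appear in the op_helper.c hunk below. Their semantics in isolation, as a self-contained demo (clz32 here is a local stand-in for QEMU's clz32 from host-utils, and the function names and sample values are only for this demo):

#include <assert.h>
#include <stdint.h>

/* Stand-in for QEMU's clz32(): count leading zero bits, clz32(0) == 32. */
static uint32_t clz32(uint32_t val)
{
    uint32_t n = 0;

    while (n < 32 && !(val & 0x80000000u)) {
        val <<= 1;
        n++;
    }
    return n;
}

/* Count leading sign bits, as in helper_cntlsw32 below. */
static uint32_t cntlsw32(uint32_t val)
{
    if (val & 0x80000000u)
        return clz32(~val);
    else
        return clz32(val);
}

/* Count leading zero bits, as in helper_cntlzw32 below. */
static uint32_t cntlzw32(uint32_t val)
{
    return clz32(val);
}

int main(void)
{
    assert(cntlzw32(0x00010000u) == 15);
    assert(cntlzw32(0) == 32);
    assert(cntlsw32(0xFFFF0000u) == 16); /* sixteen leading one bits */
    assert(cntlsw32(0x00010000u) == 15); /* fifteen leading zero bits */
    return 0;
}
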
@@ -1227,22 +1227,6 @@ void OPPROTO op_splatw_T1_64 (void)
RETURN();
}
void OPPROTO op_splatwi_T0_64 (void)
{
uint64_t tmp = PARAM1;
T0_64 = (tmp << 32) | tmp;
RETURN();
}
void OPPROTO op_splatwi_T1_64 (void)
{
uint64_t tmp = PARAM1;
T1_64 = (tmp << 32) | tmp;
RETURN();
}
void OPPROTO op_extsh_T1_64 (void)
{
T1_64 = (int32_t)((int16_t)T1_64);
@@ -1267,152 +1251,6 @@ void OPPROTO op_srli32_T1_64 (void)
RETURN();
}
void OPPROTO op_evsel (void)
{
do_evsel();
RETURN();
}
void OPPROTO op_evaddw (void)
{
do_evaddw();
RETURN();
}
void OPPROTO op_evsubfw (void)
{
do_evsubfw();
RETURN();
}
void OPPROTO op_evneg (void)
{
do_evneg();
RETURN();
}
void OPPROTO op_evabs (void)
{
do_evabs();
RETURN();
}
void OPPROTO op_evextsh (void)
{
T0_64 = ((uint64_t)((int32_t)(int16_t)(T0_64 >> 32)) << 32) |
(uint64_t)((int32_t)(int16_t)T0_64);
RETURN();
}
void OPPROTO op_evextsb (void)
{
T0_64 = ((uint64_t)((int32_t)(int8_t)(T0_64 >> 32)) << 32) |
(uint64_t)((int32_t)(int8_t)T0_64);
RETURN();
}
void OPPROTO op_evcntlzw (void)
{
do_evcntlzw();
RETURN();
}
void OPPROTO op_evrndw (void)
{
do_evrndw();
RETURN();
}
void OPPROTO op_brinc (void)
{
do_brinc();
RETURN();
}
void OPPROTO op_evcntlsw (void)
{
do_evcntlsw();
RETURN();
}
void OPPROTO op_evsrws (void)
{
do_evsrws();
RETURN();
}
void OPPROTO op_evsrwu (void)
{
do_evsrwu();
RETURN();
}
void OPPROTO op_evslw (void)
{
do_evslw();
RETURN();
}
void OPPROTO op_evrlw (void)
{
do_evrlw();
RETURN();
}
void OPPROTO op_evmergelo (void)
{
T0_64 = (T0_64 << 32) | (T1_64 & 0x00000000FFFFFFFFULL);
RETURN();
}
void OPPROTO op_evmergehi (void)
{
T0_64 = (T0_64 & 0xFFFFFFFF00000000ULL) | (T1_64 >> 32);
RETURN();
}
void OPPROTO op_evmergelohi (void)
{
T0_64 = (T0_64 << 32) | (T1_64 >> 32);
RETURN();
}
void OPPROTO op_evmergehilo (void)
{
T0_64 = (T0_64 & 0xFFFFFFFF00000000ULL) | (T1_64 & 0x00000000FFFFFFFFULL);
RETURN();
}
void OPPROTO op_evcmpgts (void)
{
do_evcmpgts();
RETURN();
}
void OPPROTO op_evcmpgtu (void)
{
do_evcmpgtu();
RETURN();
}
void OPPROTO op_evcmplts (void)
{
do_evcmplts();
RETURN();
}
void OPPROTO op_evcmpltu (void)
{
do_evcmpltu();
RETURN();
}
void OPPROTO op_evcmpeq (void)
{
do_evcmpeq();
RETURN();
}
void OPPROTO op_evfssub (void)
{
do_evfssub();
......
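
Among the micro-ops removed above, the evmerge* family is pure lane shuffling and an obvious candidate for inline TCG code (the corresponding translator changes are presumably in the collapsed part of this diff). A plain-C model of what those four one-liners compute, as a self-contained demo (function and variable names are only for this demo):

#include <assert.h>
#include <stdint.h>

/* Same lane arithmetic as the removed op_evmerge* micro-ops: each 64-bit
 * SPE register is treated as a hi:lo pair of 32-bit elements. */
static uint64_t evmergelo(uint64_t a, uint64_t b)
{
    return (a << 32) | (b & 0x00000000FFFFFFFFULL);                   /* lo(a):lo(b) */
}

static uint64_t evmergehi(uint64_t a, uint64_t b)
{
    return (a & 0xFFFFFFFF00000000ULL) | (b >> 32);                   /* hi(a):hi(b) */
}

static uint64_t evmergelohi(uint64_t a, uint64_t b)
{
    return (a << 32) | (b >> 32);                                     /* lo(a):hi(b) */
}

static uint64_t evmergehilo(uint64_t a, uint64_t b)
{
    return (a & 0xFFFFFFFF00000000ULL) | (b & 0x00000000FFFFFFFFULL); /* hi(a):lo(b) */
}

int main(void)
{
    uint64_t a = 0x1111111122222222ULL;
    uint64_t b = 0x3333333344444444ULL;

    assert(evmergelo(a, b)   == 0x2222222244444444ULL);
    assert(evmergehi(a, b)   == 0x1111111133333333ULL);
    assert(evmergelohi(a, b) == 0x2222222233333333ULL);
    assert(evmergehilo(a, b) == 0x1111111144444444ULL);
    return 0;
}
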
@@ -1624,46 +1624,18 @@ static always_inline uint32_t word_reverse (uint32_t val)
}
#define MASKBITS 16 // Random value - to be fixed (implementation dependant)
void do_brinc (void)
target_ulong helper_brinc (target_ulong arg1, target_ulong arg2)
{
uint32_t a, b, d, mask;
mask = UINT32_MAX >> (32 - MASKBITS);
a = T0 & mask;
b = T1 & mask;
a = arg1 & mask;
b = arg2 & mask;
d = word_reverse(1 + word_reverse(a | ~b));
T0 = (T0 & ~mask) | (d & b);
return (arg1 & ~mask) | (d & b);
}
#define DO_SPE_OP2(name) \
void do_ev##name (void) \
{ \
T0_64 = ((uint64_t)_do_e##name(T0_64 >> 32, T1_64 >> 32) << 32) | \
(uint64_t)_do_e##name(T0_64, T1_64); \
}
#define DO_SPE_OP1(name) \
void do_ev##name (void) \
{ \
T0_64 = ((uint64_t)_do_e##name(T0_64 >> 32) << 32) | \
(uint64_t)_do_e##name(T0_64); \
}
/* Fixed-point vector arithmetic */
static always_inline uint32_t _do_eabs (uint32_t val)
{
if ((val & 0x80000000) && val != 0x80000000)
val -= val;
return val;
}
static always_inline uint32_t _do_eaddw (uint32_t op1, uint32_t op2)
{
return op1 + op2;
}
static always_inline int _do_ecntlsw (uint32_t val)
uint32_t helper_cntlsw32 (uint32_t val)
{
if (val & 0x80000000)
return clz32(~val);
@@ -1671,88 +1643,23 @@ static always_inline int _do_ecntlsw (uint32_t val)
return clz32(val);
}
static always_inline int _do_ecntlzw (uint32_t val)
uint32_t helper_cntlzw32 (uint32_t val)
{
return clz32(val);
}
static always_inline uint32_t _do_eneg (uint32_t val)
{
if (val != 0x80000000)
val -= val;
return val;
}
static always_inline uint32_t _do_erlw (uint32_t op1, uint32_t op2)
{
return rotl32(op1, op2);
}
static always_inline uint32_t _do_erndw (uint32_t val)
{
return (val + 0x000080000000) & 0xFFFF0000;
}
static always_inline uint32_t _do_eslw (uint32_t op1, uint32_t op2)
{
/* No error here: 6 bits are used */
return op1 << (op2 & 0x3F);
}
static always_inline int32_t _do_esrws (int32_t op1, uint32_t op2)
{
/* No error here: 6 bits are used */
return op1 >> (op2 & 0x3F);
}
static always_inline uint32_t _do_esrwu (uint32_t op1, uint32_t op2)
{
/* No error here: 6 bits are used */
return op1 >> (op2 & 0x3F);
}
static always_inline uint32_t _do_esubfw (uint32_t op1, uint32_t op2)
{
return op2 - op1;
}
/* evabs */
DO_SPE_OP1(abs);
/* evaddw */
DO_SPE_OP2(addw);
/* evcntlsw */
DO_SPE_OP1(cntlsw);
/* evcntlzw */
DO_SPE_OP1(cntlzw);
/* evneg */
DO_SPE_OP1(neg);
/* evrlw */
DO_SPE_OP2(rlw);
/* evrnd */
DO_SPE_OP1(rndw);
/* evslw */
DO_SPE_OP2(slw);
/* evsrws */
DO_SPE_OP2(srws);
/* evsrwu */
DO_SPE_OP2(srwu);
/* evsubfw */
DO_SPE_OP2(subfw);
/* evsel is a little bit more complicated... */
static always_inline uint32_t _do_esel (uint32_t op1, uint32_t op2, int n)
{
if (n)
return op1;
else
return op2;
#define DO_SPE_OP1(name) \
void do_ev##name (void) \
{ \
T0_64 = ((uint64_t)_do_e##name(T0_64 >> 32) << 32) | \
(uint64_t)_do_e##name(T0_64); \
}
void do_evsel (void)
{
T0_64 = ((uint64_t)_do_esel(T0_64 >> 32, T1_64 >> 32, T0 >> 3) << 32) |
(uint64_t)_do_esel(T0_64, T1_64, (T0 >> 2) & 1);
#define DO_SPE_OP2(name) \
void do_ev##name (void) \
{ \
T0_64 = ((uint64_t)_do_e##name(T0_64 >> 32, T1_64 >> 32) << 32) | \
(uint64_t)_do_e##name(T0_64, T1_64); \
}
/* Fixed-point vector comparisons */
@@ -1768,41 +1675,6 @@ static always_inline uint32_t _do_evcmp_merge (int t0, int t1)
{
return (t0 << 3) | (t1 << 2) | ((t0 | t1) << 1) | (t0 & t1);
}
static always_inline int _do_ecmpeq (uint32_t op1, uint32_t op2)
{
return op1 == op2 ? 1 : 0;
}
static always_inline int _do_ecmpgts (int32_t op1, int32_t op2)
{
return op1 > op2 ? 1 : 0;
}
static always_inline int _do_ecmpgtu (uint32_t op1, uint32_t op2)
{
return op1 > op2 ? 1 : 0;
}
static always_inline int _do_ecmplts (int32_t op1, int32_t op2)
{
return op1 < op2 ? 1 : 0;
}
static always_inline int _do_ecmpltu (uint32_t op1, uint32_t op2)
{
return op1 < op2 ? 1 : 0;
}
/* evcmpeq */
DO_SPE_CMP(cmpeq);
/* evcmpgts */
DO_SPE_CMP(cmpgts);
/* evcmpgtu */
DO_SPE_CMP(cmpgtu);
/* evcmplts */
DO_SPE_CMP(cmplts);
/* evcmpltu */
DO_SPE_CMP(cmpltu);
/* Single precision floating-point conversions from/to integer */
static always_inline uint32_t _do_efscfsi (int32_t val)
......
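
helper_brinc, kept as a C helper above, implements the bit-reversed increment typically used for FFT-style addressing: the low MASKBITS bits of arg1 are stepped in bit-reversed order under the mask given in arg2. A self-contained demo of the same logic (word_reverse's body lies outside the hunk shown above, so a plain 32-bit bit reversal is used here as a stand-in; function names and sample values are only for this demo):

#include <assert.h>
#include <stdint.h>

#define MASKBITS 16

/* Stand-in for op_helper.c's word_reverse(): full 32-bit bit reversal. */
static uint32_t word_reverse(uint32_t v)
{
    v = ((v >> 1) & 0x55555555u) | ((v & 0x55555555u) << 1);
    v = ((v >> 2) & 0x33333333u) | ((v & 0x33333333u) << 2);
    v = ((v >> 4) & 0x0F0F0F0Fu) | ((v & 0x0F0F0F0Fu) << 4);
    v = ((v >> 8) & 0x00FF00FFu) | ((v & 0x00FF00FFu) << 8);
    return (v >> 16) | (v << 16);
}

/* Same logic as helper_brinc above, specialised to uint32_t. */
static uint32_t brinc(uint32_t arg1, uint32_t arg2)
{
    uint32_t a, b, d, mask;

    mask = UINT32_MAX >> (32 - MASKBITS);
    a = arg1 & mask;
    b = arg2 & mask;
    d = word_reverse(1 + word_reverse(a | ~b));
    return (arg1 & ~mask) | (d & b);
}

int main(void)
{
    /* With mask 0x7 (an 8-entry buffer), repeated brinc steps walk the
     * indices in bit-reversed order: 0 -> 4 -> 2 -> 6 -> ... */
    assert(brinc(0, 7) == 4);
    assert(brinc(4, 7) == 2);
    assert(brinc(2, 7) == 6);
    return 0;
}
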
This diff is collapsed.