Commit 44e7757c authored by blueswir1

Convert other float and VIS ops to TCG


git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@4091 c046a42c-6fe2-441c-8c8c-71466251a162
Parent ff07ec83
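
For context: this change drops the dyngen-style micro-ops (op_f*) from op.c and exposes the same operations as TCG helpers (helper_f*), declared in helper.h and implemented in op_helper.c, so translate.c can emit direct helper calls instead. A minimal sketch of how such a call would be generated, assuming the tcg_gen_helper_0_0() emitter available in TCG at the time (illustrative, not part of the patch):

    /* translate.c (sketch): operands are passed implicitly via FT0/FT1 in env */
    tcg_gen_helper_0_0(helper_fadds);
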
@@ -84,3 +84,99 @@ void TCG_HELPER_PROTO helper_fcmpeq_fcc3(void);
#endif
#endif
void TCG_HELPER_PROTO raise_exception(int tt);
#define F_HELPER_0_0(name) void TCG_HELPER_PROTO helper_f ## name(void)
#if defined(CONFIG_USER_ONLY)
#define F_HELPER_SDQ_0_0(name) \
F_HELPER_0_0(name ## s); \
F_HELPER_0_0(name ## d); \
F_HELPER_0_0(name ## q)
#else
#define F_HELPER_SDQ_0_0(name) \
F_HELPER_0_0(name ## s); \
F_HELPER_0_0(name ## d);
#endif
F_HELPER_SDQ_0_0(add);
F_HELPER_SDQ_0_0(sub);
F_HELPER_SDQ_0_0(mul);
F_HELPER_SDQ_0_0(div);
F_HELPER_0_0(smuld);
F_HELPER_0_0(dmulq);
F_HELPER_SDQ_0_0(neg);
F_HELPER_SDQ_0_0(ito);
#ifdef TARGET_SPARC64
F_HELPER_SDQ_0_0(xto);
#endif
F_HELPER_0_0(dtos);
F_HELPER_0_0(stod);
#if defined(CONFIG_USER_ONLY)
F_HELPER_0_0(qtos);
F_HELPER_0_0(stoq);
F_HELPER_0_0(qtod);
F_HELPER_0_0(dtoq);
#endif
F_HELPER_0_0(stoi);
F_HELPER_0_0(dtoi);
#if defined(CONFIG_USER_ONLY)
F_HELPER_0_0(qtoi);
#endif
#ifdef TARGET_SPARC64
F_HELPER_0_0(stox);
F_HELPER_0_0(dtox);
#if defined(CONFIG_USER_ONLY)
F_HELPER_0_0(qtox);
#endif
F_HELPER_0_0(aligndata);
void TCG_HELPER_PROTO helper_movl_FT0_0(void);
void TCG_HELPER_PROTO helper_movl_DT0_0(void);
void TCG_HELPER_PROTO helper_movl_FT0_1(void);
void TCG_HELPER_PROTO helper_movl_DT0_1(void);
F_HELPER_0_0(not);
F_HELPER_0_0(nots);
F_HELPER_0_0(nor);
F_HELPER_0_0(nors);
F_HELPER_0_0(or);
F_HELPER_0_0(ors);
F_HELPER_0_0(xor);
F_HELPER_0_0(xors);
F_HELPER_0_0(and);
F_HELPER_0_0(ands);
F_HELPER_0_0(ornot);
F_HELPER_0_0(ornots);
F_HELPER_0_0(andnot);
F_HELPER_0_0(andnots);
F_HELPER_0_0(nand);
F_HELPER_0_0(nands);
F_HELPER_0_0(xnor);
F_HELPER_0_0(xnors);
F_HELPER_0_0(pmerge);
F_HELPER_0_0(mul8x16);
F_HELPER_0_0(mul8x16al);
F_HELPER_0_0(mul8x16au);
F_HELPER_0_0(mul8sux16);
F_HELPER_0_0(mul8ulx16);
F_HELPER_0_0(muld8sux16);
F_HELPER_0_0(muld8ulx16);
F_HELPER_0_0(expand);
#define VIS_HELPER(name) \
F_HELPER_0_0(name##16); \
F_HELPER_0_0(name##16s); \
F_HELPER_0_0(name##32); \
F_HELPER_0_0(name##32s)
VIS_HELPER(padd);
VIS_HELPER(psub);
#define VIS_CMPHELPER(name) \
F_HELPER_0_0(name##16); \
F_HELPER_0_0(name##32)
VIS_CMPHELPER(cmpgt);
VIS_CMPHELPER(cmpeq);
VIS_CMPHELPER(cmple);
VIS_CMPHELPER(cmpne);
#endif
#undef F_HELPER_0_0
#undef F_HELPER_SDQ_0_0
#undef VIS_HELPER
#undef VIS_CMPHELPER
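
For reference, each F_HELPER_SDQ_0_0 use declares one no-argument helper per precision; under CONFIG_USER_ONLY (where quad precision is handled) F_HELPER_SDQ_0_0(add) expands to:

    void TCG_HELPER_PROTO helper_fadds(void);
    void TCG_HELPER_PROTO helper_faddd(void);
    void TCG_HELPER_PROTO helper_faddq(void);
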
@@ -107,216 +107,7 @@ void OPPROTO op_jmp_label(void)
GOTO_LABEL_PARAM(1);
}
#define F_OP(name, p) void OPPROTO op_f##name##p(void)
#if defined(CONFIG_USER_ONLY)
#define F_BINOP(name) \
F_OP(name, s) \
{ \
FT0 = float32_ ## name (FT0, FT1, &env->fp_status); \
} \
F_OP(name, d) \
{ \
DT0 = float64_ ## name (DT0, DT1, &env->fp_status); \
} \
F_OP(name, q) \
{ \
QT0 = float128_ ## name (QT0, QT1, &env->fp_status); \
}
#else
#define F_BINOP(name) \
F_OP(name, s) \
{ \
FT0 = float32_ ## name (FT0, FT1, &env->fp_status); \
} \
F_OP(name, d) \
{ \
DT0 = float64_ ## name (DT0, DT1, &env->fp_status); \
}
#endif
F_BINOP(add);
F_BINOP(sub);
F_BINOP(mul);
F_BINOP(div);
#undef F_BINOP
void OPPROTO op_fsmuld(void)
{
DT0 = float64_mul(float32_to_float64(FT0, &env->fp_status),
float32_to_float64(FT1, &env->fp_status),
&env->fp_status);
}
#if defined(CONFIG_USER_ONLY)
void OPPROTO op_fdmulq(void)
{
QT0 = float128_mul(float64_to_float128(DT0, &env->fp_status),
float64_to_float128(DT1, &env->fp_status),
&env->fp_status);
}
#endif
#if defined(CONFIG_USER_ONLY)
#define F_HELPER(name) \
F_OP(name, s) \
{ \
do_f##name##s(); \
} \
F_OP(name, d) \
{ \
do_f##name##d(); \
} \
F_OP(name, q) \
{ \
do_f##name##q(); \
}
#else
#define F_HELPER(name) \
F_OP(name, s) \
{ \
do_f##name##s(); \
} \
F_OP(name, d) \
{ \
do_f##name##d(); \
}
#endif
F_OP(neg, s)
{
FT0 = float32_chs(FT1);
}
#ifdef TARGET_SPARC64
F_OP(neg, d)
{
DT0 = float64_chs(DT1);
}
#if defined(CONFIG_USER_ONLY)
F_OP(neg, q)
{
QT0 = float128_chs(QT1);
}
#endif
#endif
/* Integer to float conversion. */
#ifdef USE_INT_TO_FLOAT_HELPERS
F_HELPER(ito);
#ifdef TARGET_SPARC64
F_HELPER(xto);
#endif
#else
F_OP(ito, s)
{
FT0 = int32_to_float32(*((int32_t *)&FT1), &env->fp_status);
}
F_OP(ito, d)
{
DT0 = int32_to_float64(*((int32_t *)&FT1), &env->fp_status);
}
#if defined(CONFIG_USER_ONLY)
F_OP(ito, q)
{
QT0 = int32_to_float128(*((int32_t *)&FT1), &env->fp_status);
}
#endif
#ifdef TARGET_SPARC64
F_OP(xto, s)
{
FT0 = int64_to_float32(*((int64_t *)&DT1), &env->fp_status);
}
F_OP(xto, d)
{
DT0 = int64_to_float64(*((int64_t *)&DT1), &env->fp_status);
}
#if defined(CONFIG_USER_ONLY)
F_OP(xto, q)
{
QT0 = int64_to_float128(*((int64_t *)&DT1), &env->fp_status);
}
#endif
#endif
#endif
#undef F_HELPER
/* floating point conversion */
void OPPROTO op_fdtos(void)
{
FT0 = float64_to_float32(DT1, &env->fp_status);
}
void OPPROTO op_fstod(void)
{
DT0 = float32_to_float64(FT1, &env->fp_status);
}
#if defined(CONFIG_USER_ONLY)
void OPPROTO op_fqtos(void)
{
FT0 = float128_to_float32(QT1, &env->fp_status);
}
void OPPROTO op_fstoq(void)
{
QT0 = float32_to_float128(FT1, &env->fp_status);
}
void OPPROTO op_fqtod(void)
{
DT0 = float128_to_float64(QT1, &env->fp_status);
}
void OPPROTO op_fdtoq(void)
{
QT0 = float64_to_float128(DT1, &env->fp_status);
}
#endif
/* Float to integer conversion. */
void OPPROTO op_fstoi(void)
{
*((int32_t *)&FT0) = float32_to_int32_round_to_zero(FT1, &env->fp_status);
}
void OPPROTO op_fdtoi(void)
{
*((int32_t *)&FT0) = float64_to_int32_round_to_zero(DT1, &env->fp_status);
}
#if defined(CONFIG_USER_ONLY)
void OPPROTO op_fqtoi(void)
{
*((int32_t *)&FT0) = float128_to_int32_round_to_zero(QT1, &env->fp_status);
}
#endif
#ifdef TARGET_SPARC64
void OPPROTO op_fstox(void)
{
*((int64_t *)&DT0) = float32_to_int64_round_to_zero(FT1, &env->fp_status);
}
void OPPROTO op_fdtox(void)
{
*((int64_t *)&DT0) = float64_to_int64_round_to_zero(DT1, &env->fp_status);
}
#if defined(CONFIG_USER_ONLY)
void OPPROTO op_fqtox(void)
{
*((int64_t *)&DT0) = float128_to_int64_round_to_zero(QT1, &env->fp_status);
}
#endif
void OPPROTO op_flushw(void)
{
if (env->cansave != NWINDOWS - 2) {
@@ -349,451 +140,6 @@ void OPPROTO op_restored(void)
}
#endif
#ifdef TARGET_SPARC64
void OPPROTO op_faligndata()
{
uint64_t tmp;
tmp = (*((uint64_t *)&DT0)) << ((env->gsr & 7) * 8);
tmp |= (*((uint64_t *)&DT1)) >> (64 - (env->gsr & 7) * 8);
*((uint64_t *)&DT0) = tmp;
}
void OPPROTO op_movl_FT0_0(void)
{
*((uint32_t *)&FT0) = 0;
}
void OPPROTO op_movl_DT0_0(void)
{
*((uint64_t *)&DT0) = 0;
}
void OPPROTO op_movl_FT0_1(void)
{
*((uint32_t *)&FT0) = 0xffffffff;
}
void OPPROTO op_movl_DT0_1(void)
{
*((uint64_t *)&DT0) = 0xffffffffffffffffULL;
}
void OPPROTO op_fnot(void)
{
*(uint64_t *)&DT0 = ~*(uint64_t *)&DT1;
}
void OPPROTO op_fnots(void)
{
*(uint32_t *)&FT0 = ~*(uint32_t *)&FT1;
}
void OPPROTO op_fnor(void)
{
*(uint64_t *)&DT0 = ~(*(uint64_t *)&DT0 | *(uint64_t *)&DT1);
}
void OPPROTO op_fnors(void)
{
*(uint32_t *)&FT0 = ~(*(uint32_t *)&FT0 | *(uint32_t *)&FT1);
}
void OPPROTO op_for(void)
{
*(uint64_t *)&DT0 |= *(uint64_t *)&DT1;
}
void OPPROTO op_fors(void)
{
*(uint32_t *)&FT0 |= *(uint32_t *)&FT1;
}
void OPPROTO op_fxor(void)
{
*(uint64_t *)&DT0 ^= *(uint64_t *)&DT1;
}
void OPPROTO op_fxors(void)
{
*(uint32_t *)&FT0 ^= *(uint32_t *)&FT1;
}
void OPPROTO op_fand(void)
{
*(uint64_t *)&DT0 &= *(uint64_t *)&DT1;
}
void OPPROTO op_fands(void)
{
*(uint32_t *)&FT0 &= *(uint32_t *)&FT1;
}
void OPPROTO op_fornot(void)
{
*(uint64_t *)&DT0 = *(uint64_t *)&DT0 | ~*(uint64_t *)&DT1;
}
void OPPROTO op_fornots(void)
{
*(uint32_t *)&FT0 = *(uint32_t *)&FT0 | ~*(uint32_t *)&FT1;
}
void OPPROTO op_fandnot(void)
{
*(uint64_t *)&DT0 = *(uint64_t *)&DT0 & ~*(uint64_t *)&DT1;
}
void OPPROTO op_fandnots(void)
{
*(uint32_t *)&FT0 = *(uint32_t *)&FT0 & ~*(uint32_t *)&FT1;
}
void OPPROTO op_fnand(void)
{
*(uint64_t *)&DT0 = ~(*(uint64_t *)&DT0 & *(uint64_t *)&DT1);
}
void OPPROTO op_fnands(void)
{
*(uint32_t *)&FT0 = ~(*(uint32_t *)&FT0 & *(uint32_t *)&FT1);
}
void OPPROTO op_fxnor(void)
{
*(uint64_t *)&DT0 ^= ~*(uint64_t *)&DT1;
}
void OPPROTO op_fxnors(void)
{
*(uint32_t *)&FT0 ^= ~*(uint32_t *)&FT1;
}
#ifdef WORDS_BIGENDIAN
#define VIS_B64(n) b[7 - (n)]
#define VIS_W64(n) w[3 - (n)]
#define VIS_SW64(n) sw[3 - (n)]
#define VIS_L64(n) l[1 - (n)]
#define VIS_B32(n) b[3 - (n)]
#define VIS_W32(n) w[1 - (n)]
#else
#define VIS_B64(n) b[n]
#define VIS_W64(n) w[n]
#define VIS_SW64(n) sw[n]
#define VIS_L64(n) l[n]
#define VIS_B32(n) b[n]
#define VIS_W32(n) w[n]
#endif
typedef union {
uint8_t b[8];
uint16_t w[4];
int16_t sw[4];
uint32_t l[2];
float64 d;
} vis64;
typedef union {
uint8_t b[4];
uint16_t w[2];
uint32_t l;
float32 f;
} vis32;
void OPPROTO op_fpmerge(void)
{
vis64 s, d;
s.d = DT0;
d.d = DT1;
// Reverse calculation order to handle overlap
d.VIS_B64(7) = s.VIS_B64(3);
d.VIS_B64(6) = d.VIS_B64(3);
d.VIS_B64(5) = s.VIS_B64(2);
d.VIS_B64(4) = d.VIS_B64(2);
d.VIS_B64(3) = s.VIS_B64(1);
d.VIS_B64(2) = d.VIS_B64(1);
d.VIS_B64(1) = s.VIS_B64(0);
//d.VIS_B64(0) = d.VIS_B64(0);
DT0 = d.d;
}
void OPPROTO op_fmul8x16(void)
{
vis64 s, d;
uint32_t tmp;
s.d = DT0;
d.d = DT1;
#define PMUL(r) \
tmp = (int32_t)d.VIS_SW64(r) * (int32_t)s.VIS_B64(r); \
if ((tmp & 0xff) > 0x7f) \
tmp += 0x100; \
d.VIS_W64(r) = tmp >> 8;
PMUL(0);
PMUL(1);
PMUL(2);
PMUL(3);
#undef PMUL
DT0 = d.d;
}
void OPPROTO op_fmul8x16al(void)
{
vis64 s, d;
uint32_t tmp;
s.d = DT0;
d.d = DT1;
#define PMUL(r) \
tmp = (int32_t)d.VIS_SW64(1) * (int32_t)s.VIS_B64(r); \
if ((tmp & 0xff) > 0x7f) \
tmp += 0x100; \
d.VIS_W64(r) = tmp >> 8;
PMUL(0);
PMUL(1);
PMUL(2);
PMUL(3);
#undef PMUL
DT0 = d.d;
}
void OPPROTO op_fmul8x16au(void)
{
vis64 s, d;
uint32_t tmp;
s.d = DT0;
d.d = DT1;
#define PMUL(r) \
tmp = (int32_t)d.VIS_SW64(0) * (int32_t)s.VIS_B64(r); \
if ((tmp & 0xff) > 0x7f) \
tmp += 0x100; \
d.VIS_W64(r) = tmp >> 8;
PMUL(0);
PMUL(1);
PMUL(2);
PMUL(3);
#undef PMUL
DT0 = d.d;
}
void OPPROTO op_fmul8sux16(void)
{
vis64 s, d;
uint32_t tmp;
s.d = DT0;
d.d = DT1;
#define PMUL(r) \
tmp = (int32_t)d.VIS_SW64(r) * ((int32_t)s.VIS_SW64(r) >> 8); \
if ((tmp & 0xff) > 0x7f) \
tmp += 0x100; \
d.VIS_W64(r) = tmp >> 8;
PMUL(0);
PMUL(1);
PMUL(2);
PMUL(3);
#undef PMUL
DT0 = d.d;
}
void OPPROTO op_fmul8ulx16(void)
{
vis64 s, d;
uint32_t tmp;
s.d = DT0;
d.d = DT1;
#define PMUL(r) \
tmp = (int32_t)d.VIS_SW64(r) * ((uint32_t)s.VIS_B64(r * 2)); \
if ((tmp & 0xff) > 0x7f) \
tmp += 0x100; \
d.VIS_W64(r) = tmp >> 8;
PMUL(0);
PMUL(1);
PMUL(2);
PMUL(3);
#undef PMUL
DT0 = d.d;
}
void OPPROTO op_fmuld8sux16(void)
{
vis64 s, d;
uint32_t tmp;
s.d = DT0;
d.d = DT1;
#define PMUL(r) \
tmp = (int32_t)d.VIS_SW64(r) * ((int32_t)s.VIS_SW64(r) >> 8); \
if ((tmp & 0xff) > 0x7f) \
tmp += 0x100; \
d.VIS_L64(r) = tmp;
// Reverse calculation order to handle overlap
PMUL(1);
PMUL(0);
#undef PMUL
DT0 = d.d;
}
void OPPROTO op_fmuld8ulx16(void)
{
vis64 s, d;
uint32_t tmp;
s.d = DT0;
d.d = DT1;
#define PMUL(r) \
tmp = (int32_t)d.VIS_SW64(r) * ((uint32_t)s.VIS_B64(r * 2)); \
if ((tmp & 0xff) > 0x7f) \
tmp += 0x100; \
d.VIS_L64(r) = tmp;
// Reverse calculation order to handle overlap
PMUL(1);
PMUL(0);
#undef PMUL
DT0 = d.d;
}
void OPPROTO op_fexpand(void)
{
vis32 s;
vis64 d;
s.l = (uint32_t)(*(uint64_t *)&DT0 & 0xffffffff);
d.d = DT1;
d.VIS_L64(0) = s.VIS_W32(0) << 4;
d.VIS_L64(1) = s.VIS_W32(1) << 4;
d.VIS_L64(2) = s.VIS_W32(2) << 4;
d.VIS_L64(3) = s.VIS_W32(3) << 4;
DT0 = d.d;
}
#define VIS_OP(name, F) \
void OPPROTO name##16(void) \
{ \
vis64 s, d; \
\
s.d = DT0; \
d.d = DT1; \
\
d.VIS_W64(0) = F(d.VIS_W64(0), s.VIS_W64(0)); \
d.VIS_W64(1) = F(d.VIS_W64(1), s.VIS_W64(1)); \
d.VIS_W64(2) = F(d.VIS_W64(2), s.VIS_W64(2)); \
d.VIS_W64(3) = F(d.VIS_W64(3), s.VIS_W64(3)); \
\
DT0 = d.d; \
} \
\
void OPPROTO name##16s(void) \
{ \
vis32 s, d; \
\
s.f = FT0; \
d.f = FT1; \
\
d.VIS_W32(0) = F(d.VIS_W32(0), s.VIS_W32(0)); \
d.VIS_W32(1) = F(d.VIS_W32(1), s.VIS_W32(1)); \
\
FT0 = d.f; \
} \
\
void OPPROTO name##32(void) \
{ \
vis64 s, d; \
\
s.d = DT0; \
d.d = DT1; \
\
d.VIS_L64(0) = F(d.VIS_L64(0), s.VIS_L64(0)); \
d.VIS_L64(1) = F(d.VIS_L64(1), s.VIS_L64(1)); \
\
DT0 = d.d; \
} \
\
void OPPROTO name##32s(void) \
{ \
vis32 s, d; \
\
s.f = FT0; \
d.f = FT1; \
\
d.l = F(d.l, s.l); \
\
FT0 = d.f; \
}
#define FADD(a, b) ((a) + (b))
#define FSUB(a, b) ((a) - (b))
VIS_OP(op_fpadd, FADD)
VIS_OP(op_fpsub, FSUB)
#define VIS_CMPOP(name, F) \
void OPPROTO name##16(void) \
{ \
vis64 s, d; \
\
s.d = DT0; \
d.d = DT1; \
\
d.VIS_W64(0) = F(d.VIS_W64(0), s.VIS_W64(0))? 1: 0; \
d.VIS_W64(0) |= F(d.VIS_W64(1), s.VIS_W64(1))? 2: 0; \
d.VIS_W64(0) |= F(d.VIS_W64(2), s.VIS_W64(2))? 4: 0; \
d.VIS_W64(0) |= F(d.VIS_W64(3), s.VIS_W64(3))? 8: 0; \
\
DT0 = d.d; \
} \
\
void OPPROTO name##32(void) \
{ \
vis64 s, d; \
\
s.d = DT0; \
d.d = DT1; \
\
d.VIS_L64(0) = F(d.VIS_L64(0), s.VIS_L64(0))? 1: 0; \
d.VIS_L64(0) |= F(d.VIS_L64(1), s.VIS_L64(1))? 2: 0; \
\
DT0 = d.d; \
}
#define FCMPGT(a, b) ((a) > (b))
#define FCMPEQ(a, b) ((a) == (b))
#define FCMPLE(a, b) ((a) <= (b))
#define FCMPNE(a, b) ((a) != (b))
VIS_CMPOP(op_fcmpgt, FCMPGT)
VIS_CMPOP(op_fcmpeq, FCMPEQ)
VIS_CMPOP(op_fcmple, FCMPLE)
VIS_CMPOP(op_fcmpne, FCMPNE)
#endif
#define CHECK_ALIGN_OP(align) \
void OPPROTO op_check_align_T0_ ## align (void) \
{ \
......
@@ -50,78 +50,658 @@ void helper_trapcc(target_ulong nb_trap, target_ulong do_trap)
}
}
void helper_check_ieee_exceptions(void)
#define F_HELPER(name, p) void helper_f##name##p(void)
#if defined(CONFIG_USER_ONLY)
#define F_BINOP(name) \
F_HELPER(name, s) \
{ \
FT0 = float32_ ## name (FT0, FT1, &env->fp_status); \
} \
F_HELPER(name, d) \
{ \
DT0 = float64_ ## name (DT0, DT1, &env->fp_status); \
} \
F_HELPER(name, q) \
{ \
QT0 = float128_ ## name (QT0, QT1, &env->fp_status); \
}
#else
#define F_BINOP(name) \
F_HELPER(name, s) \
{ \
FT0 = float32_ ## name (FT0, FT1, &env->fp_status); \
} \
F_HELPER(name, d) \
{ \
DT0 = float64_ ## name (DT0, DT1, &env->fp_status); \
}
#endif
F_BINOP(add);
F_BINOP(sub);
F_BINOP(mul);
F_BINOP(div);
#undef F_BINOP
void helper_fsmuld(void)
{
target_ulong status;
DT0 = float64_mul(float32_to_float64(FT0, &env->fp_status),
float32_to_float64(FT1, &env->fp_status),
&env->fp_status);
}
status = get_float_exception_flags(&env->fp_status);
if (status) {
/* Copy IEEE 754 flags into FSR */
if (status & float_flag_invalid)
env->fsr |= FSR_NVC;
if (status & float_flag_overflow)
env->fsr |= FSR_OFC;
if (status & float_flag_underflow)
env->fsr |= FSR_UFC;
if (status & float_flag_divbyzero)
env->fsr |= FSR_DZC;
if (status & float_flag_inexact)
env->fsr |= FSR_NXC;
#if defined(CONFIG_USER_ONLY)
void helper_fdmulq(void)
{
QT0 = float128_mul(float64_to_float128(DT0, &env->fp_status),
float64_to_float128(DT1, &env->fp_status),
&env->fp_status);
}
#endif
if ((env->fsr & FSR_CEXC_MASK) & ((env->fsr & FSR_TEM_MASK) >> 23)) {
/* Unmasked exception, generate a trap */
env->fsr |= FSR_FTT_IEEE_EXCP;
raise_exception(TT_FP_EXCP);
} else {
/* Accumulate exceptions */
env->fsr |= (env->fsr & FSR_CEXC_MASK) << 5;
}
}
F_HELPER(neg, s)
{
FT0 = float32_chs(FT1);
}
void helper_clear_float_exceptions(void)
#ifdef TARGET_SPARC64
F_HELPER(neg, d)
{
set_float_exception_flags(0, &env->fp_status);
DT0 = float64_chs(DT1);
}
#ifdef USE_INT_TO_FLOAT_HELPERS
void do_fitos(void)
#if defined(CONFIG_USER_ONLY)
F_HELPER(neg, q)
{
QT0 = float128_chs(QT1);
}
#endif
#endif
/* Integer to float conversion. */
F_HELPER(ito, s)
{
FT0 = int32_to_float32(*((int32_t *)&FT1), &env->fp_status);
}
void do_fitod(void)
F_HELPER(ito, d)
{
DT0 = int32_to_float64(*((int32_t *)&FT1), &env->fp_status);
}
#if defined(CONFIG_USER_ONLY)
void do_fitoq(void)
F_HELPER(ito, q)
{
QT0 = int32_to_float128(*((int32_t *)&FT1), &env->fp_status);
}
#endif
#ifdef TARGET_SPARC64
void do_fxtos(void)
F_HELPER(xto, s)
{
FT0 = int64_to_float32(*((int64_t *)&DT1), &env->fp_status);
}
void do_fxtod(void)
F_HELPER(xto, d)
{
DT0 = int64_to_float64(*((int64_t *)&DT1), &env->fp_status);
}
#if defined(CONFIG_USER_ONLY)
F_HELPER(xto, q)
{
QT0 = int64_to_float128(*((int64_t *)&DT1), &env->fp_status);
}
#endif
#endif
#undef F_HELPER
/* floating point conversion */
void helper_fdtos(void)
{
FT0 = float64_to_float32(DT1, &env->fp_status);
}
void helper_fstod(void)
{
DT0 = float32_to_float64(FT1, &env->fp_status);
}
#if defined(CONFIG_USER_ONLY)
void helper_fqtos(void)
{
FT0 = float128_to_float32(QT1, &env->fp_status);
}
void helper_fstoq(void)
{
QT0 = float32_to_float128(FT1, &env->fp_status);
}
void helper_fqtod(void)
{
DT0 = float128_to_float64(QT1, &env->fp_status);
}
void helper_fdtoq(void)
{
QT0 = float64_to_float128(DT1, &env->fp_status);
}
#endif
/* Float to integer conversion. */
void helper_fstoi(void)
{
*((int32_t *)&FT0) = float32_to_int32_round_to_zero(FT1, &env->fp_status);
}
void helper_fdtoi(void)
{
*((int32_t *)&FT0) = float64_to_int32_round_to_zero(DT1, &env->fp_status);
}
#if defined(CONFIG_USER_ONLY)
void do_fxtoq(void)
void helper_fqtoi(void)
{
QT0 = int64_to_float128(*((int32_t *)&DT1), &env->fp_status);
*((int32_t *)&FT0) = float128_to_int32_round_to_zero(QT1, &env->fp_status);
}
#endif
#ifdef TARGET_SPARC64
void helper_fstox(void)
{
*((int64_t *)&DT0) = float32_to_int64_round_to_zero(FT1, &env->fp_status);
}
void helper_fdtox(void)
{
*((int64_t *)&DT0) = float64_to_int64_round_to_zero(DT1, &env->fp_status);
}
#if defined(CONFIG_USER_ONLY)
void helper_fqtox(void)
{
*((int64_t *)&DT0) = float128_to_int64_round_to_zero(QT1, &env->fp_status);
}
#endif
void helper_faligndata(void)
{
uint64_t tmp;
tmp = (*((uint64_t *)&DT0)) << ((env->gsr & 7) * 8);
tmp |= (*((uint64_t *)&DT1)) >> (64 - (env->gsr & 7) * 8);
*((uint64_t *)&DT0) = tmp;
}
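/* helper_faligndata treats DT0:DT1 as a 16-byte value (DT0 most significant)
   and returns the 8 bytes starting at byte offset GSR & 7.  For example, with
   GSR & 7 == 3 the result is the low 5 bytes of DT0 followed by the top
   3 bytes of DT1 (illustrative reading of the code above). */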
void helper_movl_FT0_0(void)
{
*((uint32_t *)&FT0) = 0;
}
void helper_movl_DT0_0(void)
{
*((uint64_t *)&DT0) = 0;
}
void helper_movl_FT0_1(void)
{
*((uint32_t *)&FT0) = 0xffffffff;
}
void helper_movl_DT0_1(void)
{
*((uint64_t *)&DT0) = 0xffffffffffffffffULL;
}
void helper_fnot(void)
{
*(uint64_t *)&DT0 = ~*(uint64_t *)&DT1;
}
void helper_fnots(void)
{
*(uint32_t *)&FT0 = ~*(uint32_t *)&FT1;
}
void helper_fnor(void)
{
*(uint64_t *)&DT0 = ~(*(uint64_t *)&DT0 | *(uint64_t *)&DT1);
}
void helper_fnors(void)
{
*(uint32_t *)&FT0 = ~(*(uint32_t *)&FT0 | *(uint32_t *)&FT1);
}
void helper_for(void)
{
*(uint64_t *)&DT0 |= *(uint64_t *)&DT1;
}
void helper_fors(void)
{
*(uint32_t *)&FT0 |= *(uint32_t *)&FT1;
}
void helper_fxor(void)
{
*(uint64_t *)&DT0 ^= *(uint64_t *)&DT1;
}
void helper_fxors(void)
{
*(uint32_t *)&FT0 ^= *(uint32_t *)&FT1;
}
void helper_fand(void)
{
*(uint64_t *)&DT0 &= *(uint64_t *)&DT1;
}
void helper_fands(void)
{
*(uint32_t *)&FT0 &= *(uint32_t *)&FT1;
}
void helper_fornot(void)
{
*(uint64_t *)&DT0 = *(uint64_t *)&DT0 | ~*(uint64_t *)&DT1;
}
void helper_fornots(void)
{
*(uint32_t *)&FT0 = *(uint32_t *)&FT0 | ~*(uint32_t *)&FT1;
}
void helper_fandnot(void)
{
*(uint64_t *)&DT0 = *(uint64_t *)&DT0 & ~*(uint64_t *)&DT1;
}
void helper_fandnots(void)
{
*(uint32_t *)&FT0 = *(uint32_t *)&FT0 & ~*(uint32_t *)&FT1;
}
void helper_fnand(void)
{
*(uint64_t *)&DT0 = ~(*(uint64_t *)&DT0 & *(uint64_t *)&DT1);
}
void helper_fnands(void)
{
*(uint32_t *)&FT0 = ~(*(uint32_t *)&FT0 & *(uint32_t *)&FT1);
}
void helper_fxnor(void)
{
*(uint64_t *)&DT0 ^= ~*(uint64_t *)&DT1;
}
void helper_fxnors(void)
{
*(uint32_t *)&FT0 ^= ~*(uint32_t *)&FT1;
}
#ifdef WORDS_BIGENDIAN
#define VIS_B64(n) b[7 - (n)]
#define VIS_W64(n) w[3 - (n)]
#define VIS_SW64(n) sw[3 - (n)]
#define VIS_L64(n) l[1 - (n)]
#define VIS_B32(n) b[3 - (n)]
#define VIS_W32(n) w[1 - (n)]
#else
#define VIS_B64(n) b[n]
#define VIS_W64(n) w[n]
#define VIS_SW64(n) sw[n]
#define VIS_L64(n) l[n]
#define VIS_B32(n) b[n]
#define VIS_W32(n) w[n]
#endif
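/* The VIS_* accessors select the n-th least-significant element of a 64- or
   32-bit register, independent of host byte order: on a big-endian host the
   array index is mirrored, on a little-endian host it is used directly. */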
typedef union {
uint8_t b[8];
uint16_t w[4];
int16_t sw[4];
uint32_t l[2];
float64 d;
} vis64;
typedef union {
uint8_t b[4];
uint16_t w[2];
uint32_t l;
float32 f;
} vis32;
void helper_fpmerge(void)
{
vis64 s, d;
s.d = DT0;
d.d = DT1;
// Reverse calculation order to handle overlap
d.VIS_B64(7) = s.VIS_B64(3);
d.VIS_B64(6) = d.VIS_B64(3);
d.VIS_B64(5) = s.VIS_B64(2);
d.VIS_B64(4) = d.VIS_B64(2);
d.VIS_B64(3) = s.VIS_B64(1);
d.VIS_B64(2) = d.VIS_B64(1);
d.VIS_B64(1) = s.VIS_B64(0);
//d.VIS_B64(0) = d.VIS_B64(0);
DT0 = d.d;
}
void helper_fmul8x16(void)
{
vis64 s, d;
uint32_t tmp;
s.d = DT0;
d.d = DT1;
#define PMUL(r) \
tmp = (int32_t)d.VIS_SW64(r) * (int32_t)s.VIS_B64(r); \
if ((tmp & 0xff) > 0x7f) \
tmp += 0x100; \
d.VIS_W64(r) = tmp >> 8;
PMUL(0);
PMUL(1);
PMUL(2);
PMUL(3);
#undef PMUL
DT0 = d.d;
}
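/* The PMUL step above keeps the high 16 bits of each 16x8-bit product and
   rounds the discarded low byte to nearest (halves round up).  Worked example:
   d element 0x00ff (255) times s byte 0x80 (128) gives tmp = 0x7f80; the low
   byte 0x80 exceeds 0x7f, so tmp becomes 0x8080 and the stored word is 0x80,
   i.e. 127.5 rounded to 128. */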
void helper_fmul8x16al(void)
{
vis64 s, d;
uint32_t tmp;
s.d = DT0;
d.d = DT1;
#define PMUL(r) \
tmp = (int32_t)d.VIS_SW64(1) * (int32_t)s.VIS_B64(r); \
if ((tmp & 0xff) > 0x7f) \
tmp += 0x100; \
d.VIS_W64(r) = tmp >> 8;
PMUL(0);
PMUL(1);
PMUL(2);
PMUL(3);
#undef PMUL
DT0 = d.d;
}
void helper_fmul8x16au(void)
{
vis64 s, d;
uint32_t tmp;
s.d = DT0;
d.d = DT1;
#define PMUL(r) \
tmp = (int32_t)d.VIS_SW64(0) * (int32_t)s.VIS_B64(r); \
if ((tmp & 0xff) > 0x7f) \
tmp += 0x100; \
d.VIS_W64(r) = tmp >> 8;
PMUL(0);
PMUL(1);
PMUL(2);
PMUL(3);
#undef PMUL
DT0 = d.d;
}
void helper_fmul8sux16(void)
{
vis64 s, d;
uint32_t tmp;
s.d = DT0;
d.d = DT1;
#define PMUL(r) \
tmp = (int32_t)d.VIS_SW64(r) * ((int32_t)s.VIS_SW64(r) >> 8); \
if ((tmp & 0xff) > 0x7f) \
tmp += 0x100; \
d.VIS_W64(r) = tmp >> 8;
PMUL(0);
PMUL(1);
PMUL(2);
PMUL(3);
#undef PMUL
DT0 = d.d;
}
void helper_fmul8ulx16(void)
{
vis64 s, d;
uint32_t tmp;
s.d = DT0;
d.d = DT1;
#define PMUL(r) \
tmp = (int32_t)d.VIS_SW64(r) * ((uint32_t)s.VIS_B64(r * 2)); \
if ((tmp & 0xff) > 0x7f) \
tmp += 0x100; \
d.VIS_W64(r) = tmp >> 8;
PMUL(0);
PMUL(1);
PMUL(2);
PMUL(3);
#undef PMUL
DT0 = d.d;
}
void helper_fmuld8sux16(void)
{
vis64 s, d;
uint32_t tmp;
s.d = DT0;
d.d = DT1;
#define PMUL(r) \
tmp = (int32_t)d.VIS_SW64(r) * ((int32_t)s.VIS_SW64(r) >> 8); \
if ((tmp & 0xff) > 0x7f) \
tmp += 0x100; \
d.VIS_L64(r) = tmp;
// Reverse calculation order to handle overlap
PMUL(1);
PMUL(0);
#undef PMUL
DT0 = d.d;
}
void helper_fmuld8ulx16(void)
{
vis64 s, d;
uint32_t tmp;
s.d = DT0;
d.d = DT1;
#define PMUL(r) \
tmp = (int32_t)d.VIS_SW64(r) * ((uint32_t)s.VIS_B64(r * 2)); \
if ((tmp & 0xff) > 0x7f) \
tmp += 0x100; \
d.VIS_L64(r) = tmp;
// Reverse calculation order to handle overlap
PMUL(1);
PMUL(0);
#undef PMUL
DT0 = d.d;
}
void helper_fexpand(void)
{
vis32 s;
vis64 d;
s.l = (uint32_t)(*(uint64_t *)&DT0 & 0xffffffff);
d.d = DT1;
d.VIS_L64(0) = s.VIS_W32(0) << 4;
d.VIS_L64(1) = s.VIS_W32(1) << 4;
d.VIS_L64(2) = s.VIS_W32(2) << 4;
d.VIS_L64(3) = s.VIS_W32(3) << 4;
DT0 = d.d;
}
#define VIS_HELPER(name, F) \
void name##16(void) \
{ \
vis64 s, d; \
\
s.d = DT0; \
d.d = DT1; \
\
d.VIS_W64(0) = F(d.VIS_W64(0), s.VIS_W64(0)); \
d.VIS_W64(1) = F(d.VIS_W64(1), s.VIS_W64(1)); \
d.VIS_W64(2) = F(d.VIS_W64(2), s.VIS_W64(2)); \
d.VIS_W64(3) = F(d.VIS_W64(3), s.VIS_W64(3)); \
\
DT0 = d.d; \
} \
\
void name##16s(void) \
{ \
vis32 s, d; \
\
s.f = FT0; \
d.f = FT1; \
\
d.VIS_W32(0) = F(d.VIS_W32(0), s.VIS_W32(0)); \
d.VIS_W32(1) = F(d.VIS_W32(1), s.VIS_W32(1)); \
\
FT0 = d.f; \
} \
\
void name##32(void) \
{ \
vis64 s, d; \
\
s.d = DT0; \
d.d = DT1; \
\
d.VIS_L64(0) = F(d.VIS_L64(0), s.VIS_L64(0)); \
d.VIS_L64(1) = F(d.VIS_L64(1), s.VIS_L64(1)); \
\
DT0 = d.d; \
} \
\
void name##32s(void) \
{ \
vis32 s, d; \
\
s.f = FT0; \
d.f = FT1; \
\
d.l = F(d.l, s.l); \
\
FT0 = d.f; \
}
#define FADD(a, b) ((a) + (b))
#define FSUB(a, b) ((a) - (b))
VIS_HELPER(helper_fpadd, FADD)
VIS_HELPER(helper_fpsub, FSUB)
#define VIS_CMPHELPER(name, F) \
void name##16(void) \
{ \
vis64 s, d; \
\
s.d = DT0; \
d.d = DT1; \
\
d.VIS_W64(0) = F(d.VIS_W64(0), s.VIS_W64(0))? 1: 0; \
d.VIS_W64(0) |= F(d.VIS_W64(1), s.VIS_W64(1))? 2: 0; \
d.VIS_W64(0) |= F(d.VIS_W64(2), s.VIS_W64(2))? 4: 0; \
d.VIS_W64(0) |= F(d.VIS_W64(3), s.VIS_W64(3))? 8: 0; \
\
DT0 = d.d; \
} \
\
void name##32(void) \
{ \
vis64 s, d; \
\
s.d = DT0; \
d.d = DT1; \
\
d.VIS_L64(0) = F(d.VIS_L64(0), s.VIS_L64(0))? 1: 0; \
d.VIS_L64(0) |= F(d.VIS_L64(1), s.VIS_L64(1))? 2: 0; \
\
DT0 = d.d; \
}
#define FCMPGT(a, b) ((a) > (b))
#define FCMPEQ(a, b) ((a) == (b))
#define FCMPLE(a, b) ((a) <= (b))
#define FCMPNE(a, b) ((a) != (b))
VIS_CMPHELPER(helper_fcmpgt, FCMPGT)
VIS_CMPHELPER(helper_fcmpeq, FCMPEQ)
VIS_CMPHELPER(helper_fcmple, FCMPLE)
VIS_CMPHELPER(helper_fcmpne, FCMPNE)
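/* Each partial compare packs one result bit per element into the low bits of
   DT0: the 16-bit forms set bit r when the comparison holds for element r of
   DT1 against element r of DT0 (a 4-bit mask); the 32-bit forms produce a
   2-bit mask. */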
#endif
void helper_check_ieee_exceptions(void)
{
target_ulong status;
status = get_float_exception_flags(&env->fp_status);
if (status) {
/* Copy IEEE 754 flags into FSR */
if (status & float_flag_invalid)
env->fsr |= FSR_NVC;
if (status & float_flag_overflow)
env->fsr |= FSR_OFC;
if (status & float_flag_underflow)
env->fsr |= FSR_UFC;
if (status & float_flag_divbyzero)
env->fsr |= FSR_DZC;
if (status & float_flag_inexact)
env->fsr |= FSR_NXC;
if ((env->fsr & FSR_CEXC_MASK) & ((env->fsr & FSR_TEM_MASK) >> 23)) {
/* Unmasked exception, generate a trap */
env->fsr |= FSR_FTT_IEEE_EXCP;
raise_exception(TT_FP_EXCP);
} else {
/* Accumulate exceptions */
env->fsr |= (env->fsr & FSR_CEXC_MASK) << 5;
}
}
}
void helper_clear_float_exceptions(void)
{
set_float_exception_flags(0, &env->fp_status);
}
void helper_fabss(void)
{
FT0 = float32_abs(FT1);
......
This diff is collapsed.