提交 4373f3ce 编写于 作者: P pbrook

ARM TCG conversion 10/16.

git-svn-id: svn://svn.savannah.nongnu.org/qemu/trunk@4147 c046a42c-6fe2-441c-8c8c-71466251a162
上级 b0109805
......@@ -168,9 +168,6 @@ typedef struct CPUARMState {
int vec_len;
int vec_stride;
/* Temporary variables if we don't have spare fp regs. */
float32 tmp0s, tmp1s;
float64 tmp0d, tmp1d;
/* scratch space when Tn are not sufficient. */
uint32_t scratch[8];
......
......@@ -25,13 +25,6 @@ register uint32_t T0 asm(AREG1);
/* Dyngen temporaries pinned to host registers (AREG1-AREG3). */
register uint32_t T1 asm(AREG2);
register uint32_t T2 asm(AREG3);
/* TODO: Put these in FP regs on targets that have such things. */
/* It is ok for FT0s and FT0d to overlap.  Likewise FT1s and FT1d. */
/* Scratch FP operands kept in CPUARMState when no spare host FP
   registers are available (see vfp.tmp* in cpu.h). */
#define FT0s env->vfp.tmp0s
#define FT1s env->vfp.tmp1s
#define FT0d env->vfp.tmp0d
#define FT1d env->vfp.tmp1d
#define M0 env->iwmmxt.val
#include "cpu.h"
......@@ -83,23 +76,5 @@ void cpu_loop_exit(void);
void raise_exception(int);

/* Legacy out-of-line VFP helpers (operate on the implicit FT0/FT1
   globals defined above). */
void do_vfp_abss(void);
void do_vfp_absd(void);
void do_vfp_negs(void);
void do_vfp_negd(void);
void do_vfp_sqrts(void);
void do_vfp_sqrtd(void);
void do_vfp_cmps(void);
void do_vfp_cmpd(void);
void do_vfp_cmpes(void);
void do_vfp_cmped(void);
void do_vfp_set_fpscr(void);
void do_vfp_get_fpscr(void);

/* NEON reciprocal/root step and estimate helpers. */
float32 helper_recps_f32(float32, float32);
float32 helper_rsqrts_f32(float32, float32);
uint32_t helper_recpe_u32(uint32_t);
uint32_t helper_rsqrte_u32(uint32_t);
float32 helper_recpe_f32(float32);
float32 helper_rsqrte_f32(float32);
void helper_neon_tbl(int rn, int maxindex);
uint32_t helper_neon_mul_p8(uint32_t op1, uint32_t op2);
......@@ -2167,3 +2167,366 @@ uint32_t HELPER(sel_flags)(uint32_t flags, uint32_t a, uint32_t b)
return (a & mask) | (b & ~mask);
}
/* VFP support.  We follow the convention used for VFP instructions:
   single-precision routines have a "s" suffix, double-precision a
   "d" suffix.  */
/* Convert host (softfloat) exception flags into the VFP FPSCR
   cumulative-exception bit layout (bits 4:0). */
static inline int vfp_exceptbits_from_host(int host_bits)
{
    int target_bits = 0;

    /* Map each softfloat flag onto its FPSCR bit position. */
    target_bits |= (host_bits & float_flag_invalid)   ? 1    : 0;
    target_bits |= (host_bits & float_flag_divbyzero) ? 2    : 0;
    target_bits |= (host_bits & float_flag_overflow)  ? 4    : 0;
    target_bits |= (host_bits & float_flag_underflow) ? 8    : 0;
    target_bits |= (host_bits & float_flag_inexact)   ? 0x10 : 0;

    return target_bits;
}
/* Read the FPSCR: rebuild the register image from the stored bits,
   the cached vec_len/vec_stride fields, and the live softfloat
   exception flags. */
uint32_t HELPER(vfp_get_fpscr)(CPUState *env)
{
    int i;
    uint32_t fpscr;

    /* 0xffc8ffff masks out the LEN (18:16) and STRIDE (21:20)
       fields, which are kept unpacked in vec_len/vec_stride. */
    fpscr = (env->vfp.xregs[ARM_VFP_FPSCR] & 0xffc8ffff)
            | (env->vfp.vec_len << 16)
            | (env->vfp.vec_stride << 20);
    i = get_float_exception_flags(&env->vfp.fp_status);
    /* Fold the accumulated host exception flags into bits 4:0. */
    fpscr |= vfp_exceptbits_from_host(i);
    return fpscr;
}
/* Convert VFP FPSCR cumulative-exception bits (bits 4:0) back into
   the softfloat host flag set. */
static inline int vfp_exceptbits_to_host(int target_bits)
{
    int host_bits = 0;

    host_bits |= (target_bits & 1)    ? float_flag_invalid   : 0;
    host_bits |= (target_bits & 2)    ? float_flag_divbyzero : 0;
    host_bits |= (target_bits & 4)    ? float_flag_overflow  : 0;
    host_bits |= (target_bits & 8)    ? float_flag_underflow : 0;
    host_bits |= (target_bits & 0x10) ? float_flag_inexact   : 0;

    return host_bits;
}
/* Write the FPSCR.  Stores the raw register image, unpacks the
   vector LEN/STRIDE fields, updates the softfloat rounding mode
   when the RMode field changed, and reloads the exception flags. */
void HELPER(vfp_set_fpscr)(CPUState *env, uint32_t val)
{
    int i;
    uint32_t changed;

    changed = env->vfp.xregs[ARM_VFP_FPSCR];
    /* 0xffc8ffff drops LEN/STRIDE, which are cached unpacked. */
    env->vfp.xregs[ARM_VFP_FPSCR] = (val & 0xffc8ffff);
    env->vfp.vec_len = (val >> 16) & 7;
    env->vfp.vec_stride = (val >> 20) & 3;

    changed ^= val;
    if (changed & (3 << 22)) {
        /* RMode field (bits 23:22) changed: map to softfloat mode. */
        i = (val >> 22) & 3;
        switch (i) {
        case 0:
            i = float_round_nearest_even;
            break;
        case 1:
            i = float_round_up;
            break;
        case 2:
            i = float_round_down;
            break;
        case 3:
            i = float_round_to_zero;
            break;
        }
        set_float_rounding_mode(i, &env->vfp.fp_status);
    }

    /* Replace the accumulated exception flags with bits 12:8. */
    i = vfp_exceptbits_to_host((val >> 8) & 0x1f);
    set_float_exception_flags(i, &env->vfp.fp_status);
    /* XXX: FZ and DN are not implemented. */
}
/* Build the helper name helper_vfp_<name><p>. */
#define VFP_HELPER(name, p) HELPER(glue(glue(vfp_,name),p))

/* Emit single- ("s") and double- ("d") precision binary ops that
   round and raise exceptions via env->vfp.fp_status. */
#define VFP_BINOP(name) \
float32 VFP_HELPER(name, s)(float32 a, float32 b, CPUState *env) \
{ \
    return float32_ ## name (a, b, &env->vfp.fp_status); \
} \
float64 VFP_HELPER(name, d)(float64 a, float64 b, CPUState *env) \
{ \
    return float64_ ## name (a, b, &env->vfp.fp_status); \
}
VFP_BINOP(add)
VFP_BINOP(sub)
VFP_BINOP(mul)
VFP_BINOP(div)
#undef VFP_BINOP
/* Single-precision negate: sign-bit flip only, raises no exceptions. */
float32 VFP_HELPER(neg, s)(float32 a)
{
    return float32_chs(a);
}
/* Double-precision negate: sign-bit flip only, raises no exceptions.
   Bug fix: the original called float32_chs() on a float64 value,
   which flips bit 31 rather than the double-precision sign bit 63
   (and truncates the operand when float64 is a raw 64-bit type). */
float64 VFP_HELPER(neg, d)(float64 a)
{
    return float64_chs(a);
}
/* Single-precision absolute value: clears the sign bit only. */
float32 VFP_HELPER(abs, s)(float32 a)
{
    return float32_abs(a);
}
/* Double-precision absolute value: clears the sign bit only.
   Bug fix: the original called float32_abs() on a float64 value,
   which clears bit 31 rather than the double-precision sign bit 63
   (and truncates the operand when float64 is a raw 64-bit type). */
float64 VFP_HELPER(abs, d)(float64 a)
{
    return float64_abs(a);
}
/* Square root, rounding and raising exceptions via fp_status. */
float32 VFP_HELPER(sqrt, s)(float32 a, CPUState *env)
{
    return float32_sqrt(a, &env->vfp.fp_status);
}

float64 VFP_HELPER(sqrt, d)(float64 a, CPUState *env)
{
    return float64_sqrt(a, &env->vfp.fp_status);
}
/* XXX: check quiet/signaling case */
/* Compare helpers: write the NZCV result of a VFP compare into
   FPSCR bits 31:28.  "cmp" uses the quiet compare, "cmpe" the
   signaling one.  Flag encodings: 0x6 = equal, 0x8 = less-than,
   0x2 = greater-than, 0x3 = unordered. */
#define DO_VFP_cmp(p, type) \
void VFP_HELPER(cmp, p)(type a, type b, CPUState *env) \
{ \
    uint32_t flags; \
    switch(type ## _compare_quiet(a, b, &env->vfp.fp_status)) { \
    case 0: flags = 0x6; break; \
    case -1: flags = 0x8; break; \
    case 1: flags = 0x2; break; \
    default: case 2: flags = 0x3; break; \
    } \
    env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
        | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
} \
void VFP_HELPER(cmpe, p)(type a, type b, CPUState *env) \
{ \
    uint32_t flags; \
    switch(type ## _compare(a, b, &env->vfp.fp_status)) { \
    case 0: flags = 0x6; break; \
    case -1: flags = 0x8; break; \
    case 1: flags = 0x2; break; \
    default: case 2: flags = 0x3; break; \
    } \
    env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28) \
        | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
}
DO_VFP_cmp(s, float32)
DO_VFP_cmp(d, float64)
#undef DO_VFP_cmp
/* Helper routines to perform bitwise copies between float and int.
   A union is used so the reinterpretation also works when the
   softfloat types are structs rather than raw integers. */
static inline float32 vfp_itos(uint32_t i)
{
    union {
        uint32_t i;
        float32 s;
    } v;

    v.i = i;
    return v.s;
}

static inline uint32_t vfp_stoi(float32 s)
{
    union {
        uint32_t i;
        float32 s;
    } v;

    v.s = s;
    return v.i;
}

static inline float64 vfp_itod(uint64_t i)
{
    union {
        uint64_t i;
        float64 d;
    } v;

    v.i = i;
    return v.d;
}

static inline uint64_t vfp_dtoi(float64 d)
{
    union {
        uint64_t i;
        float64 d;
    } v;

    v.d = d;
    return v.i;
}
/* Integer to float conversion.  The integer source arrives as the
   bit pattern of a single-precision register, hence the float32
   parameter and the vfp_stoi() unpack. */
float32 VFP_HELPER(uito, s)(float32 x, CPUState *env)
{
    return uint32_to_float32(vfp_stoi(x), &env->vfp.fp_status);
}

float64 VFP_HELPER(uito, d)(float32 x, CPUState *env)
{
    return uint32_to_float64(vfp_stoi(x), &env->vfp.fp_status);
}

float32 VFP_HELPER(sito, s)(float32 x, CPUState *env)
{
    return int32_to_float32(vfp_stoi(x), &env->vfp.fp_status);
}

float64 VFP_HELPER(sito, d)(float32 x, CPUState *env)
{
    return int32_to_float64(vfp_stoi(x), &env->vfp.fp_status);
}
/* Float to integer conversion.  The integer result is returned as
   the bit pattern of a single-precision register via vfp_itos().
   Plain variants round per fp_status; the "z" variants always
   round toward zero. */
float32 VFP_HELPER(toui, s)(float32 x, CPUState *env)
{
    return vfp_itos(float32_to_uint32(x, &env->vfp.fp_status));
}

float32 VFP_HELPER(toui, d)(float64 x, CPUState *env)
{
    return vfp_itos(float64_to_uint32(x, &env->vfp.fp_status));
}

float32 VFP_HELPER(tosi, s)(float32 x, CPUState *env)
{
    return vfp_itos(float32_to_int32(x, &env->vfp.fp_status));
}

float32 VFP_HELPER(tosi, d)(float64 x, CPUState *env)
{
    return vfp_itos(float64_to_int32(x, &env->vfp.fp_status));
}

float32 VFP_HELPER(touiz, s)(float32 x, CPUState *env)
{
    return vfp_itos(float32_to_uint32_round_to_zero(x, &env->vfp.fp_status));
}

float32 VFP_HELPER(touiz, d)(float64 x, CPUState *env)
{
    return vfp_itos(float64_to_uint32_round_to_zero(x, &env->vfp.fp_status));
}

float32 VFP_HELPER(tosiz, s)(float32 x, CPUState *env)
{
    return vfp_itos(float32_to_int32_round_to_zero(x, &env->vfp.fp_status));
}

float32 VFP_HELPER(tosiz, d)(float64 x, CPUState *env)
{
    return vfp_itos(float64_to_int32_round_to_zero(x, &env->vfp.fp_status));
}
/* Floating point precision conversion (FCVT). */
float64 VFP_HELPER(fcvtd, s)(float32 x, CPUState *env)
{
    return float32_to_float64(x, &env->vfp.fp_status);
}

float32 VFP_HELPER(fcvts, d)(float64 x, CPUState *env)
{
    return float64_to_float32(x, &env->vfp.fp_status);
}
/* VFP3 fixed point conversion.
   <name>to<p>: fixed-point (bit pattern in x) -> float, scaling the
   intermediate by 2^shift.
   to<name><p>: float -> fixed-point, pre-scaling by 2^shift and
   truncating toward zero.
   name encodes the integer flavour: s/u = signed/unsigned,
   h/l = 16-bit/32-bit (the itype cast narrows accordingly). */
#define VFP_CONV_FIX(name, p, ftype, itype, sign) \
ftype VFP_HELPER(name##to, p)(ftype x, uint32_t shift, CPUState *env) \
{ \
    ftype tmp; \
    tmp = sign##int32_to_##ftype ((itype)vfp_##p##toi(x), \
                                  &env->vfp.fp_status); \
    return ftype##_scalbn(tmp, shift, &env->vfp.fp_status); \
} \
ftype VFP_HELPER(to##name, p)(ftype x, uint32_t shift, CPUState *env) \
{ \
    ftype tmp; \
    tmp = ftype##_scalbn(x, shift, &env->vfp.fp_status); \
    return vfp_ito##p((itype)ftype##_to_##sign##int32_round_to_zero(tmp, \
                                                    &env->vfp.fp_status)); \
}

VFP_CONV_FIX(sh, d, float64, int16, )
VFP_CONV_FIX(sl, d, float64, int32, )
VFP_CONV_FIX(uh, d, float64, uint16, u)
VFP_CONV_FIX(ul, d, float64, uint32, u)
VFP_CONV_FIX(sh, s, float32, int16, )
VFP_CONV_FIX(sl, s, float32, int32, )
VFP_CONV_FIX(uh, s, float32, uint16, u)
VFP_CONV_FIX(ul, s, float32, uint32, u)
#undef VFP_CONV_FIX
/* NEON VRECPS: Newton-Raphson reciprocal step, 2.0 - a * b. */
float32 HELPER(recps_f32)(float32 a, float32 b, CPUState *env)
{
    float_status *s = &env->vfp.fp_status;
    float32 two = int32_to_float32(2, s);
    return float32_sub(two, float32_mul(a, b, s), s);
}
/* NEON VRSQRTS: Newton-Raphson reciprocal-square-root step.
   The ARM ARM pseudocode (FPRSqrtStep) specifies
   (3.0 - op1 * op2) / 2.0; the original implementation omitted
   the final divide by two. */
float32 HELPER(rsqrts_f32)(float32 a, float32 b, CPUState *env)
{
    float_status *s = &env->vfp.fp_status;
    float32 three = int32_to_float32(3, s);
    float32 two = int32_to_float32(2, s);
    return float32_div(float32_sub(three, float32_mul(a, b, s), s), two, s);
}
/* TODO: The architecture specifies the value that the estimate functions
   should return.  We return the exact reciprocal/root instead. */
/* NEON VRECPE: reciprocal estimate, computed here as exactly 1/a. */
float32 HELPER(recpe_f32)(float32 a, CPUState *env)
{
    float_status *s = &env->vfp.fp_status;
    float32 one = int32_to_float32(1, s);
    return float32_div(one, a, s);
}

/* NEON VRSQRTE: reciprocal square root estimate, here 1/sqrt(a). */
float32 HELPER(rsqrte_f32)(float32 a, CPUState *env)
{
    float_status *s = &env->vfp.fp_status;
    float32 one = int32_to_float32(1, s);
    return float32_div(one, float32_sqrt(a, s), s);
}
/* NEON VRECPE on U32: treat a as a 0.32 fixed-point fraction,
   take the reciprocal estimate, and rescale to a 1.31 result.
   NOTE(review): int32_to_float32() interprets the uint32_t input as
   signed, so values >= 0x80000000 convert to negatives -- confirm
   intended behaviour against the architecture's estimate tables. */
uint32_t HELPER(recpe_u32)(uint32_t a, CPUState *env)
{
    float_status *s = &env->vfp.fp_status;
    float32 tmp;
    tmp = int32_to_float32(a, s);
    tmp = float32_scalbn(tmp, -32, s);      /* a / 2^32 */
    tmp = helper_recpe_f32(tmp, env);
    tmp = float32_scalbn(tmp, 31, s);       /* result * 2^31 */
    return float32_to_int32(tmp, s);
}

/* NEON VRSQRTE on U32: same fixed-point scaling as recpe_u32, with
   the reciprocal-square-root estimate in the middle. */
uint32_t HELPER(rsqrte_u32)(uint32_t a, CPUState *env)
{
    float_status *s = &env->vfp.fp_status;
    float32 tmp;
    tmp = int32_to_float32(a, s);
    tmp = float32_scalbn(tmp, -32, s);
    tmp = helper_rsqrte_f32(tmp, env);
    tmp = float32_scalbn(tmp, 31, s);
    return float32_to_int32(tmp, s);
}
......@@ -122,6 +122,69 @@ DEF_HELPER_0_3(set_r13_banked, void, (CPUState *, uint32_t, uint32_t))
DEF_HELPER_1_1(get_user_reg, uint32_t, (uint32_t))
DEF_HELPER_0_2(set_user_reg, void, (uint32_t, uint32_t))

/* FPSCR access. */
DEF_HELPER_1_1(vfp_get_fpscr, uint32_t, (CPUState *))
DEF_HELPER_0_2(vfp_set_fpscr, void, (CPUState *, uint32_t))

/* VFP arithmetic ("s" = single precision, "d" = double). */
DEF_HELPER_1_3(vfp_adds, float32, (float32, float32, CPUState *))
DEF_HELPER_1_3(vfp_addd, float64, (float64, float64, CPUState *))
DEF_HELPER_1_3(vfp_subs, float32, (float32, float32, CPUState *))
DEF_HELPER_1_3(vfp_subd, float64, (float64, float64, CPUState *))
DEF_HELPER_1_3(vfp_muls, float32, (float32, float32, CPUState *))
DEF_HELPER_1_3(vfp_muld, float64, (float64, float64, CPUState *))
DEF_HELPER_1_3(vfp_divs, float32, (float32, float32, CPUState *))
DEF_HELPER_1_3(vfp_divd, float64, (float64, float64, CPUState *))
DEF_HELPER_1_1(vfp_negs, float32, (float32))
DEF_HELPER_1_1(vfp_negd, float64, (float64))
DEF_HELPER_1_1(vfp_abss, float32, (float32))
DEF_HELPER_1_1(vfp_absd, float64, (float64))
DEF_HELPER_1_2(vfp_sqrts, float32, (float32, CPUState *))
DEF_HELPER_1_2(vfp_sqrtd, float64, (float64, CPUState *))

/* Compares: results go to FPSCR flag bits, not a return value. */
DEF_HELPER_0_3(vfp_cmps, void, (float32, float32, CPUState *))
DEF_HELPER_0_3(vfp_cmpd, void, (float64, float64, CPUState *))
DEF_HELPER_0_3(vfp_cmpes, void, (float32, float32, CPUState *))
DEF_HELPER_0_3(vfp_cmped, void, (float64, float64, CPUState *))

/* Precision and integer conversions; integer operands/results are
   carried as float32 bit patterns. */
DEF_HELPER_1_2(vfp_fcvtds, float64, (float32, CPUState *))
DEF_HELPER_1_2(vfp_fcvtsd, float32, (float64, CPUState *))
DEF_HELPER_1_2(vfp_uitos, float32, (float32, CPUState *))
DEF_HELPER_1_2(vfp_uitod, float64, (float32, CPUState *))
DEF_HELPER_1_2(vfp_sitos, float32, (float32, CPUState *))
DEF_HELPER_1_2(vfp_sitod, float64, (float32, CPUState *))
DEF_HELPER_1_2(vfp_touis, float32, (float32, CPUState *))
DEF_HELPER_1_2(vfp_touid, float32, (float64, CPUState *))
DEF_HELPER_1_2(vfp_touizs, float32, (float32, CPUState *))
DEF_HELPER_1_2(vfp_touizd, float32, (float64, CPUState *))
DEF_HELPER_1_2(vfp_tosis, float32, (float32, CPUState *))
DEF_HELPER_1_2(vfp_tosid, float32, (float64, CPUState *))
DEF_HELPER_1_2(vfp_tosizs, float32, (float32, CPUState *))
DEF_HELPER_1_2(vfp_tosizd, float32, (float64, CPUState *))

/* VFP3 fixed-point conversions (shift = fraction bits). */
DEF_HELPER_1_3(vfp_toshs, float32, (float32, uint32_t, CPUState *))
DEF_HELPER_1_3(vfp_tosls, float32, (float32, uint32_t, CPUState *))
DEF_HELPER_1_3(vfp_touhs, float32, (float32, uint32_t, CPUState *))
DEF_HELPER_1_3(vfp_touls, float32, (float32, uint32_t, CPUState *))
DEF_HELPER_1_3(vfp_toshd, float64, (float64, uint32_t, CPUState *))
DEF_HELPER_1_3(vfp_tosld, float64, (float64, uint32_t, CPUState *))
DEF_HELPER_1_3(vfp_touhd, float64, (float64, uint32_t, CPUState *))
DEF_HELPER_1_3(vfp_tould, float64, (float64, uint32_t, CPUState *))
DEF_HELPER_1_3(vfp_shtos, float32, (float32, uint32_t, CPUState *))
DEF_HELPER_1_3(vfp_sltos, float32, (float32, uint32_t, CPUState *))
DEF_HELPER_1_3(vfp_uhtos, float32, (float32, uint32_t, CPUState *))
DEF_HELPER_1_3(vfp_ultos, float32, (float32, uint32_t, CPUState *))
DEF_HELPER_1_3(vfp_shtod, float64, (float64, uint32_t, CPUState *))
DEF_HELPER_1_3(vfp_sltod, float64, (float64, uint32_t, CPUState *))
DEF_HELPER_1_3(vfp_uhtod, float64, (float64, uint32_t, CPUState *))
DEF_HELPER_1_3(vfp_ultod, float64, (float64, uint32_t, CPUState *))

/* NEON reciprocal/root step and estimate helpers. */
DEF_HELPER_1_3(recps_f32, float32, (float32, float32, CPUState *))
DEF_HELPER_1_3(rsqrts_f32, float32, (float32, float32, CPUState *))
DEF_HELPER_1_2(recpe_f32, float32, (float32, CPUState *))
DEF_HELPER_1_2(rsqrte_f32, float32, (float32, CPUState *))
DEF_HELPER_1_2(recpe_u32, uint32_t, (uint32_t, CPUState *))
DEF_HELPER_1_2(rsqrte_u32, uint32_t, (uint32_t, CPUState *))
#undef DEF_HELPER
#undef DEF_HELPER_0_0
#undef DEF_HELPER_0_1
......
......@@ -252,319 +252,6 @@ void OPPROTO op_rorl_T1_T0_cc(void)
FORCE_RET();
}
/* VFP support.  We follow the convention used for VFP instructions:
   single-precision routines have a "s" suffix, double-precision a
   "d" suffix.  */
/* Legacy dyngen micro-ops.  Operands are the implicit globals
   FT0s/FT1s/FT0d/FT1d (see the FT* defines in exec.h). */
#define VFP_OP(name, p) void OPPROTO op_vfp_##name##p(void)

/* Binary ops in both precisions; the result replaces FT0. */
#define VFP_BINOP(name) \
VFP_OP(name, s) \
{ \
    FT0s = float32_ ## name (FT0s, FT1s, &env->vfp.fp_status); \
} \
VFP_OP(name, d) \
{ \
    FT0d = float64_ ## name (FT0d, FT1d, &env->vfp.fp_status); \
}
VFP_BINOP(add)
VFP_BINOP(sub)
VFP_BINOP(mul)
VFP_BINOP(div)
#undef VFP_BINOP

/* Thin wrappers forwarding to the out-of-line do_vfp_* helpers. */
#define VFP_HELPER(name) \
VFP_OP(name, s) \
{ \
    do_vfp_##name##s(); \
} \
VFP_OP(name, d) \
{ \
    do_vfp_##name##d(); \
}
VFP_HELPER(abs)
VFP_HELPER(sqrt)
VFP_HELPER(cmp)
VFP_HELPER(cmpe)
#undef VFP_HELPER
/* XXX: Will this do the right thing for NANs.  Should invert the signbit
   without looking at the rest of the value.  */
/* Negate micro-ops: sign-bit flip on FT0. */
VFP_OP(neg, s)
{
    FT0s = float32_chs(FT0s);
}

VFP_OP(neg, d)
{
    FT0d = float64_chs(FT0d);
}

/* Load a bitwise-zero constant into FT1 (used for compares with 0). */
VFP_OP(F1_ld0, s)
{
    union {
        uint32_t i;
        float32 s;
    } v;

    v.i = 0;
    FT1s = v.s;
}

VFP_OP(F1_ld0, d)
{
    union {
        uint64_t i;
        float64 d;
    } v;

    v.i = 0;
    FT1d = v.d;
}
/* Helper routines to perform bitwise copies between float and int.
   A union is used so the reinterpretation also works when the
   softfloat types are structs rather than raw integers. */
static inline float32 vfp_itos(uint32_t i)
{
    union {
        uint32_t i;
        float32 s;
    } v;

    v.i = i;
    return v.s;
}

static inline uint32_t vfp_stoi(float32 s)
{
    union {
        uint32_t i;
        float32 s;
    } v;

    v.s = s;
    return v.i;
}

static inline float64 vfp_itod(uint64_t i)
{
    union {
        uint64_t i;
        float64 d;
    } v;

    v.i = i;
    return v.d;
}

static inline uint64_t vfp_dtoi(float64 d)
{
    union {
        uint64_t i;
        float64 d;
    } v;

    v.d = d;
    return v.i;
}
/* Integer to float conversion.  The integer source is the bit
   pattern held in FT0s; the converted result replaces FT0. */
VFP_OP(uito, s)
{
    FT0s = uint32_to_float32(vfp_stoi(FT0s), &env->vfp.fp_status);
}

VFP_OP(uito, d)
{
    FT0d = uint32_to_float64(vfp_stoi(FT0s), &env->vfp.fp_status);
}

VFP_OP(sito, s)
{
    FT0s = int32_to_float32(vfp_stoi(FT0s), &env->vfp.fp_status);
}

VFP_OP(sito, d)
{
    FT0d = int32_to_float64(vfp_stoi(FT0s), &env->vfp.fp_status);
}

/* Float to integer conversion.  The integer result is stored back
   into FT0s as a bit pattern. */
VFP_OP(toui, s)
{
    FT0s = vfp_itos(float32_to_uint32(FT0s, &env->vfp.fp_status));
}

VFP_OP(toui, d)
{
    FT0s = vfp_itos(float64_to_uint32(FT0d, &env->vfp.fp_status));
}

VFP_OP(tosi, s)
{
    FT0s = vfp_itos(float32_to_int32(FT0s, &env->vfp.fp_status));
}

VFP_OP(tosi, d)
{
    FT0s = vfp_itos(float64_to_int32(FT0d, &env->vfp.fp_status));
}

/* TODO: Set rounding mode properly.  The "z" variants always
   truncate toward zero. */
VFP_OP(touiz, s)
{
    FT0s = vfp_itos(float32_to_uint32_round_to_zero(FT0s, &env->vfp.fp_status));
}

VFP_OP(touiz, d)
{
    FT0s = vfp_itos(float64_to_uint32_round_to_zero(FT0d, &env->vfp.fp_status));
}

VFP_OP(tosiz, s)
{
    FT0s = vfp_itos(float32_to_int32_round_to_zero(FT0s, &env->vfp.fp_status));
}

VFP_OP(tosiz, d)
{
    FT0s = vfp_itos(float64_to_int32_round_to_zero(FT0d, &env->vfp.fp_status));
}
/* Floating point precision conversion between FT0s and FT0d. */
VFP_OP(fcvtd, s)
{
    FT0d = float32_to_float64(FT0s, &env->vfp.fp_status);
}

VFP_OP(fcvts, d)
{
    FT0s = float64_to_float32(FT0d, &env->vfp.fp_status);
}

/* VFP3 fixed point conversion.  PARAM1 carries the fraction-bit
   shift; conversions operate in place on FT0<p>. */
#define VFP_CONV_FIX(name, p, ftype, itype, sign) \
VFP_OP(name##to, p) \
{ \
    ftype tmp; \
    tmp = sign##int32_to_##ftype ((itype)vfp_##p##toi(FT0##p), \
                                  &env->vfp.fp_status); \
    FT0##p = ftype##_scalbn(tmp, PARAM1, &env->vfp.fp_status); \
} \
VFP_OP(to##name, p) \
{ \
    ftype tmp; \
    tmp = ftype##_scalbn(FT0##p, PARAM1, &env->vfp.fp_status); \
    FT0##p = vfp_ito##p((itype)ftype##_to_##sign##int32_round_to_zero(tmp, \
                                                    &env->vfp.fp_status)); \
}

VFP_CONV_FIX(sh, d, float64, int16, )
VFP_CONV_FIX(sl, d, float64, int32, )
VFP_CONV_FIX(uh, d, float64, uint16, u)
VFP_CONV_FIX(ul, d, float64, uint32, u)
VFP_CONV_FIX(sh, s, float32, int16, )
VFP_CONV_FIX(sl, s, float32, int32, )
VFP_CONV_FIX(uh, s, float32, uint16, u)
VFP_CONV_FIX(ul, s, float32, uint32, u)
/* Get and Put values from registers.  PARAM1 is a byte offset into
   CPUARMState identifying the architectural FP register. */
VFP_OP(getreg_F0, d)
{
    FT0d = *(float64 *)((char *) env + PARAM1);
}

VFP_OP(getreg_F0, s)
{
    FT0s = *(float32 *)((char *) env + PARAM1);
}

VFP_OP(getreg_F1, d)
{
    FT1d = *(float64 *)((char *) env + PARAM1);
}

VFP_OP(getreg_F1, s)
{
    FT1s = *(float32 *)((char *) env + PARAM1);
}

VFP_OP(setreg_F0, d)
{
    *(float64 *)((char *) env + PARAM1) = FT0d;
}

VFP_OP(setreg_F0, s)
{
    *(float32 *)((char *) env + PARAM1) = FT0s;
}
/* FPSCR read: full value into T0 (via the out-of-line helper). */
void OPPROTO op_vfp_movl_T0_fpscr(void)
{
    do_vfp_get_fpscr ();
}

/* FPSCR read: condition flags (bits 31:28) only. */
void OPPROTO op_vfp_movl_T0_fpscr_flags(void)
{
    T0 = env->vfp.xregs[ARM_VFP_FPSCR] & (0xf << 28);
}

/* FPSCR write from T0 (via the out-of-line helper). */
void OPPROTO op_vfp_movl_fpscr_T0(void)
{
    do_vfp_set_fpscr();
}

/* Access to the other VFP system registers; PARAM1 is the index. */
void OPPROTO op_vfp_movl_T0_xreg(void)
{
    T0 = env->vfp.xregs[PARAM1];
}

void OPPROTO op_vfp_movl_xreg_T0(void)
{
    env->vfp.xregs[PARAM1] = T0;
}
/* Move between FT0s and T0 (bitwise, no conversion). */
void OPPROTO op_vfp_mrs(void)
{
    T0 = vfp_stoi(FT0s);
}

void OPPROTO op_vfp_msr(void)
{
    FT0s = vfp_itos(T0);
}

/* Move between FT0d and {T0,T1}: T0 holds the low word, T1 the
   high word of the 64-bit value. */
void OPPROTO op_vfp_mrrd(void)
{
    CPU_DoubleU u;

    u.d = FT0d;
    T0 = u.l.lower;
    T1 = u.l.upper;
}

void OPPROTO op_vfp_mdrr(void)
{
    CPU_DoubleU u;

    u.l.lower = T0;
    u.l.upper = T1;
    FT0d = u.d;
}

/* Load immediate.  PARAM1 is the 32 most significant bits of the
   value; the low 32 bits are zero (VFP3 immediate encoding). */
void OPPROTO op_vfp_fconstd(void)
{
    CPU_DoubleU u;

    u.l.upper = PARAM1;
    u.l.lower = 0;
    FT0d = u.d;
}

void OPPROTO op_vfp_fconsts(void)
{
    FT0s = vfp_itos(PARAM1);
}
void OPPROTO op_movl_cp_T0(void)
{
helper_set_cp(env, PARAM1, T0);
......
......@@ -40,194 +40,6 @@ void cpu_unlock(void)
spin_unlock(&global_cpu_lock);
}
/* VFP support.  Legacy helpers operating on the implicit FT0
   globals (see the FT* defines in exec.h). */
void do_vfp_abss(void)
{
    FT0s = float32_abs(FT0s);
}

void do_vfp_absd(void)
{
    FT0d = float64_abs(FT0d);
}

void do_vfp_sqrts(void)
{
    FT0s = float32_sqrt(FT0s, &env->vfp.fp_status);
}

void do_vfp_sqrtd(void)
{
    FT0d = float64_sqrt(FT0d, &env->vfp.fp_status);
}
/* XXX: check quiet/signaling case */
/* Compare FT0 with FT1 and write the NZCV result into FPSCR bits
   31:28.  cmp uses the quiet compare, cmpe the signaling one.
   Flag encodings: 0x6 equal, 0x8 less, 0x2 greater, 0x3 unordered. */
#define DO_VFP_cmp(p, size) \
void do_vfp_cmp##p(void) \
{ \
    uint32_t flags; \
    switch(float ## size ## _compare_quiet(FT0##p, FT1##p, &env->vfp.fp_status)) {\
    case 0: flags = 0x6; break;\
    case -1: flags = 0x8; break;\
    case 1: flags = 0x2; break;\
    default: case 2: flags = 0x3; break;\
    }\
    env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28)\
        | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
    FORCE_RET(); \
}\
\
void do_vfp_cmpe##p(void) \
{ \
    uint32_t flags; \
    switch(float ## size ## _compare(FT0##p, FT1##p, &env->vfp.fp_status)) {\
    case 0: flags = 0x6; break;\
    case -1: flags = 0x8; break;\
    case 1: flags = 0x2; break;\
    default: case 2: flags = 0x3; break;\
    }\
    env->vfp.xregs[ARM_VFP_FPSCR] = (flags << 28)\
        | (env->vfp.xregs[ARM_VFP_FPSCR] & 0x0fffffff); \
    FORCE_RET(); \
}
DO_VFP_cmp(s, 32)
DO_VFP_cmp(d, 64)
#undef DO_VFP_cmp
/* Convert host (softfloat) exception flags to the VFP FPSCR
   cumulative-exception bit layout (bits 4:0). */
static inline int vfp_exceptbits_from_host(int host_bits)
{
    int target_bits = 0;

    if (host_bits & float_flag_invalid)
        target_bits |= 1;
    if (host_bits & float_flag_divbyzero)
        target_bits |= 2;
    if (host_bits & float_flag_overflow)
        target_bits |= 4;
    if (host_bits & float_flag_underflow)
        target_bits |= 8;
    if (host_bits & float_flag_inexact)
        target_bits |= 0x10;
    return target_bits;
}

/* Convert VFP FPSCR exception bits back to the softfloat flag set. */
static inline int vfp_exceptbits_to_host(int target_bits)
{
    int host_bits = 0;

    if (target_bits & 1)
        host_bits |= float_flag_invalid;
    if (target_bits & 2)
        host_bits |= float_flag_divbyzero;
    if (target_bits & 4)
        host_bits |= float_flag_overflow;
    if (target_bits & 8)
        host_bits |= float_flag_underflow;
    if (target_bits & 0x10)
        host_bits |= float_flag_inexact;
    return host_bits;
}
/* Write the FPSCR from T0: store the raw image, unpack LEN/STRIDE,
   update the softfloat rounding mode when RMode changed, and reload
   the exception flags. */
void do_vfp_set_fpscr(void)
{
    int i;
    uint32_t changed;

    changed = env->vfp.xregs[ARM_VFP_FPSCR];
    /* 0xffc8ffff drops LEN/STRIDE, which are cached unpacked. */
    env->vfp.xregs[ARM_VFP_FPSCR] = (T0 & 0xffc8ffff);
    env->vfp.vec_len = (T0 >> 16) & 7;
    env->vfp.vec_stride = (T0 >> 20) & 3;

    changed ^= T0;
    if (changed & (3 << 22)) {
        /* RMode field (bits 23:22) changed: map to softfloat mode. */
        i = (T0 >> 22) & 3;
        switch (i) {
        case 0:
            i = float_round_nearest_even;
            break;
        case 1:
            i = float_round_up;
            break;
        case 2:
            i = float_round_down;
            break;
        case 3:
            i = float_round_to_zero;
            break;
        }
        set_float_rounding_mode(i, &env->vfp.fp_status);
    }

    /* Replace the accumulated exception flags with bits 12:8. */
    i = vfp_exceptbits_to_host((T0 >> 8) & 0x1f);
    set_float_exception_flags(i, &env->vfp.fp_status);
    /* XXX: FZ and DN are not implemented. */
}

/* Read the FPSCR into T0, merging the cached LEN/STRIDE fields and
   the live softfloat exception flags. */
void do_vfp_get_fpscr(void)
{
    int i;

    T0 = (env->vfp.xregs[ARM_VFP_FPSCR] & 0xffc8ffff) | (env->vfp.vec_len << 16)
         | (env->vfp.vec_stride << 20);
    i = get_float_exception_flags(&env->vfp.fp_status);
    T0 |= vfp_exceptbits_from_host(i);
}
/* NEON VRECPS: Newton-Raphson reciprocal step, 2.0 - a * b. */
float32 helper_recps_f32(float32 a, float32 b)
{
    float_status *s = &env->vfp.fp_status;
    float32 two = int32_to_float32(2, s);
    return float32_sub(two, float32_mul(a, b, s), s);
}

/* NEON VRSQRTS step.
   NOTE(review): the ARM ARM's FPRSqrtStep is (3.0 - a*b)/2.0; the
   divide by two appears to be missing here -- confirm against the
   architecture manual. */
float32 helper_rsqrts_f32(float32 a, float32 b)
{
    float_status *s = &env->vfp.fp_status;
    float32 three = int32_to_float32(3, s);
    return float32_sub(three, float32_mul(a, b, s), s);
}

/* TODO: The architecture specifies the value that the estimate functions
   should return.  We return the exact reciprocal/root instead. */
float32 helper_recpe_f32(float32 a)
{
    float_status *s = &env->vfp.fp_status;
    float32 one = int32_to_float32(1, s);
    return float32_div(one, a, s);
}

float32 helper_rsqrte_f32(float32 a)
{
    float_status *s = &env->vfp.fp_status;
    float32 one = int32_to_float32(1, s);
    return float32_div(one, float32_sqrt(a, s), s);
}

/* U32 estimates: scale the input down to a fraction, estimate, and
   scale the result back up to a 1.31 fixed-point value. */
uint32_t helper_recpe_u32(uint32_t a)
{
    float_status *s = &env->vfp.fp_status;
    float32 tmp;
    tmp = int32_to_float32(a, s);
    tmp = float32_scalbn(tmp, -32, s);
    tmp = helper_recpe_f32(tmp);
    tmp = float32_scalbn(tmp, 31, s);
    return float32_to_int32(tmp, s);
}

uint32_t helper_rsqrte_u32(uint32_t a)
{
    float_status *s = &env->vfp.fp_status;
    float32 tmp;
    tmp = int32_to_float32(a, s);
    tmp = float32_scalbn(tmp, -32, s);
    tmp = helper_rsqrte_f32(tmp);
    tmp = float32_scalbn(tmp, 31, s);
    return float32_to_int32(tmp, s);
}
void helper_neon_tbl(int rn, int maxindex)
{
uint32_t val;
......
......@@ -77,24 +77,6 @@ void OPPROTO glue(op_stqex,MEMSUFFIX)(void)
FORCE_RET();
}
/* Floating point load/store.  Address is in T1.  ld fills FT0<p>
   from memory, st writes it back; the width suffix selects the
   32-bit ("l") or 64-bit ("q") float access routines. */
#define VFP_MEM_OP(p, w) \
void OPPROTO glue(op_vfp_ld##p,MEMSUFFIX)(void) \
{ \
    FT0##p = glue(ldf##w,MEMSUFFIX)(T1); \
    FORCE_RET(); \
} \
void OPPROTO glue(op_vfp_st##p,MEMSUFFIX)(void) \
{ \
    glue(stf##w,MEMSUFFIX)(T1, FT0##p); \
    FORCE_RET(); \
}
VFP_MEM_OP(s,l)
VFP_MEM_OP(d,q)
#undef VFP_MEM_OP
/* iwMMXt load/store. Address is in T1 */
#define MMX_MEM_OP(name, ldname) \
void OPPROTO glue(op_iwmmxt_ld##name,MEMSUFFIX)(void) \
......
......@@ -14,6 +14,29 @@
/* Shorthand for the NEON float status and micro-op prototype. */
#define NFS &env->vfp.fp_status
#define NEON_OP(name) void OPPROTO op_neon_##name (void)

/* Helper routines to perform bitwise copies between float and int.
   A union is used so the reinterpretation also works when float32
   is a struct rather than a raw integer. */
static inline float32 vfp_itos(uint32_t i)
{
    union {
        uint32_t i;
        float32 s;
    } v;

    v.i = i;
    return v.s;
}

static inline uint32_t vfp_stoi(float32 s)
{
    union {
        uint32_t i;
        float32 s;
    } v;

    v.s = s;
    return v.i;
}
NEON_OP(getreg_T0)
{
T0 = *(uint32_t *)((char *) env + PARAM1);
......@@ -754,18 +777,6 @@ NEON_VOP(qrdmulh_s32, neon_s32, 1)
#undef NEON_FN
#undef NEON_QDMULH32
/* Newton-Raphson step micro-ops: T0/T1 carry float32 bit patterns;
   the arithmetic is done by the out-of-line helper_* routines. */
NEON_OP(recps_f32)
{
    T0 = vfp_stoi(helper_recps_f32(vfp_itos(T0), vfp_itos(T1)));
    FORCE_RET();
}

NEON_OP(rsqrts_f32)
{
    T0 = vfp_stoi(helper_rsqrts_f32(vfp_itos(T0), vfp_itos(T1)));
    FORCE_RET();
}
/* Floating point comparisons produce an integer result. */
#define NEON_VOP_FCMP(name, cmp) \
NEON_OP(name) \
......@@ -1702,27 +1713,6 @@ NEON_OP(zip_u16)
FORCE_RET();
}
/* Reciprocal/root estimate.  U32 variants operate on T0 in place;
   F32 variants operate on the implicit FT0s global. */
NEON_OP(recpe_u32)
{
    T0 = helper_recpe_u32(T0);
}

NEON_OP(rsqrte_u32)
{
    T0 = helper_rsqrte_u32(T0);
}

NEON_OP(recpe_f32)
{
    FT0s = helper_recpe_f32(FT0s);
}

NEON_OP(rsqrte_f32)
{
    FT0s = helper_rsqrte_f32(FT0s);
}
/* Table lookup. This accessed the register file directly. */
NEON_OP(tbl)
{
......
此差异已折叠。
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册