提交 27e1259a 编写于 作者: P Peter Maydell

Merge remote-tracking branch 'remotes/rth/tags/pull-axp-20150521' into staging

Rewrite fp exceptions

# gpg: Signature made Thu May 21 18:35:52 2015 BST using RSA key ID 4DD0279B
# gpg: Good signature from "Richard Henderson <rth7680@gmail.com>"
# gpg:                 aka "Richard Henderson <rth@redhat.com>"
# gpg:                 aka "Richard Henderson <rth@twiddle.net>"

* remotes/rth/tags/pull-axp-20150521:
  target-alpha: Add vector implementation for CMPBGE
  target-alpha: Rewrite helper_zapnot
  target-alpha: Raise IOV from CVTQL
  target-alpha: Suppress underflow from CVTTQ if DNZ
  target-alpha: Raise EXC_M_INV properly for fp inputs
  target-alpha: Disallow literal operand to 1C.30 to 1C.37
  target-alpha: Implement WH64EN
  target-alpha: Fix integer overflow checking insns
  target-alpha: Fix cvttq vs inf
  target-alpha: Fix cvttq vs large integers
  target-alpha: Raise IOV from CVTTQ
  target-alpha: Set EXC_M_SWC for exceptions from /S insns
  target-alpha: Set fpcr_exc_status even for disabled exceptions
  target-alpha: Tidy FPCR representation
  target-alpha: Set PC correctly for floating-point exceptions
  target-alpha: Forget installed round mode after MT_FPCR
  target-alpha: Rename floating-point subroutines
  target-alpha: Move VAX helpers to a new file
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
obj-$(CONFIG_SOFTMMU) += machine.o
obj-y += translate.o helper.o cpu.o
obj-y += int_helper.o fpu_helper.o sys_helper.o mem_helper.o
obj-y += int_helper.o fpu_helper.o vax_helper.o sys_helper.o mem_helper.o
obj-y += gdbstub.o
......@@ -150,54 +150,54 @@ enum {
FP_ROUND_DYNAMIC = 0x3,
};
/* FPCR bits */
#define FPCR_SUM (1ULL << 63)
#define FPCR_INED (1ULL << 62)
#define FPCR_UNFD (1ULL << 61)
#define FPCR_UNDZ (1ULL << 60)
#define FPCR_DYN_SHIFT 58
#define FPCR_DYN_CHOPPED (0ULL << FPCR_DYN_SHIFT)
#define FPCR_DYN_MINUS (1ULL << FPCR_DYN_SHIFT)
#define FPCR_DYN_NORMAL (2ULL << FPCR_DYN_SHIFT)
#define FPCR_DYN_PLUS (3ULL << FPCR_DYN_SHIFT)
#define FPCR_DYN_MASK (3ULL << FPCR_DYN_SHIFT)
#define FPCR_IOV (1ULL << 57)
#define FPCR_INE (1ULL << 56)
#define FPCR_UNF (1ULL << 55)
#define FPCR_OVF (1ULL << 54)
#define FPCR_DZE (1ULL << 53)
#define FPCR_INV (1ULL << 52)
#define FPCR_OVFD (1ULL << 51)
#define FPCR_DZED (1ULL << 50)
#define FPCR_INVD (1ULL << 49)
#define FPCR_DNZ (1ULL << 48)
#define FPCR_DNOD (1ULL << 47)
#define FPCR_STATUS_MASK (FPCR_IOV | FPCR_INE | FPCR_UNF \
| FPCR_OVF | FPCR_DZE | FPCR_INV)
/* FPCR bits -- right-shifted 32 so we can use a uint32_t. */
#define FPCR_SUM (1U << (63 - 32))
#define FPCR_INED (1U << (62 - 32))
#define FPCR_UNFD (1U << (61 - 32))
#define FPCR_UNDZ (1U << (60 - 32))
#define FPCR_DYN_SHIFT (58 - 32)
#define FPCR_DYN_CHOPPED (0U << FPCR_DYN_SHIFT)
#define FPCR_DYN_MINUS (1U << FPCR_DYN_SHIFT)
#define FPCR_DYN_NORMAL (2U << FPCR_DYN_SHIFT)
#define FPCR_DYN_PLUS (3U << FPCR_DYN_SHIFT)
#define FPCR_DYN_MASK (3U << FPCR_DYN_SHIFT)
#define FPCR_IOV (1U << (57 - 32))
#define FPCR_INE (1U << (56 - 32))
#define FPCR_UNF (1U << (55 - 32))
#define FPCR_OVF (1U << (54 - 32))
#define FPCR_DZE (1U << (53 - 32))
#define FPCR_INV (1U << (52 - 32))
#define FPCR_OVFD (1U << (51 - 32))
#define FPCR_DZED (1U << (50 - 32))
#define FPCR_INVD (1U << (49 - 32))
#define FPCR_DNZ (1U << (48 - 32))
#define FPCR_DNOD (1U << (47 - 32))
#define FPCR_STATUS_MASK (FPCR_IOV | FPCR_INE | FPCR_UNF \
| FPCR_OVF | FPCR_DZE | FPCR_INV)
/* The silly software trap enables implemented by the kernel emulation.
These are more or less architecturally required, since the real hardware
has read-as-zero bits in the FPCR when the features aren't implemented.
For the purposes of QEMU, we pretend the FPCR can hold everything. */
#define SWCR_TRAP_ENABLE_INV (1ULL << 1)
#define SWCR_TRAP_ENABLE_DZE (1ULL << 2)
#define SWCR_TRAP_ENABLE_OVF (1ULL << 3)
#define SWCR_TRAP_ENABLE_UNF (1ULL << 4)
#define SWCR_TRAP_ENABLE_INE (1ULL << 5)
#define SWCR_TRAP_ENABLE_DNO (1ULL << 6)
#define SWCR_TRAP_ENABLE_MASK ((1ULL << 7) - (1ULL << 1))
#define SWCR_MAP_DMZ (1ULL << 12)
#define SWCR_MAP_UMZ (1ULL << 13)
#define SWCR_MAP_MASK (SWCR_MAP_DMZ | SWCR_MAP_UMZ)
#define SWCR_STATUS_INV (1ULL << 17)
#define SWCR_STATUS_DZE (1ULL << 18)
#define SWCR_STATUS_OVF (1ULL << 19)
#define SWCR_STATUS_UNF (1ULL << 20)
#define SWCR_STATUS_INE (1ULL << 21)
#define SWCR_STATUS_DNO (1ULL << 22)
#define SWCR_STATUS_MASK ((1ULL << 23) - (1ULL << 17))
#define SWCR_TRAP_ENABLE_INV (1U << 1)
#define SWCR_TRAP_ENABLE_DZE (1U << 2)
#define SWCR_TRAP_ENABLE_OVF (1U << 3)
#define SWCR_TRAP_ENABLE_UNF (1U << 4)
#define SWCR_TRAP_ENABLE_INE (1U << 5)
#define SWCR_TRAP_ENABLE_DNO (1U << 6)
#define SWCR_TRAP_ENABLE_MASK ((1U << 7) - (1U << 1))
#define SWCR_MAP_DMZ (1U << 12)
#define SWCR_MAP_UMZ (1U << 13)
#define SWCR_MAP_MASK (SWCR_MAP_DMZ | SWCR_MAP_UMZ)
#define SWCR_STATUS_INV (1U << 17)
#define SWCR_STATUS_DZE (1U << 18)
#define SWCR_STATUS_OVF (1U << 19)
#define SWCR_STATUS_UNF (1U << 20)
#define SWCR_STATUS_INE (1U << 21)
#define SWCR_STATUS_DNO (1U << 22)
#define SWCR_STATUS_MASK ((1U << 23) - (1U << 17))
#define SWCR_MASK (SWCR_TRAP_ENABLE_MASK | SWCR_MAP_MASK | SWCR_STATUS_MASK)
......@@ -238,14 +238,13 @@ struct CPUAlphaState {
uint64_t lock_addr;
uint64_t lock_st_addr;
uint64_t lock_value;
/* The FPCR, and disassembled portions thereof. */
uint32_t fpcr;
uint32_t fpcr_exc_enable;
float_status fp_status;
/* The following fields make up the FPCR, but in FP_STATUS format. */
uint8_t fpcr_exc_status;
uint8_t fpcr_exc_mask;
uint8_t fpcr_dyn_round;
uint8_t fpcr_flush_to_zero;
uint8_t fpcr_dnod;
uint8_t fpcr_undz;
/* The Internal Processor Registers. Some of these we assume always
exist for use in user-mode. */
......
此差异已折叠。
......@@ -25,136 +25,48 @@
#include "fpu/softfloat.h"
#include "exec/helper-proto.h"
uint64_t cpu_alpha_load_fpcr (CPUAlphaState *env)
{
uint64_t r = 0;
uint8_t t;
t = env->fpcr_exc_status;
if (t) {
r = FPCR_SUM;
if (t & float_flag_invalid) {
r |= FPCR_INV;
}
if (t & float_flag_divbyzero) {
r |= FPCR_DZE;
}
if (t & float_flag_overflow) {
r |= FPCR_OVF;
}
if (t & float_flag_underflow) {
r |= FPCR_UNF;
}
if (t & float_flag_inexact) {
r |= FPCR_INE;
}
}
t = env->fpcr_exc_mask;
if (t & float_flag_invalid) {
r |= FPCR_INVD;
}
if (t & float_flag_divbyzero) {
r |= FPCR_DZED;
}
if (t & float_flag_overflow) {
r |= FPCR_OVFD;
}
if (t & float_flag_underflow) {
r |= FPCR_UNFD;
}
if (t & float_flag_inexact) {
r |= FPCR_INED;
}
switch (env->fpcr_dyn_round) {
case float_round_nearest_even:
r |= FPCR_DYN_NORMAL;
break;
case float_round_down:
r |= FPCR_DYN_MINUS;
break;
case float_round_up:
r |= FPCR_DYN_PLUS;
break;
case float_round_to_zero:
r |= FPCR_DYN_CHOPPED;
break;
}
if (env->fp_status.flush_inputs_to_zero) {
r |= FPCR_DNZ;
}
if (env->fpcr_dnod) {
r |= FPCR_DNOD;
}
if (env->fpcr_undz) {
r |= FPCR_UNDZ;
}
#define CONVERT_BIT(X, SRC, DST) \
(SRC > DST ? (X) / (SRC / DST) & (DST) : ((X) & SRC) * (DST / SRC))
return r;
uint64_t cpu_alpha_load_fpcr (CPUAlphaState *env)
{
return (uint64_t)env->fpcr << 32;
}
void cpu_alpha_store_fpcr (CPUAlphaState *env, uint64_t val)
{
uint8_t t;
uint32_t fpcr = val >> 32;
uint32_t t = 0;
t = 0;
if (val & FPCR_INV) {
t |= float_flag_invalid;
}
if (val & FPCR_DZE) {
t |= float_flag_divbyzero;
}
if (val & FPCR_OVF) {
t |= float_flag_overflow;
}
if (val & FPCR_UNF) {
t |= float_flag_underflow;
}
if (val & FPCR_INE) {
t |= float_flag_inexact;
}
env->fpcr_exc_status = t;
t |= CONVERT_BIT(fpcr, FPCR_INED, FPCR_INE);
t |= CONVERT_BIT(fpcr, FPCR_UNFD, FPCR_UNF);
t |= CONVERT_BIT(fpcr, FPCR_OVFD, FPCR_OVF);
t |= CONVERT_BIT(fpcr, FPCR_DZED, FPCR_DZE);
t |= CONVERT_BIT(fpcr, FPCR_INVD, FPCR_INV);
t = 0;
if (val & FPCR_INVD) {
t |= float_flag_invalid;
}
if (val & FPCR_DZED) {
t |= float_flag_divbyzero;
}
if (val & FPCR_OVFD) {
t |= float_flag_overflow;
}
if (val & FPCR_UNFD) {
t |= float_flag_underflow;
}
if (val & FPCR_INED) {
t |= float_flag_inexact;
}
env->fpcr_exc_mask = t;
env->fpcr = fpcr;
env->fpcr_exc_enable = ~t & FPCR_STATUS_MASK;
switch (val & FPCR_DYN_MASK) {
switch (fpcr & FPCR_DYN_MASK) {
case FPCR_DYN_NORMAL:
default:
t = float_round_nearest_even;
break;
case FPCR_DYN_CHOPPED:
t = float_round_to_zero;
break;
case FPCR_DYN_MINUS:
t = float_round_down;
break;
case FPCR_DYN_NORMAL:
t = float_round_nearest_even;
break;
case FPCR_DYN_PLUS:
t = float_round_up;
break;
}
env->fpcr_dyn_round = t;
env->fpcr_dnod = (val & FPCR_DNOD) != 0;
env->fpcr_undz = (val & FPCR_UNDZ) != 0;
env->fpcr_flush_to_zero = env->fpcr_dnod & env->fpcr_undz;
env->fp_status.flush_inputs_to_zero = (val & FPCR_DNZ) != 0;
env->fpcr_flush_to_zero = (fpcr & FPCR_UNFD) && (fpcr & FPCR_UNDZ);
env->fp_status.flush_inputs_to_zero = (fpcr & FPCR_DNZ) != 0;
}
uint64_t helper_load_fpcr(CPUAlphaState *env)
......@@ -571,6 +483,8 @@ void QEMU_NORETURN dynamic_excp(CPUAlphaState *env, uintptr_t retaddr,
env->error_code = error;
if (retaddr) {
cpu_restore_state(cs, retaddr);
/* Floating-point exceptions (our only users) point to the next PC. */
env->pc += 4;
}
cpu_loop_exit(cs);
}
......
DEF_HELPER_3(excp, noreturn, env, int, int)
DEF_HELPER_FLAGS_1(load_pcc, TCG_CALL_NO_RWG_SE, i64, env)
DEF_HELPER_FLAGS_3(addqv, TCG_CALL_NO_WG, i64, env, i64, i64)
DEF_HELPER_FLAGS_3(addlv, TCG_CALL_NO_WG, i64, env, i64, i64)
DEF_HELPER_FLAGS_3(subqv, TCG_CALL_NO_WG, i64, env, i64, i64)
DEF_HELPER_FLAGS_3(sublv, TCG_CALL_NO_WG, i64, env, i64, i64)
DEF_HELPER_FLAGS_3(mullv, TCG_CALL_NO_WG, i64, env, i64, i64)
DEF_HELPER_FLAGS_3(mulqv, TCG_CALL_NO_WG, i64, env, i64, i64)
DEF_HELPER_FLAGS_3(check_overflow, TCG_CALL_NO_WG, void, env, i64, i64)
DEF_HELPER_FLAGS_1(ctpop, TCG_CALL_NO_RWG_SE, i64, i64)
DEF_HELPER_FLAGS_1(ctlz, TCG_CALL_NO_RWG_SE, i64, i64)
......@@ -83,18 +78,17 @@ DEF_HELPER_FLAGS_2(cvtqg, TCG_CALL_NO_RWG, i64, env, i64)
DEF_HELPER_FLAGS_2(cvttq, TCG_CALL_NO_RWG, i64, env, i64)
DEF_HELPER_FLAGS_2(cvttq_c, TCG_CALL_NO_RWG, i64, env, i64)
DEF_HELPER_FLAGS_2(cvttq_svic, TCG_CALL_NO_RWG, i64, env, i64)
DEF_HELPER_FLAGS_2(cvtql, TCG_CALL_NO_RWG, i64, env, i64)
DEF_HELPER_FLAGS_2(setroundmode, TCG_CALL_NO_RWG, void, env, i32)
DEF_HELPER_FLAGS_2(setflushzero, TCG_CALL_NO_RWG, void, env, i32)
DEF_HELPER_FLAGS_1(fp_exc_clear, TCG_CALL_NO_RWG, void, env)
DEF_HELPER_FLAGS_1(fp_exc_get, TCG_CALL_NO_RWG_SE, i32, env)
DEF_HELPER_FLAGS_3(fp_exc_raise, TCG_CALL_NO_WG, void, env, i32, i32)
DEF_HELPER_FLAGS_3(fp_exc_raise_s, TCG_CALL_NO_WG, void, env, i32, i32)
DEF_HELPER_FLAGS_2(ieee_input, TCG_CALL_NO_WG, void, env, i64)
DEF_HELPER_FLAGS_2(ieee_input_cmp, TCG_CALL_NO_WG, void, env, i64)
DEF_HELPER_FLAGS_2(fcvtql_v_input, TCG_CALL_NO_WG, void, env, i64)
DEF_HELPER_FLAGS_2(ieee_input_s, TCG_CALL_NO_WG, void, env, i64)
#if !defined (CONFIG_USER_ONLY)
DEF_HELPER_2(hw_ret, void, env, i64)
......
......@@ -37,35 +37,65 @@ uint64_t helper_cttz(uint64_t arg)
return ctz64(arg);
}
static inline uint64_t byte_zap(uint64_t op, uint8_t mskb)
uint64_t helper_zapnot(uint64_t val, uint64_t mskb)
{
uint64_t mask;
mask = 0;
mask |= ((mskb >> 0) & 1) * 0x00000000000000FFULL;
mask |= ((mskb >> 1) & 1) * 0x000000000000FF00ULL;
mask |= ((mskb >> 2) & 1) * 0x0000000000FF0000ULL;
mask |= ((mskb >> 3) & 1) * 0x00000000FF000000ULL;
mask |= ((mskb >> 4) & 1) * 0x000000FF00000000ULL;
mask |= ((mskb >> 5) & 1) * 0x0000FF0000000000ULL;
mask |= ((mskb >> 6) & 1) * 0x00FF000000000000ULL;
mask |= ((mskb >> 7) & 1) * 0xFF00000000000000ULL;
return op & ~mask;
}
mask = -(mskb & 0x01) & 0x00000000000000ffull;
mask |= -(mskb & 0x02) & 0x000000000000ff00ull;
mask |= -(mskb & 0x04) & 0x0000000000ff0000ull;
mask |= -(mskb & 0x08) & 0x00000000ff000000ull;
mask |= -(mskb & 0x10) & 0x000000ff00000000ull;
mask |= -(mskb & 0x20) & 0x0000ff0000000000ull;
mask |= -(mskb & 0x40) & 0x00ff000000000000ull;
mask |= -(mskb & 0x80) & 0xff00000000000000ull;
uint64_t helper_zap(uint64_t val, uint64_t mask)
{
return byte_zap(val, mask);
return val & mask;
}
uint64_t helper_zapnot(uint64_t val, uint64_t mask)
uint64_t helper_zap(uint64_t val, uint64_t mask)
{
return byte_zap(val, ~mask);
return helper_zapnot(val, ~mask);
}
uint64_t helper_cmpbge(uint64_t op1, uint64_t op2)
{
#if defined(__SSE2__)
uint64_t r;
/* The cmpbge instruction is heavily used in the implementation of
every string function on Alpha. We can do much better than either
the default loop below, or even an unrolled version by using the
native vector support. */
{
typedef uint64_t Q __attribute__((vector_size(16)));
typedef uint8_t B __attribute__((vector_size(16)));
Q q1 = (Q){ op1, 0 };
Q q2 = (Q){ op2, 0 };
q1 = (Q)((B)q1 >= (B)q2);
r = q1[0];
}
/* Select only one bit from each byte. */
r &= 0x0101010101010101;
/* Collect the bits into the bottom byte. */
/* .......A.......B.......C.......D.......E.......F.......G.......H */
r |= r >> (8 - 1);
/* .......A......AB......BC......CD......DE......EF......FG......GH */
r |= r >> (16 - 2);
/* .......A......AB.....ABC....ABCD....BCDE....CDEF....DEFG....EFGH */
r |= r >> (32 - 4);
/* .......A......AB.....ABC....ABCD...ABCDE..ABCDEF.ABCDEFGABCDEFGH */
/* Return only the low 8 bits. */
return r & 0xff;
#else
uint8_t opa, opb, res;
int i;
......@@ -78,6 +108,7 @@ uint64_t helper_cmpbge(uint64_t op1, uint64_t op2)
}
}
return res;
#endif
}
uint64_t helper_minub8(uint64_t op1, uint64_t op2)
......@@ -249,64 +280,9 @@ uint64_t helper_unpkbw(uint64_t op1)
| ((op1 & 0xff000000) << 24));
}
uint64_t helper_addqv(CPUAlphaState *env, uint64_t op1, uint64_t op2)
void helper_check_overflow(CPUAlphaState *env, uint64_t op1, uint64_t op2)
{
uint64_t tmp = op1;
op1 += op2;
if (unlikely((tmp ^ op2 ^ (-1ULL)) & (tmp ^ op1) & (1ULL << 63))) {
arith_excp(env, GETPC(), EXC_M_IOV, 0);
}
return op1;
}
uint64_t helper_addlv(CPUAlphaState *env, uint64_t op1, uint64_t op2)
{
uint64_t tmp = op1;
op1 = (uint32_t)(op1 + op2);
if (unlikely((tmp ^ op2 ^ (-1UL)) & (tmp ^ op1) & (1UL << 31))) {
arith_excp(env, GETPC(), EXC_M_IOV, 0);
}
return op1;
}
uint64_t helper_subqv(CPUAlphaState *env, uint64_t op1, uint64_t op2)
{
uint64_t res;
res = op1 - op2;
if (unlikely((op1 ^ op2) & (res ^ op1) & (1ULL << 63))) {
arith_excp(env, GETPC(), EXC_M_IOV, 0);
}
return res;
}
uint64_t helper_sublv(CPUAlphaState *env, uint64_t op1, uint64_t op2)
{
uint32_t res;
res = op1 - op2;
if (unlikely((op1 ^ op2) & (res ^ op1) & (1UL << 31))) {
arith_excp(env, GETPC(), EXC_M_IOV, 0);
}
return res;
}
uint64_t helper_mullv(CPUAlphaState *env, uint64_t op1, uint64_t op2)
{
int64_t res = (int64_t)op1 * (int64_t)op2;
if (unlikely((int32_t)res != res)) {
arith_excp(env, GETPC(), EXC_M_IOV, 0);
}
return (int64_t)((int32_t)res);
}
uint64_t helper_mulqv(CPUAlphaState *env, uint64_t op1, uint64_t op2)
{
uint64_t tl, th;
muls64(&tl, &th, op1, op2);
/* If th != 0 && th != -1, then we had an overflow */
if (unlikely((th + 1) > 1)) {
if (unlikely(op1 != op2)) {
arith_excp(env, GETPC(), EXC_M_IOV, 0);
}
return tl;
}
......@@ -128,7 +128,14 @@ void alpha_cpu_unassigned_access(CPUState *cs, hwaddr addr,
env->trap_arg0 = addr;
env->trap_arg1 = is_write ? 1 : 0;
dynamic_excp(env, 0, EXCP_MCHK, 0);
cs->exception_index = EXCP_MCHK;
env->error_code = 0;
/* ??? We should cpu_restore_state to the faulting insn, but this hook
   does not have access to the retaddr value from the original helper.
It's all moot until the QEMU PALcode grows an MCHK handler. */
cpu_loop_exit(cs);
}
/* try to fill the TLB and return an exception if error. If retaddr is
......
此差异已折叠。
/*
* Helpers for vax floating point instructions.
*
* Copyright (c) 2007 Jocelyn Mayer
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
*/
#include "cpu.h"
#include "exec/helper-proto.h"
#include "fpu/softfloat.h"
#define FP_STATUS (env->fp_status)
/* F floating (VAX) */
/* Convert an IEEE single to the 64-bit register form of a VAX F-float.
   NaN, infinity, and exponents too large for the F format have no
   encoding and are mapped to a VAX dirty zero (low bit set).  */
static uint64_t float32_to_f(float32 fa)
{
    uint64_t r, exp, mant, sig;
    CPU_FloatU a;

    a.f = fa;
    /* Extract sign, exponent and mantissa, pre-shifted into the
       positions they occupy in the 64-bit register layout.  */
    sig = ((uint64_t)a.l & 0x80000000) << 32;
    exp = (a.l >> 23) & 0xff;
    mant = ((uint64_t)a.l & 0x007fffff) << 29;

    if (exp == 255) {
        /* NaN or infinity */
        r = 1; /* VAX dirty zero */
    } else if (exp == 0) {
        if (mant == 0) {
            /* Zero */
            r = 0;
        } else {
            /* Denormalized */
            r = sig | ((exp + 1) << 52) | mant;
        }
    } else {
        if (exp >= 253) {
            /* Overflow */
            r = 1; /* VAX dirty zero */
        } else {
            /* Normal number: rebias the exponent and keep the mantissa.
               (The mantissa was erroneously dropped here before this fix,
               which truncated every normal value to a power of two.)  */
            r = sig | ((exp + 2) << 52) | mant;
        }
    }

    return r;
}
/* Convert the 64-bit register form of a VAX F-float to an IEEE single.
   A reserved operand (zero exponent with nonzero sign/mantissa bits)
   raises OPCDEC; exponents too small for the rebias flush to zero.  */
static float32 f_to_float32(CPUAlphaState *env, uintptr_t retaddr, uint64_t a)
{
    uint32_t exp, mant_sig;
    CPU_FloatU r;

    /* Reassemble the 8-bit exponent and the sign+mantissa word from
       their positions in the 64-bit register layout.  */
    exp = ((a >> 55) & 0x80) | ((a >> 52) & 0x7f);
    mant_sig = ((a >> 32) & 0x80000000) | ((a >> 29) & 0x007fffff);

    if (unlikely(!exp && mant_sig)) {
        /* Reserved operands / Dirty zero */
        dynamic_excp(env, retaddr, EXCP_OPCDEC, 0);
    }

    if (exp < 3) {
        /* Underflow: the exponent rebias (-2) would wrap.  */
        r.l = 0;
    } else {
        r.l = ((exp - 2) << 23) | mant_sig;
    }

    return r.f;
}
/* Repack the 64-bit register form of an F-float into the 32-bit
   word-swapped layout the VAX F format uses in memory.  */
uint32_t helper_f_to_memory(uint64_t a)
{
    uint32_t r;

    /* Shift each field down first, then mask it into place.  */
    r  = (uint32_t)(a >> 13) & 0xffff0000u;
    r |= (uint32_t)(a >> 45) & 0x00003fffu;
    r |= (uint32_t)(a >> 48) & 0x0000c000u;
    return r;
}
/* Expand the 32-bit memory layout of an F-float into the 64-bit
   register form, word-swapping and widening the exponent field.  */
uint64_t helper_memory_to_f(uint32_t a)
{
    uint64_t r = ((uint64_t)(a & 0x0000c000) << 48)
               | ((uint64_t)(a & 0x003fffff) << 45)
               | ((uint64_t)(a & 0xffff0000) << 13);

    /* Widen the exponent: replicate the complement of its MSB.  */
    if ((a & 0x00004000) == 0) {
        r |= (uint64_t)7 << 59;
    }
    return r;
}
/* ??? Emulating VAX arithmetic with IEEE arithmetic is wrong. We should
either implement VAX arithmetic properly or just signal invalid opcode. */
/* VAX F-float add: convert both operands to IEEE single, add,
   and convert the result back to the F register form.  */
uint64_t helper_addf(CPUAlphaState *env, uint64_t a, uint64_t b)
{
    float32 va = f_to_float32(env, GETPC(), a);
    float32 vb = f_to_float32(env, GETPC(), b);
    return float32_to_f(float32_add(va, vb, &FP_STATUS));
}
/* VAX F-float subtract, emulated via IEEE single arithmetic.  */
uint64_t helper_subf(CPUAlphaState *env, uint64_t a, uint64_t b)
{
    float32 va = f_to_float32(env, GETPC(), a);
    float32 vb = f_to_float32(env, GETPC(), b);
    return float32_to_f(float32_sub(va, vb, &FP_STATUS));
}
/* VAX F-float multiply, emulated via IEEE single arithmetic.  */
uint64_t helper_mulf(CPUAlphaState *env, uint64_t a, uint64_t b)
{
    float32 va = f_to_float32(env, GETPC(), a);
    float32 vb = f_to_float32(env, GETPC(), b);
    return float32_to_f(float32_mul(va, vb, &FP_STATUS));
}
/* VAX F-float divide, emulated via IEEE single arithmetic.  */
uint64_t helper_divf(CPUAlphaState *env, uint64_t a, uint64_t b)
{
    float32 va = f_to_float32(env, GETPC(), a);
    float32 vb = f_to_float32(env, GETPC(), b);
    return float32_to_f(float32_div(va, vb, &FP_STATUS));
}
/* VAX F-float square root, emulated via IEEE single arithmetic.  */
uint64_t helper_sqrtf(CPUAlphaState *env, uint64_t t)
{
    float32 vt = f_to_float32(env, GETPC(), t);
    return float32_to_f(float32_sqrt(vt, &FP_STATUS));
}
/* G floating (VAX) */
/* Convert an IEEE double to the 64-bit register form of a VAX G-float.
   NaN, infinity, and exponents too large for the G format have no
   encoding and are mapped to a VAX dirty zero (low bit set).  */
static uint64_t float64_to_g(float64 fa)
{
    uint64_t r, exp, mant, sig;
    CPU_DoubleU a;

    a.d = fa;
    /* Sign, exponent and mantissa fields of the IEEE double.  */
    sig = a.ll & 0x8000000000000000ull;
    exp = (a.ll >> 52) & 0x7ff;
    mant = a.ll & 0x000fffffffffffffull;

    if (exp == 2047) {
        /* NaN or infinity */
        r = 1; /* VAX dirty zero */
    } else if (exp == 0) {
        if (mant == 0) {
            /* Zero */
            r = 0;
        } else {
            /* Denormalized */
            r = sig | ((exp + 1) << 52) | mant;
        }
    } else {
        if (exp >= 2045) {
            /* Overflow */
            r = 1; /* VAX dirty zero */
        } else {
            /* Normal number: rebias the exponent and keep the mantissa.
               (The mantissa was erroneously dropped here before this fix,
               which truncated every normal value to a power of two.)  */
            r = sig | ((exp + 2) << 52) | mant;
        }
    }

    return r;
}
/* Convert the 64-bit register form of a VAX G-float to an IEEE double.
   A reserved operand (zero exponent with nonzero sign/mantissa bits)
   raises OPCDEC; exponents too small for the rebias flush to zero.  */
static float64 g_to_float64(CPUAlphaState *env, uintptr_t retaddr, uint64_t a)
{
    uint64_t exp, mant_sig;
    CPU_DoubleU r;

    /* The G register layout already matches IEEE double field positions;
       only the exponent bias differs.  */
    exp = (a >> 52) & 0x7ff;
    mant_sig = a & 0x800fffffffffffffull;

    if (!exp && mant_sig) {
        /* Reserved operands / Dirty zero */
        dynamic_excp(env, retaddr, EXCP_OPCDEC, 0);
    }

    if (exp < 3) {
        /* Underflow: the exponent rebias (-2) would wrap.  */
        r.ll = 0;
    } else {
        r.ll = ((exp - 2) << 52) | mant_sig;
    }
    return r.d;
}
/* Repack a G-float register value into its memory layout: the four
   16-bit words are stored in reversed order.  */
uint64_t helper_g_to_memory(uint64_t a)
{
    return ((a & 0x000000000000ffffull) << 48)
         | ((a & 0x00000000ffff0000ull) << 16)
         | ((a & 0x0000ffff00000000ull) >> 16)
         | (a >> 48);
}
/* Unpack a G-float from its memory layout into register form.  The
   word swap is its own inverse, so this mirrors helper_g_to_memory.  */
uint64_t helper_memory_to_g(uint64_t a)
{
    return ((a & 0x000000000000ffffull) << 48)
         | ((a & 0x00000000ffff0000ull) << 16)
         | ((a & 0x0000ffff00000000ull) >> 16)
         | (a >> 48);
}
/* VAX G-float add: convert both operands to IEEE double, add,
   and convert the result back to the G register form.  */
uint64_t helper_addg(CPUAlphaState *env, uint64_t a, uint64_t b)
{
    float64 va = g_to_float64(env, GETPC(), a);
    float64 vb = g_to_float64(env, GETPC(), b);
    return float64_to_g(float64_add(va, vb, &FP_STATUS));
}
/* VAX G-float subtract, emulated via IEEE double arithmetic.  */
uint64_t helper_subg(CPUAlphaState *env, uint64_t a, uint64_t b)
{
    float64 va = g_to_float64(env, GETPC(), a);
    float64 vb = g_to_float64(env, GETPC(), b);
    return float64_to_g(float64_sub(va, vb, &FP_STATUS));
}
/* VAX G-float multiply, emulated via IEEE double arithmetic.  */
uint64_t helper_mulg(CPUAlphaState *env, uint64_t a, uint64_t b)
{
    float64 va = g_to_float64(env, GETPC(), a);
    float64 vb = g_to_float64(env, GETPC(), b);
    return float64_to_g(float64_mul(va, vb, &FP_STATUS));
}
/* VAX G-float divide, emulated via IEEE double arithmetic.  */
uint64_t helper_divg(CPUAlphaState *env, uint64_t a, uint64_t b)
{
    float64 va = g_to_float64(env, GETPC(), a);
    float64 vb = g_to_float64(env, GETPC(), b);
    return float64_to_g(float64_div(va, vb, &FP_STATUS));
}
/* VAX G-float square root, emulated via IEEE double arithmetic.  */
uint64_t helper_sqrtg(CPUAlphaState *env, uint64_t a)
{
    float64 va = g_to_float64(env, GETPC(), a);
    return float64_to_g(float64_sqrt(va, &FP_STATUS));
}
/* G-float equality compare (quiet: does not signal on quiet NaNs).
   Returns bit 62 set for true, 0 for false.  */
uint64_t helper_cmpgeq(CPUAlphaState *env, uint64_t a, uint64_t b)
{
    float64 va = g_to_float64(env, GETPC(), a);
    float64 vb = g_to_float64(env, GETPC(), b);
    return float64_eq_quiet(va, vb, &FP_STATUS) ? 0x4000000000000000ULL : 0;
}
/* G-float less-or-equal compare.  Returns bit 62 set for true, 0 for
   false.  */
uint64_t helper_cmpgle(CPUAlphaState *env, uint64_t a, uint64_t b)
{
    float64 va = g_to_float64(env, GETPC(), a);
    float64 vb = g_to_float64(env, GETPC(), b);
    return float64_le(va, vb, &FP_STATUS) ? 0x4000000000000000ULL : 0;
}
/* G-float less-than compare.  Returns bit 62 set for true, 0 for
   false.  */
uint64_t helper_cmpglt(CPUAlphaState *env, uint64_t a, uint64_t b)
{
    float64 va = g_to_float64(env, GETPC(), a);
    float64 vb = g_to_float64(env, GETPC(), b);
    return float64_lt(va, vb, &FP_STATUS) ? 0x4000000000000000ULL : 0;
}
/* Convert a signed quadword to VAX F-float via IEEE single rounding.  */
uint64_t helper_cvtqf(CPUAlphaState *env, uint64_t a)
{
    return float32_to_f(int64_to_float32(a, &FP_STATUS));
}
/* Narrow a VAX G-float to a VAX F-float via an IEEE double->single
   conversion.  */
uint64_t helper_cvtgf(CPUAlphaState *env, uint64_t a)
{
    float64 vg = g_to_float64(env, GETPC(), a);
    return float32_to_f(float64_to_float32(vg, &FP_STATUS));
}
/* Convert a VAX G-float to a signed quadword, truncating toward zero.  */
uint64_t helper_cvtgq(CPUAlphaState *env, uint64_t a)
{
    float64 vg = g_to_float64(env, GETPC(), a);
    return float64_to_int64_round_to_zero(vg, &FP_STATUS);
}
/* Convert a signed quadword to VAX G-float via IEEE double rounding.  */
uint64_t helper_cvtqg(CPUAlphaState *env, uint64_t a)
{
    return float64_to_g(int64_to_float64(a, &FP_STATUS));
}
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册