Commit 90747692, authored by Szabolcs Nagy, committed by Rich Felker

math: rewrite fma with mostly int arithmetics

the freebsd fma code failed to raise underflow exception in some
cases in nearest rounding mode (affects fmal too) e.g.

  fma(-0x1p-1000, 0x1.000001p-74, 0x1p-1022)

and the inexact exception may be raised spuriously since the fenv
is not saved/restored around the exact multiplication algorithm
(affects x86 fma too).
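
a minimal standalone check of the case above, as a sketch (not part of the
commit): the correctly rounded result here is an inexact subnormal, so
FE_UNDERFLOW must be set under either underflow convention.

#include <fenv.h>
#include <math.h>
#include <stdio.h>

int main(void)
{
	#pragma STDC FENV_ACCESS ON
#ifdef FE_UNDERFLOW
	feclearexcept(FE_ALL_EXCEPT);
	volatile double r = fma(-0x1p-1000, 0x1.000001p-74, 0x1p-1022);
	printf("result: %a\n", r);
	printf("underflow raised: %d\n", !!fetestexcept(FE_UNDERFLOW));
#endif
	return 0;
}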

another issue is that the underflow behaviour when the rounded result
is the minimal normal number is target dependent, ieee754 allows two
ways to raise underflow for inexact results: raise if the result before
rounding is in the subnormal range (e.g. aarch64, arm, powerpc) or if
the result after rounding with infinite exponent range is in the
subnormal range (e.g. x86, mips, sh).
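
the difference can be observed with a small probe (illustrative, not from the
commit): the exact product below is subnormal before rounding, but rounds to
nearest up to exactly DBL_MIN, so only before-rounding targets raise underflow.

#include <fenv.h>
#include <float.h>
#include <stdio.h>

int main(void)
{
	#pragma STDC FENV_ACCESS ON
#ifdef FE_UNDERFLOW
	volatile double a = 0x1.fffffffffffffp-1, b = DBL_MIN;
	feclearexcept(FE_ALL_EXCEPT);
	volatile double r = a*b; /* exact: DBL_MIN*(1 - 0x1p-53), rounds to DBL_MIN */
	printf("underflow raised: %d (1: before-rounding arch, 0: after-rounding arch)\n",
	       !!fetestexcept(FE_UNDERFLOW));
	(void)r;
#endif
	return 0;
}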

to avoid all these issues the algorithm was rewritten with mostly int
arithmetics and float arithmetics is only used to get correct rounding
and raise exceptions according to the behaviour of the target without
any fenv.h dependency. it also unifies x86 and non-x86 fma.
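
the final rounding step can be sketched like this (round_via_int and its
parameters are illustrative names, not the commit's api): keep the result in
a 64-bit integer whose lowest bit is a sticky bit, then a single
int64-to-double conversion is the only inexact operation, so the fpu rounds
correctly and raises inexact according to the target without touching fenv.

#include <stdint.h>
#include <math.h>

/* mant63: top 63 bits of the exact result, bit 0 is a sticky bit */
static double round_via_int(uint64_t mant63, int sign, int e)
{
	int64_t i = mant63;
	if (sign)
		i = -i;
	double r = i;        /* the one correctly rounded step */
	return scalbn(r, e); /* exact as long as the scaled result is normal;
	                        the subnormal range needs extra care, as the
	                        actual code below shows */
}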

fmaf is not affected, fmal needs to be fixed too.

this algorithm depends on a_clz_64 and it required a few spurious
instructions to make sure underflow exception is raised in a particular
corner case. (normally FORCE_EVAL(tiny*tiny) would be used for this,
but on i386 gcc is broken if the expression is constant
https://gcc.gnu.org/bugzilla/show_bug.cgi?id=57245
and there is no easy portable fix for the macro.)
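
a_clz_64 is the internal count-leading-zeros helper from atomic.h; as a
sketch, a portable fallback (assumed here, not the commit's code) could be:

#include <stdint.h>

/* count leading zeros; result is unspecified for x == 0,
   a case the fma code never passes */
static int clz64(uint64_t x)
{
	int n = 0;
	if (!(x >> 32)) { n += 32; x <<= 32; }
	if (!(x >> 48)) { n += 16; x <<= 16; }
	if (!(x >> 56)) { n += 8;  x <<= 8; }
	if (!(x >> 60)) { n += 4;  x <<= 4; }
	if (!(x >> 62)) { n += 2;  x <<= 2; }
	if (!(x >> 63)) { n += 1; }
	return n;
}
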
Parent b3516058
fma.c, old version (removed by this commit):

#include <fenv.h>
#include "libm.h"

#if LDBL_MANT_DIG==64 && LDBL_MAX_EXP==16384
/* exact add, assumes exponent_x >= exponent_y */
static void add(long double *hi, long double *lo, long double x, long double y)
{
	long double r;

	r = x + y;
	*hi = r;
	r -= x;
	*lo = y - r;
}

/* exact mul, assumes no over/underflow */
static void mul(long double *hi, long double *lo, long double x, long double y)
{
	static const long double c = 1.0 + 0x1p32L;
	long double cx, xh, xl, cy, yh, yl;

	cx = c*x;
	xh = (x - cx) + cx;
	xl = x - xh;
	cy = c*y;
	yh = (y - cy) + cy;
	yl = y - yh;
	*hi = x*y;
	*lo = (xh*yh - *hi) + xh*yl + xl*yh + xl*yl;
}

/*
assume (long double)(hi+lo) == hi
return an adjusted hi so that rounding it to double (or less) precision is correct
*/
static long double adjust(long double hi, long double lo)
{
	union ldshape uhi, ulo;

	if (lo == 0)
		return hi;
	uhi.f = hi;
	if (uhi.i.m & 0x3ff)
		return hi;
	ulo.f = lo;
	if ((uhi.i.se & 0x8000) == (ulo.i.se & 0x8000))
		uhi.i.m++;
	else {
		/* handle underflow and take care of ld80 implicit msb */
		if (uhi.i.m << 1 == 0) {
			uhi.i.m = 0;
			uhi.i.se--;
		}
		uhi.i.m--;
	}
	return uhi.f;
}

/* adjusted add so the result is correct when rounded to double (or less) precision */
static long double dadd(long double x, long double y)
{
	add(&x, &y, x, y);
	return adjust(x, y);
}

/* adjusted mul so the result is correct when rounded to double (or less) precision */
static long double dmul(long double x, long double y)
{
	mul(&x, &y, x, y);
	return adjust(x, y);
}

static int getexp(long double x)
{
	union ldshape u;
	u.f = x;
	return u.i.se & 0x7fff;
}

double fma(double x, double y, double z)
{
	#pragma STDC FENV_ACCESS ON
	long double hi, lo1, lo2, xy;
	int round, ez, exy;

	/* handle +-inf,nan */
	if (!isfinite(x) || !isfinite(y))
		return x*y + z;
	if (!isfinite(z))
		return z;
	/* handle +-0 */
	if (x == 0.0 || y == 0.0)
		return x*y + z;
	round = fegetround();
	if (z == 0.0) {
		if (round == FE_TONEAREST)
			return dmul(x, y);
		return x*y;
	}

	/* exact mul and add require nearest rounding */
	/* spurious inexact exceptions may be raised */
	fesetround(FE_TONEAREST);
	mul(&xy, &lo1, x, y);
	exy = getexp(xy);
	ez = getexp(z);
	if (ez > exy) {
		add(&hi, &lo2, z, xy);
	} else if (ez > exy - 12) {
		add(&hi, &lo2, xy, z);
		if (hi == 0) {
			/*
			xy + z is 0, but it should be calculated with the
			original rounding mode so the sign is correct, if the
			compiler does not support FENV_ACCESS ON it does not
			know about the changed rounding mode and eliminates
			the xy + z below without the volatile memory access
			*/
			volatile double z_;
			fesetround(round);
			z_ = z;
			return (xy + z_) + lo1;
		}
	} else {
		/*
		ez <= exy - 12
		the 12 extra bits (1guard, 11round+sticky) are needed so with
		lo = dadd(lo1, lo2)
		elo <= ehi - 11, and we use the last 10 bits in adjust so
		dadd(hi, lo)
		gives correct result when rounded to double
		*/
		hi = xy;
		lo2 = z;
	}
	/*
	the result is stored before return for correct precision and exceptions

	one corner case is when the underflow flag should be raised because
	the precise result is an inexact subnormal double, but the calculated
	long double result is an exact subnormal double
	(so rounding to double does not raise exceptions)

	in nearest rounding mode dadd takes care of this: the last bit of the
	result is adjusted so rounding sees an inexact value when it should

	in non-nearest rounding mode fenv is used for the workaround
	*/
	fesetround(round);
	if (round == FE_TONEAREST)
		z = dadd(hi, dadd(lo1, lo2));
	else {
#if defined(FE_INEXACT) && defined(FE_UNDERFLOW)
		int e = fetestexcept(FE_INEXACT);
		feclearexcept(FE_INEXACT);
#endif
		z = hi + (lo1 + lo2);
#if defined(FE_INEXACT) && defined(FE_UNDERFLOW)
		if (getexp(z) < 0x3fff-1022 && fetestexcept(FE_INEXACT))
			feraiseexcept(FE_UNDERFLOW);
		else if (e)
			feraiseexcept(FE_INEXACT);
#endif
	}
	return z;
}
#else
/* origin: FreeBSD /usr/src/lib/msun/src/s_fma.c */
/*-
 * Copyright (c) 2005-2011 David Schultz <das@FreeBSD.ORG>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * A struct dd represents a floating-point number with twice the precision
 * of a double.  We maintain the invariant that "hi" stores the 53 high-order
 * bits of the result.
 */
struct dd {
	double hi;
	double lo;
};

/*
 * Compute a+b exactly, returning the exact result in a struct dd.  We assume
 * that both a and b are finite, but make no assumptions about their relative
 * magnitudes.
 */
static inline struct dd dd_add(double a, double b)
{
	struct dd ret;
	double s;

	ret.hi = a + b;
	s = ret.hi - a;
	ret.lo = (a - (ret.hi - s)) + (b - s);
	return (ret);
}

/*
 * Compute a+b, with a small tweak:  The least significant bit of the
 * result is adjusted into a sticky bit summarizing all the bits that
 * were lost to rounding.  This adjustment negates the effects of double
 * rounding when the result is added to another number with a higher
 * exponent.  For an explanation of round and sticky bits, see any reference
 * on FPU design, e.g.,
 *
 *     J. Coonen.  An Implementation Guide to a Proposed Standard for
 *     Floating-Point Arithmetic.  Computer, vol. 13, no. 1, Jan 1980.
 */
static inline double add_adjusted(double a, double b)
{
	struct dd sum;
	union {double f; uint64_t i;} uhi, ulo;

	sum = dd_add(a, b);
	if (sum.lo != 0) {
		uhi.f = sum.hi;
		if ((uhi.i & 1) == 0) {
			/* hibits += (int)copysign(1.0, sum.hi * sum.lo) */
			ulo.f = sum.lo;
			uhi.i += 1 - ((uhi.i ^ ulo.i) >> 62);
			sum.hi = uhi.f;
		}
	}
	return (sum.hi);
}

/*
 * Compute ldexp(a+b, scale) with a single rounding error. It is assumed
 * that the result will be subnormal, and care is taken to ensure that
 * double rounding does not occur.
 */
static inline double add_and_denormalize(double a, double b, int scale)
{
	struct dd sum;
	union {double f; uint64_t i;} uhi, ulo;
	int bits_lost;

	sum = dd_add(a, b);

	/*
	 * If we are losing at least two bits of accuracy to denormalization,
	 * then the first lost bit becomes a round bit, and we adjust the
	 * lowest bit of sum.hi to make it a sticky bit summarizing all the
	 * bits in sum.lo. With the sticky bit adjusted, the hardware will
	 * break any ties in the correct direction.
	 *
	 * If we are losing only one bit to denormalization, however, we must
	 * break the ties manually.
	 */
	if (sum.lo != 0) {
		uhi.f = sum.hi;
		bits_lost = -((int)(uhi.i >> 52) & 0x7ff) - scale + 1;
		if ((bits_lost != 1) ^ (int)(uhi.i & 1)) {
			/* hibits += (int)copysign(1.0, sum.hi * sum.lo) */
			ulo.f = sum.lo;
			uhi.i += 1 - (((uhi.i ^ ulo.i) >> 62) & 2);
			sum.hi = uhi.f;
		}
	}
	return scalbn(sum.hi, scale);
}

/*
 * Compute a*b exactly, returning the exact result in a struct dd.  We assume
 * that both a and b are normalized, so no underflow or overflow will occur.
 * The current rounding mode must be round-to-nearest.
 */
static inline struct dd dd_mul(double a, double b)
{
	static const double split = 0x1p27 + 1.0;
	struct dd ret;
	double ha, hb, la, lb, p, q;

	p = a * split;
	ha = a - p;
	ha += p;
	la = a - ha;

	p = b * split;
	hb = b - p;
	hb += p;
	lb = b - hb;

	p = ha * hb;
	q = ha * lb + la * hb;

	ret.hi = p + q;
	ret.lo = p - ret.hi + q + la * lb;
	return (ret);
}

/*
 * Fused multiply-add: Compute x * y + z with a single rounding error.
 *
 * We use scaling to avoid overflow/underflow, along with the
 * canonical precision-doubling technique adapted from:
 *
 *	Dekker, T.  A Floating-Point Technique for Extending the
 *	Available Precision.  Numer. Math. 18, 224-242 (1971).
 *
 * This algorithm is sensitive to the rounding precision.  FPUs such
 * as the i387 must be set in double-precision mode if variables are
 * to be stored in FP registers in order to avoid incorrect results.
 * This is the default on FreeBSD, but not on many other systems.
 *
 * Hardware instructions should be used on architectures that support it,
 * since this implementation will likely be several times slower.
 */
double fma(double x, double y, double z)
{
	#pragma STDC FENV_ACCESS ON
	double xs, ys, zs, adj;
	struct dd xy, r;
	int oround;
	int ex, ey, ez;
	int spread;

	/*
	 * Handle special cases. The order of operations and the particular
	 * return values here are crucial in handling special cases involving
	 * infinities, NaNs, overflows, and signed zeroes correctly.
	 */
	if (!isfinite(x) || !isfinite(y))
		return (x * y + z);
	if (!isfinite(z))
		return (z);
	if (x == 0.0 || y == 0.0)
		return (x * y + z);
	if (z == 0.0)
		return (x * y);

	xs = frexp(x, &ex);
	ys = frexp(y, &ey);
	zs = frexp(z, &ez);
	oround = fegetround();
	spread = ex + ey - ez;

	/*
	 * If x * y and z are many orders of magnitude apart, the scaling
	 * will overflow, so we handle these cases specially.  Rounding
	 * modes other than FE_TONEAREST are painful.
	 */
	if (spread < -DBL_MANT_DIG) {
#ifdef FE_INEXACT
		feraiseexcept(FE_INEXACT);
#endif
#ifdef FE_UNDERFLOW
		if (!isnormal(z))
			feraiseexcept(FE_UNDERFLOW);
#endif
		switch (oround) {
		default: /* FE_TONEAREST */
			return (z);
#ifdef FE_TOWARDZERO
		case FE_TOWARDZERO:
			if (x > 0.0 ^ y < 0.0 ^ z < 0.0)
				return (z);
			else
				return (nextafter(z, 0));
#endif
#ifdef FE_DOWNWARD
		case FE_DOWNWARD:
			if (x > 0.0 ^ y < 0.0)
				return (z);
			else
				return (nextafter(z, -INFINITY));
#endif
#ifdef FE_UPWARD
		case FE_UPWARD:
			if (x > 0.0 ^ y < 0.0)
				return (nextafter(z, INFINITY));
			else
				return (z);
#endif
		}
	}
	if (spread <= DBL_MANT_DIG * 2)
		zs = scalbn(zs, -spread);
	else
		zs = copysign(DBL_MIN, zs);

	fesetround(FE_TONEAREST);

	/*
	 * Basic approach for round-to-nearest:
	 *
	 *     (xy.hi, xy.lo) = x * y           (exact)
	 *     (r.hi, r.lo)   = xy.hi + z       (exact)
	 *     adj = xy.lo + r.lo               (inexact; low bit is sticky)
	 *     result = r.hi + adj              (correctly rounded)
	 */
	xy = dd_mul(xs, ys);
	r = dd_add(xy.hi, zs);

	spread = ex + ey;

	if (r.hi == 0.0) {
		/*
		 * When the addends cancel to 0, ensure that the result has
		 * the correct sign.
		 */
		fesetround(oround);
		volatile double vzs = zs; /* XXX gcc CSE bug workaround */
		return xy.hi + vzs + scalbn(xy.lo, spread);
	}

	if (oround != FE_TONEAREST) {
		/*
		 * There is no need to worry about double rounding in directed
		 * rounding modes.
		 * But underflow may not be raised properly, example in downward
		 * rounding:
		 * fma(0x1.000000001p-1000, 0x1.000000001p-30, -0x1p-1066)
		 */
		double ret;
#if defined(FE_INEXACT) && defined(FE_UNDERFLOW)
		int e = fetestexcept(FE_INEXACT);
		feclearexcept(FE_INEXACT);
#endif
		fesetround(oround);
		adj = r.lo + xy.lo;
		ret = scalbn(r.hi + adj, spread);
#if defined(FE_INEXACT) && defined(FE_UNDERFLOW)
		if (ilogb(ret) < -1022 && fetestexcept(FE_INEXACT))
			feraiseexcept(FE_UNDERFLOW);
		else if (e)
			feraiseexcept(FE_INEXACT);
#endif
		return ret;
	}

	adj = add_adjusted(r.lo, xy.lo);
	if (spread + ilogb(r.hi) > -1023)
		return scalbn(r.hi + adj, spread);
	else
		return add_and_denormalize(r.hi, adj, spread);
}
#endif

fma.c, new version (added by this commit):

#include <stdint.h>
#include <float.h>
#include <math.h>
#include "atomic.h"

#define ASUINT64(x) ((union {double f; uint64_t i;}){x}).i
#define ZEROINFNAN (0x7ff-0x3ff-52-1)

struct num { uint64_t m; int e; int sign; };

static struct num normalize(double x)
{
	uint64_t ix = ASUINT64(x);
	int e = ix>>52;
	int sign = e & 0x800;
	e &= 0x7ff;
	if (!e) {
		ix = ASUINT64(x*0x1p63);
		e = ix>>52 & 0x7ff;
		e = e ? e-63 : 0x800;
	}
	ix &= (1ull<<52)-1;
	ix |= 1ull<<52;
	ix <<= 1;
	e -= 0x3ff + 52 + 1;
	return (struct num){ix,e,sign};
}

static void mul(uint64_t *hi, uint64_t *lo, uint64_t x, uint64_t y)
{
	uint64_t t1,t2,t3;
	uint64_t xlo = (uint32_t)x, xhi = x>>32;
	uint64_t ylo = (uint32_t)y, yhi = y>>32;

	t1 = xlo*ylo;
	t2 = xlo*yhi + xhi*ylo;
	t3 = xhi*yhi;
	*lo = t1 + (t2<<32);
	*hi = t3 + (t2>>32) + (t1 > *lo);
}

double fma(double x, double y, double z)
{
	#pragma STDC FENV_ACCESS ON

	/* normalize so top 10bits and last bit are 0 */
	struct num nx, ny, nz;
	nx = normalize(x);
	ny = normalize(y);
	nz = normalize(z);

	if (nx.e >= ZEROINFNAN || ny.e >= ZEROINFNAN)
		return x*y + z;
	if (nz.e >= ZEROINFNAN) {
		if (nz.e > ZEROINFNAN) /* z==0 */
			return x*y + z;
		return z;
	}

	/* mul: r = x*y */
	uint64_t rhi, rlo, zhi, zlo;
	mul(&rhi, &rlo, nx.m, ny.m);
	/* either top 20 or 21 bits of rhi and last 2 bits of rlo are 0 */

	/* align exponents */
	int e = nx.e + ny.e;
	int d = nz.e - e;
	/* shift bits z<<=kz, r>>=kr, so kz+kr == d, set e = e+kr (== ez-kz) */
	if (d > 0) {
		if (d < 64) {
			zlo = nz.m<<d;
			zhi = nz.m>>64-d;
		} else {
			zlo = 0;
			zhi = nz.m;
			e = nz.e - 64;
			d -= 64;
			if (d == 0) {
			} else if (d < 64) {
				rlo = rhi<<64-d | rlo>>d | !!(rlo<<64-d);
				rhi = rhi>>d;
			} else {
				rlo = 1;
				rhi = 0;
			}
		}
	} else {
		zhi = 0;
		d = -d;
		if (d == 0) {
			zlo = nz.m;
		} else if (d < 64) {
			zlo = nz.m>>d | !!(nz.m<<64-d);
		} else {
			zlo = 1;
		}
	}

	/* add */
	int sign = nx.sign^ny.sign;
	int samesign = !(sign^nz.sign);
	int nonzero = 1;
	if (samesign) {
		/* r += z */
		rlo += zlo;
		rhi += zhi + (rlo < zlo);
	} else {
		/* r -= z */
		uint64_t t = rlo;
		rlo -= zlo;
		rhi = rhi - zhi - (t < rlo);
		if (rhi>>63) {
			rlo = -rlo;
			rhi = -rhi-!!rlo;
			sign = !sign;
		}
		nonzero = !!rhi;
	}

	/* set rhi to top 63bit of the result (last bit is sticky) */
	if (nonzero) {
		e += 64;
		d = a_clz_64(rhi)-1;
		/* note: d > 0 */
		rhi = rhi<<d | rlo>>64-d | !!(rlo<<d);
	} else if (rlo) {
		d = a_clz_64(rlo)-1;
		if (d < 0)
			rhi = rlo>>1 | (rlo&1);
		else
			rhi = rlo<<d;
	} else {
		/* exact +-0 */
		return x*y + z;
	}
	e -= d;

	/* convert to double */
	int64_t i = rhi; /* i is in [1<<62,(1<<63)-1] */
	if (sign)
		i = -i;
	double r = i; /* |r| is in [0x1p62,0x1p63] */

	if (e < -1022-62) {
		/* result is subnormal before rounding */
		if (e == -1022-63) {
			double c = 0x1p63;
			if (sign)
				c = -c;
			if (r == c) {
				/* min normal after rounding, underflow depends
				   on arch behaviour which can be imitated by
				   a double to float conversion */
				float fltmin = 0x0.ffffff8p-63*FLT_MIN * r;
				return DBL_MIN/FLT_MIN * fltmin;
			}
			/* one bit is lost when scaled, add another top bit to
			   only round once at conversion if it is inexact */
			if (rhi << 53) {
				i = rhi>>1 | (rhi&1) | 1ull<<62;
				if (sign)
					i = -i;
				r = i;
				r = 2*r - c; /* remove top bit */

				/* raise underflow portably, such that it
				   cannot be optimized away */
				{
					double_t tiny = DBL_MIN/FLT_MIN * r;
					r += (double)(tiny*tiny) * (r-r);
				}
			}
		} else {
			/* only round once when scaled */
			d = 10;
			i = ( rhi>>d | !!(rhi<<64-d) ) << d;
			if (sign)
				i = -i;
			r = i;
		}
	}
	return scalbn(r, e);
}