diff --git a/modules/core/include/opencv2/core.hpp b/modules/core/include/opencv2/core.hpp index e7271017dc62f5fd80214cfca4929aa7bc9f272c..b077d06dcd00644dff1407676025db934c354604 100644 --- a/modules/core/include/opencv2/core.hpp +++ b/modules/core/include/opencv2/core.hpp @@ -74,6 +74,7 @@ @{ @defgroup core_utils_sse SSE utilities @defgroup core_utils_neon NEON utilities + @defgroup core_utils_softfloat Softfloat support @} @defgroup core_opengl OpenGL interoperability @defgroup core_ipp Intel IPP Asynchronous C/C++ Converters diff --git a/modules/core/include/opencv2/core/softfloat.hpp b/modules/core/include/opencv2/core/softfloat.hpp index a71b6765d8fe4ee74d0ca0c14422ad573389811b..d5c77f9886bf8c76c7b440977333f997cc48a318 100644 --- a/modules/core/include/opencv2/core/softfloat.hpp +++ b/modules/core/include/opencv2/core/softfloat.hpp @@ -60,43 +60,109 @@ typedef unsigned int uint32_t; namespace cv { +/** @addtogroup core_utils_softfloat + + [SoftFloat](http://www.jhauser.us/arithmetic/SoftFloat.html) is a software implementation + of floating-point calculations according to the IEEE 754 standard. + All calculations are done in integer arithmetic, which makes them machine-independent and bit-exact. + This library can be useful in accuracy-critical parts like look-up table generation, tests, etc. + OpenCV contains a subset of SoftFloat partially rewritten in C++. + + ### Types + + There are two basic types: @ref softfloat and @ref softdouble. + These types are binary compatible with float and double types respectively + and support conversions to/from them. + Other types from the original SoftFloat library, like fp16 or fp128, were dropped, + as were quiet/signaling NaN support, on-the-fly rounding mode switching + and exception flags (though exceptions can be implemented in the future). + + ### Operations + + Both types support the following: + - Construction from signed and unsigned 32-bit and 64-bit integers, + float/double or raw binary representation + - Conversions between each other, to float or double and to int + using @ref cvRound, @ref cvTrunc, @ref cvFloor, @ref cvCeil or the family of + saturate_cast functions + - Add, subtract, multiply, divide, remainder, square root, FMA with absolute precision + - Comparison operations + - Explicit sign, exponent and significand manipulation through get/set methods, + number state indicators (isInf, isNaN, isSubnormal) + - Type-specific constants like eps, minimum/maximum value, best pi approximation, etc. + - min(), max(), abs(), exp(), log() and pow() functions + +*/ +//! 
@{ + struct softfloat; struct softdouble; struct CV_EXPORTS softfloat { public: + /** @brief Default constructor */ softfloat() { v = 0; } + /** @brief Copy constructor */ softfloat( const softfloat& c) { v = c.v; } + /** @brief Assign constructor */ softfloat& operator=( const softfloat& c ) { if(&c != this) v = c.v; return *this; } + /** @brief Construct from raw + + Builds new value from raw binary representation + */ static const softfloat fromRaw( const uint32_t a ) { softfloat x; x.v = a; return x; } + /** @brief Construct from integer */ explicit softfloat( const uint32_t ); explicit softfloat( const uint64_t ); explicit softfloat( const int32_t ); explicit softfloat( const int64_t ); + /** @brief Construct from float */ explicit softfloat( const float a ) { Cv32suf s; s.f = a; v = s.u; } + /** @brief Type casts */ operator softdouble() const; operator float() const { Cv32suf s; s.u = v; return s.f; } + /** @brief Basic arithmetics */ softfloat operator + (const softfloat&) const; softfloat operator - (const softfloat&) const; softfloat operator * (const softfloat&) const; softfloat operator / (const softfloat&) const; - softfloat operator % (const softfloat&) const; softfloat operator - () const { softfloat x; x.v = v ^ (1U << 31); return x; } + /** @brief Remainder operator + + A quote from original SoftFloat manual: + + > The IEEE Standard remainder operation computes the value + > a - n * b, where n is the integer closest to a / b. + > If a / b is exactly halfway between two integers, n is the even integer + > closest to a / b. The IEEE Standard’s remainder operation is always exact and so requires no rounding. + > Depending on the relative magnitudes of the operands, the remainder functions + > can take considerably longer to execute than the other SoftFloat functions. + > This is an inherent characteristic of the remainder operation itself and is not a flaw + > in the SoftFloat implementation. + */ + softfloat operator % (const softfloat&) const; + softfloat& operator += (const softfloat& a) { *this = *this + a; return *this; } softfloat& operator -= (const softfloat& a) { *this = *this - a; return *this; } softfloat& operator *= (const softfloat& a) { *this = *this * a; return *this; } softfloat& operator /= (const softfloat& a) { *this = *this / a; return *this; } softfloat& operator %= (const softfloat& a) { *this = *this % a; return *this; } + /** @brief Comparison operations + + - Any operation with NaN produces false + + The only exception is when x is NaN: x != y for any y. 
+ - Positive and negative zeros are equal + */ bool operator == ( const softfloat& ) const; bool operator != ( const softfloat& ) const; bool operator > ( const softfloat& ) const; @@ -104,13 +170,58 @@ public: bool operator < ( const softfloat& ) const; bool operator <= ( const softfloat& ) const; - bool isNaN() const { return (v & 0x7fffffff) > 0x7f800000; } - bool isInf() const { return (v & 0x7fffffff) == 0x7f800000; } + /** @brief NaN state indicator */ + inline bool isNaN() const { return (v & 0x7fffffff) > 0x7f800000; } + /** @brief Inf state indicator */ + inline bool isInf() const { return (v & 0x7fffffff) == 0x7f800000; } + /** @brief Subnormal number indicator */ + inline bool isSubnormal() const { return ((v >> 23) & 0xFF) == 0; } + + /** @brief Get sign bit */ + inline bool getSign() const { return (v >> 31) != 0; } + /** @brief Construct a copy with new sign bit */ + inline softfloat setSign(bool sign) const { softfloat x; x.v = (v & ((1U << 31) - 1)) | ((uint32_t)sign << 31); return x; } + /** @brief Get 0-based exponent */ + inline int getExp() const { return ((v >> 23) & 0xFF) - 127; } + /** @brief Construct a copy with new 0-based exponent */ + inline softfloat setExp(int e) const { softfloat x; x.v = (v & 0x807fffff) | (((e + 127) & 0xFF) << 23 ); return x; } + + /** @brief Get a fraction part + + Returns a number 1 <= x < 2 with the same significand + */ + inline softfloat getFrac() const + { + uint_fast32_t vv = (v & 0x007fffff) | (127 << 23); + return softfloat::fromRaw(vv); + } + /** @brief Construct a copy with provided significand + Constructs a copy of a number with significand taken from parameter + */ + inline softfloat setFrac(const softfloat& s) const + { + softfloat x; + x.v = (v & 0xff800000) | (s.v & 0x007fffff); + return x; + } + + /** @brief Zero constant */ static softfloat zero() { return softfloat::fromRaw( 0 ); } + /** @brief Positive infinity constant */ static softfloat inf() { return softfloat::fromRaw( 0xFF << 23 ); } + /** @brief Default NaN constant */ static softfloat nan() { return softfloat::fromRaw( 0x7fffffff ); } + /** @brief One constant */ static softfloat one() { return softfloat::fromRaw( 127 << 23 ); } + /** @brief Smallest normalized value */ + static softfloat min() { return softfloat::fromRaw( 0x01 << 23 ); } + /** @brief Difference between 1 and next representable value */ + static softfloat eps() { return softfloat::fromRaw( (127 - 23) << 23 ); } + /** @brief Biggest finite value */ + static softfloat max() { return softfloat::fromRaw( (0xFF << 23) - 1 ); } + /** @brief Correct pi approximation */ + static softfloat pi() { return softfloat::fromRaw( 0x40490fdb ); } uint32_t v; }; @@ -121,37 +232,68 @@ public: struct CV_EXPORTS softdouble { public: + /** @brief Default constructor */ softdouble() : v(0) { } + /** @brief Copy constructor */ softdouble( const softdouble& c) { v = c.v; } + /** @brief Assign constructor */ softdouble& operator=( const softdouble& c ) { if(&c != this) v = c.v; return *this; } + /** @brief Construct from raw + + Builds new value from raw binary representation + */ static softdouble fromRaw( const uint64_t a ) { softdouble x; x.v = a; return x; } + /** @brief Construct from integer */ explicit softdouble( const uint32_t ); explicit softdouble( const uint64_t ); explicit softdouble( const int32_t ); explicit softdouble( const int64_t ); + /** @brief Construct from double */ explicit softdouble( const double a ) { Cv64suf s; s.f = a; v = s.u; } + /** @brief Type casts */ operator softfloat() const; 
operator double() const { Cv64suf s; s.u = v; return s.f; } + /** @brief Basic arithmetics */ softdouble operator + (const softdouble&) const; softdouble operator - (const softdouble&) const; softdouble operator * (const softdouble&) const; softdouble operator / (const softdouble&) const; - softdouble operator % (const softdouble&) const; softdouble operator - () const { softdouble x; x.v = v ^ (1ULL << 63); return x; } + /** @brief Remainder operator + + A quote from original SoftFloat manual: + + > The IEEE Standard remainder operation computes the value + > a - n * b, where n is the integer closest to a / b. + > If a / b is exactly halfway between two integers, n is the even integer + > closest to a / b. The IEEE Standard’s remainder operation is always exact and so requires no rounding. + > Depending on the relative magnitudes of the operands, the remainder functions + > can take considerably longer to execute than the other SoftFloat functions. + > This is an inherent characteristic of the remainder operation itself and is not a flaw + > in the SoftFloat implementation. + */ + softdouble operator % (const softdouble&) const; + softdouble& operator += (const softdouble& a) { *this = *this + a; return *this; } softdouble& operator -= (const softdouble& a) { *this = *this - a; return *this; } softdouble& operator *= (const softdouble& a) { *this = *this * a; return *this; } softdouble& operator /= (const softdouble& a) { *this = *this / a; return *this; } softdouble& operator %= (const softdouble& a) { *this = *this % a; return *this; } + /** @brief Comparison operations + + - Any operation with NaN produces false + + The only exception is when x is NaN: x != y for any y. + - Positive and negative zeros are equal + */ bool operator == ( const softdouble& ) const; bool operator != ( const softdouble& ) const; bool operator > ( const softdouble& ) const; @@ -159,13 +301,63 @@ public: bool operator < ( const softdouble& ) const; bool operator <= ( const softdouble& ) const; - bool isNaN() const { return (v & 0x7fffffffffffffff) > 0x7ff0000000000000; } - bool isInf() const { return (v & 0x7fffffffffffffff) == 0x7ff0000000000000; } + /** @brief NaN state indicator */ + inline bool isNaN() const { return (v & 0x7fffffffffffffff) > 0x7ff0000000000000; } + /** @brief Inf state indicator */ + inline bool isInf() const { return (v & 0x7fffffffffffffff) == 0x7ff0000000000000; } + /** @brief Subnormal number indicator */ + inline bool isSubnormal() const { return ((v >> 52) & 0x7FF) == 0; } + + /** @brief Get sign bit */ + inline bool getSign() const { return (v >> 63) != 0; } + /** @brief Construct a copy with new sign bit */ + softdouble setSign(bool sign) const { softdouble x; x.v = (v & ((1ULL << 63) - 1)) | ((uint_fast64_t)(sign) << 63); return x; } + /** @brief Get 0-based exponent */ + inline int getExp() const { return ((v >> 52) & 0x7FF) - 1023; } + /** @brief Construct a copy with new 0-based exponent */ + inline softdouble setExp(int e) const + { + softdouble x; + x.v = (v & 0x800FFFFFFFFFFFFF) | ((uint_fast64_t)((e + 1023) & 0x7FF) << 52); + return x; + } + + /** @brief Get a fraction part + + Returns a number 1 <= x < 2 with the same significand + */ + inline softdouble getFrac() const + { + uint_fast64_t vv = (v & 0x000FFFFFFFFFFFFF) | ((uint_fast64_t)(1023) << 52); + return softdouble::fromRaw(vv); + } + /** @brief Construct a copy with provided significand + + Constructs a copy of a number with significand taken from parameter + */ + inline softdouble setFrac(const softdouble& s) 
const + { + softdouble x; + x.v = (v & 0xFFF0000000000000) | (s.v & 0x000FFFFFFFFFFFFF); + return x; + } + /** @brief Zero constant */ static softdouble zero() { return softdouble::fromRaw( 0 ); } + /** @brief Positive infinity constant */ static softdouble inf() { return softdouble::fromRaw( (uint_fast64_t)(0x7FF) << 52 ); } + /** @brief Default NaN constant */ static softdouble nan() { return softdouble::fromRaw( CV_BIG_INT(0x7FFFFFFFFFFFFFFF) ); } + /** @brief One constant */ static softdouble one() { return softdouble::fromRaw( (uint_fast64_t)( 1023) << 52 ); } + /** @brief Smallest normalized value */ + static softdouble min() { return softdouble::fromRaw( (uint_fast64_t)( 0x01) << 52 ); } + /** @brief Difference between 1 and next representable value */ + static softdouble eps() { return softdouble::fromRaw( (uint_fast64_t)( 1023 - 52 ) << 52 ); } + /** @brief Biggest finite value */ + static softdouble max() { return softdouble::fromRaw( ((uint_fast64_t)(0x7FF) << 52) - 1 ); } + /** @brief Correct pi approximation */ + static softdouble pi() { return softdouble::fromRaw( CV_BIG_INT(0x400921FB54442D18) ); } uint64_t v; }; @@ -173,9 +365,14 @@ public: /*---------------------------------------------------------------------------- *----------------------------------------------------------------------------*/ +/** @brief Fused multiply-add (FMA) + +Computes (a*b)+c with a single rounding +*/ CV_EXPORTS softfloat mulAdd( const softfloat& a, const softfloat& b, const softfloat & c); CV_EXPORTS softdouble mulAdd( const softdouble& a, const softdouble& b, const softdouble& c); +/** @brief Square root */ CV_EXPORTS softfloat sqrt( const softfloat& a ); CV_EXPORTS softdouble sqrt( const softdouble& a ); } @@ -184,20 +381,25 @@ CV_EXPORTS softdouble sqrt( const softdouble& a ); | Ported from OpenCV and added for usability *----------------------------------------------------------------------------*/ +/** @brief Truncates a number to an integer with minimum magnitude */ CV_EXPORTS int cvTrunc(const cv::softfloat& a); CV_EXPORTS int cvTrunc(const cv::softdouble& a); +/** @brief Rounds a number to the nearest integer, breaking ties to even */ CV_EXPORTS int cvRound(const cv::softfloat& a); CV_EXPORTS int cvRound(const cv::softdouble& a); +/** @brief Rounds a number down to the nearest integer */ CV_EXPORTS int cvFloor(const cv::softfloat& a); CV_EXPORTS int cvFloor(const cv::softdouble& a); +/** @brief Rounds a number up to the nearest integer */ CV_EXPORTS int cvCeil(const cv::softfloat& a); CV_EXPORTS int cvCeil(const cv::softdouble& a); namespace cv { +/** @brief Saturate casts */ template<typename _Tp> static inline _Tp saturate_cast(softfloat a) { return _Tp(a); } template<typename _Tp> static inline _Tp saturate_cast(softdouble a) { return _Tp(a); } @@ -216,30 +418,88 @@ template<> inline short saturate_cast<short>(softdouble a) { return (short)std:: template<> inline int saturate_cast<int>(softfloat a) { return cvRound(a); } template<> inline int saturate_cast<int>(softdouble a) { return cvRound(a); } -// we intentionally do not clip negative numbers, to make -1 become 0xffffffff etc. +/** @brief Saturate cast to unsigned integer +We intentionally do not clip negative numbers, to make -1 become 0xffffffff etc. +*/ template<> inline unsigned saturate_cast<unsigned>(softfloat a) { return cvRound(a); } template<> inline unsigned saturate_cast<unsigned>(softdouble a) { return cvRound(a); } +/** @brief Min and Max functions */ inline softfloat min(const softfloat& a, const softfloat& b) { return (a > b) ? b : a; } inline softdouble min(const softdouble& a, const softdouble& b) { return (a > b) ? 
b : a; } inline softfloat max(const softfloat& a, const softfloat& b) { return (a > b) ? a : b; } inline softdouble max(const softdouble& a, const softdouble& b) { return (a > b) ? a : b; } +/** @brief Absolute value */ inline softfloat abs( softfloat a) { softfloat x; x.v = a.v & ((1U << 31) - 1); return x; } inline softdouble abs( softdouble a) { softdouble x; x.v = a.v & ((1ULL << 63) - 1); return x; } +/** @brief Exponent + +Special cases: +- exp(NaN) is NaN +- exp(-Inf) == 0 +- exp(+Inf) == +Inf +*/ CV_EXPORTS softfloat exp( const softfloat& a); CV_EXPORTS softdouble exp( const softdouble& a); +/** @brief Natural logarithm + +Special cases: +- log(NaN), log(x < 0) are NaN +- log(0) == -Inf +*/ CV_EXPORTS softfloat log( const softfloat& a ); CV_EXPORTS softdouble log( const softdouble& a ); +/** @brief Raising to a power + +Special cases: +- x**NaN is NaN for any x +- ( |x| == 1 )**Inf is NaN +- ( |x| > 1 )**+Inf or ( |x| < 1 )**-Inf is +Inf +- ( |x| > 1 )**-Inf or ( |x| < 1 )**+Inf is 0 +- x ** 0 == 1 for any x +- x ** 1 == x for any x +- NaN ** y is NaN for any other y +- Inf**(y < 0) == 0 +- Inf ** y is +Inf for any other y +- (x < 0)**y is NaN for any other y if y can't be correctly rounded to an integer +- 0 ** 0 == 1 +- 0 ** (y < 0) is +Inf +- 0 ** (y > 0) is 0 +*/ CV_EXPORTS softfloat pow( const softfloat& a, const softfloat& b); CV_EXPORTS softdouble pow( const softdouble& a, const softdouble& b); -CV_EXPORTS softfloat cbrt(const softfloat& a); +/** @brief Cube root + +Special cases: +- cbrt(NaN) is NaN +- cbrt(+/-Inf) is +/-Inf +*/ +CV_EXPORTS softfloat cbrt( const softfloat& a ); + +/** @brief Sine + +Special cases: +- sin(Inf) or sin(NaN) is NaN +- sin(x) == x when sin(x) is close to zero +*/ +CV_EXPORTS softdouble sin( const softdouble& a ); + +/** @brief Cosine + * +Special cases: +- cos(Inf) or cos(NaN) is NaN +- cos(x) == +/- 1 when cos(x) is close to +/- 1 +*/ +CV_EXPORTS softdouble cos( const softdouble& a ); } +//! @} + #endif diff --git a/modules/core/src/softfloat.cpp b/modules/core/src/softfloat.cpp index ac39ee8288d5f2bd87cdc63f76dc1d291753082a..7937d081ec0e4929c39767bd0a3bc7e5b2a60348 100644 --- a/modules/core/src/softfloat.cpp +++ b/modules/core/src/softfloat.cpp @@ -2,7 +2,8 @@ // It is subject to the license terms in the LICENSE file found in the top-level directory // of this distribution and at http://opencv.org/license.html -// This file is based on files from package issued with the following license: +// This file is based on files from packages softfloat and fdlibm +// issued with the following licenses: /*============================================================================ @@ -39,6 +40,29 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. =============================================================================*/ +// FDLIBM licenses: + +/* + * ==================================================== + * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved. + * + * Developed at SunSoft, a Sun Microsystems, Inc. business. + * Permission to use, copy, modify, and distribute this + * software is freely granted, provided that this notice + * is preserved. + * ==================================================== + */ + +/* + * ==================================================== + * Copyright (C) 2004 by Sun Microsystems, Inc. All rights reserved. + * + * Permission to use, copy, modify, and distribute this + * software is freely granted, provided that this notice + * is preserved. 
+ * ==================================================== + */ + #include "precomp.hpp" #include "opencv2/core/softfloat.hpp" @@ -78,13 +102,18 @@ static inline void raiseFlags( uint_fast8_t /* flags */) | Software floating-point rounding mode. *----------------------------------------------------------------------------*/ enum { - round_near_even = 0, - round_minMag = 1, - round_min = 2, - round_max = 3, - round_near_maxMag = 4, - round_odd = 5 + round_near_even = 0, // round to nearest, with ties to even + round_minMag = 1, // round to minimum magnitude (toward zero) + round_min = 2, // round to minimum (down) + round_max = 3, // round to maximum (up) + round_near_maxMag = 4, // round to nearest, with ties to maximum magnitude (away from zero) + round_odd = 5 // round to odd (jamming) }; +/* What is round_odd (from SoftFloat manual): + * If supported, mode round_odd first rounds a floating-point result to minimum magnitude, + * the same as round_minMag, and then, if the result is inexact, the least-significant bit + * of the result is set to 1. This rounding mode is also known as jamming. + */ //fixed to make softfloat code stateless static const uint_fast8_t globalRoundingMode = round_near_even; @@ -169,11 +198,14 @@ static bool f64_le( float64_t, float64_t ); static bool f64_lt( float64_t, float64_t ); /*---------------------------------------------------------------------------- -| Ported from OpenCV and added for usability +| Ported from OpenCV and fdlibm and added for usability *----------------------------------------------------------------------------*/ static float32_t f32_powi( float32_t x, int y); static float64_t f64_powi( float64_t x, int y); +static float64_t f64_sin_kernel(float64_t x); +static float64_t f64_cos_kernel(float64_t x); +static void f64_sincos_reduce(const float64_t& x, float64_t& y, int& n); static float32_t f32_exp( float32_t x); static float64_t f64_exp(float64_t x); @@ -182,6 +214,8 @@ static float64_t f64_log(float64_t x); static float32_t f32_cbrt(float32_t x); static float32_t f32_pow( float32_t x, float32_t y); static float64_t f64_pow( float64_t x, float64_t y); +static float64_t f64_sin( float64_t x ); +static float64_t f64_cos( float64_t x ); /*---------------------------------------------------------------------------- | softfloat and softdouble methods and members @@ -262,6 +296,9 @@ softdouble pow( const softdouble& a, const softdouble& b) { return f64_pow(a, b) softfloat cbrt(const softfloat& a) { return f32_cbrt(a); } +softdouble sin(const softdouble& a) { return f64_sin(a); } +softdouble cos(const softdouble& a) { return f64_cos(a); } + /*---------------------------------------------------------------------------- | The values to return on conversions to 32-bit integer formats that raise an | invalid exception. @@ -773,12 +810,10 @@ static bool f32_eq( float32_t a, float32_t b ) uiA = a.v; uiB = b.v; - if ( isNaNF32UI( uiA ) || isNaNF32UI( uiB ) ) { - if ( - softfloat_isSigNaNF32UI( uiA ) || softfloat_isSigNaNF32UI( uiB ) - ) { + if ( isNaNF32UI( uiA ) || isNaNF32UI( uiB ) ) + { + if (softfloat_isSigNaNF32UI( uiA ) || softfloat_isSigNaNF32UI( uiB ) ) raiseFlags( flag_invalid ); - } return false; } return (uiA == uiB) || ! 
(uint32_t) ((uiA | uiB)<<1); @@ -792,15 +827,15 @@ static bool f32_le( float32_t a, float32_t b ) uiA = a.v; uiB = b.v; - if ( isNaNF32UI( uiA ) || isNaNF32UI( uiB ) ) { + if ( isNaNF32UI( uiA ) || isNaNF32UI( uiB ) ) + { raiseFlags( flag_invalid ); return false; } signA = signF32UI( uiA ); signB = signF32UI( uiB ); - return - (signA != signB) ? signA || ! (uint32_t) ((uiA | uiB)<<1) - : (uiA == uiB) || (signA ^ (uiA < uiB)); + return (signA != signB) ? signA || ! (uint32_t) ((uiA | uiB)<<1) + : (uiA == uiB) || (signA ^ (uiA < uiB)); } static bool f32_lt( float32_t a, float32_t b ) @@ -809,17 +844,16 @@ static bool f32_lt( float32_t a, float32_t b ) uint_fast32_t uiB; bool signA, signB; - uiA = a.v; - uiB = b.v; - if ( isNaNF32UI( uiA ) || isNaNF32UI( uiB ) ) { + uiA = a.v; uiB = b.v; + if ( isNaNF32UI( uiA ) || isNaNF32UI( uiB ) ) + { raiseFlags( flag_invalid ); return false; } signA = signF32UI( uiA ); signB = signF32UI( uiB ); - return - (signA != signB) ? signA && ((uint32_t) ((uiA | uiB)<<1) != 0) - : (uiA != uiB) && (signA ^ (uiA < uiB)); + return (signA != signB) ? signA && ((uint32_t) ((uiA | uiB)<<1) != 0) + : (uiA != uiB) && (signA ^ (uiA < uiB)); } static float32_t f32_mulAdd( float32_t a, float32_t b, float32_t c ) @@ -1465,12 +1499,10 @@ static bool f64_eq( float64_t a, float64_t b ) uiA = a.v; uiB = b.v; - if ( isNaNF64UI( uiA ) || isNaNF64UI( uiB ) ) { - if ( - softfloat_isSigNaNF64UI( uiA ) || softfloat_isSigNaNF64UI( uiB ) - ) { + if ( isNaNF64UI( uiA ) || isNaNF64UI( uiB ) ) + { + if ( softfloat_isSigNaNF64UI( uiA ) || softfloat_isSigNaNF64UI( uiB ) ) raiseFlags( flag_invalid ); - } return false; } return (uiA == uiB) || ! ((uiA | uiB) & UINT64_C( 0x7FFFFFFFFFFFFFFF )); @@ -1490,10 +1522,8 @@ static bool f64_le( float64_t a, float64_t b ) } signA = signF64UI( uiA ); signB = signF64UI( uiB ); - return - (signA != signB) - ? signA || ! ((uiA | uiB) & UINT64_C( 0x7FFFFFFFFFFFFFFF )) - : (uiA == uiB) || (signA ^ (uiA < uiB)); + return (signA != signB) ? signA || ! ((uiA | uiB) & UINT64_C( 0x7FFFFFFFFFFFFFFF )) + : (uiA == uiB) || (signA ^ (uiA < uiB)); } static bool f64_lt( float64_t a, float64_t b ) @@ -1510,10 +1540,8 @@ static bool f64_lt( float64_t a, float64_t b ) } signA = signF64UI( uiA ); signB = signF64UI( uiB ); - return - (signA != signB) - ? signA && ((uiA | uiB) & UINT64_C( 0x7FFFFFFFFFFFFFFF )) - : (uiA != uiB) && (signA ^ (uiA < uiB)); + return (signA != signB) ? signA && ((uiA | uiB) & UINT64_C( 0x7FFFFFFFFFFFFFFF )) + : (uiA != uiB) && (signA ^ (uiA < uiB)); } static float64_t f64_mulAdd( float64_t a, float64_t b, float64_t c ) @@ -3942,4 +3970,259 @@ static float64_t f64_powi( float64_t x, int y) return v; } +/* + * sin and cos functions taken from fdlibm with original comments + * (edited where needed) + */ + +static const float64_t pi2 = float64_t::pi().setExp(2); +static const float64_t piby2 = float64_t::pi().setExp(0); +static const float64_t piby4 = float64_t::pi().setExp(-1); +static const float64_t half = float64_t::one()/float64_t(2); +static const float64_t third = float64_t::one()/float64_t(3); + +/* __kernel_sin( x, y, iy) + * N.B. from OpenCV side: y and iy removed, simplified to polynomial + * + * kernel sin function on [-pi/4, pi/4], pi/4 ~ 0.7854 + * Input x is assumed to be bounded by ~pi/4 in magnitude. + * Input y is the tail of x. + * Input iy indicates whether y is 0. (if iy=0, y assume to be 0). + * + * Algorithm + * 1. Since sin(-x) = -sin(x), we need only to consider positive x. + * 2. 
if x < 2^-27 (hx<0x3e400000 0), return x with inexact if x!=0. + * 3. sin(x) is approximated by a polynomial of degree 13 on + * [0,pi/4] + * 3 13 + * sin(x) ~ x + S1*x + ... + S6*x + * where + * + * |sin(x) 2 4 6 8 10 12 | -58 + * |----- - (1+S1*x +S2*x +S3*x +S4*x +S5*x +S6*x )| <= 2 + * | x | + * + * 4. sin(x+y) = sin(x) + sin'(x')*y + * ~ sin(x) + (1-x*x/2)*y + * For better accuracy, let + * 3 2 2 2 2 + * r = x *(S2+x *(S3+x *(S4+x *(S5+x *S6)))) + * then 3 2 + * sin(x) = x + (S1*x + (x *(r-y/2)+y)) + */ + +static const float64_t +// -1/3! = -1/6 +S1 = float64_t::fromRaw( 0xBFC5555555555549 ), +// 1/5! = 1/120 +S2 = float64_t::fromRaw( 0x3F8111111110F8A6 ), +// -1/7! = -1/5040 +S3 = float64_t::fromRaw( 0xBF2A01A019C161D5 ), +// 1/9! = 1/362880 +S4 = float64_t::fromRaw( 0x3EC71DE357B1FE7D ), +// -1/11! = -1/39916800 +S5 = float64_t::fromRaw( 0xBE5AE5E68A2B9CEB ), +// 1/13! = 1/6227020800 +S6 = float64_t::fromRaw( 0x3DE5D93A5ACFD57C ); + +static float64_t f64_sin_kernel(float64_t x) +{ + if(x.getExp() < -27) + { + if(x != x.zero()) raiseFlags(flag_inexact); + return x; + } + + float64_t z = x*x; + return x*mulAdd(z, mulAdd(z, mulAdd(z, mulAdd(z, mulAdd(z, mulAdd(z, + S6, S5), S4), S3), S2), S1), x.one()); +} + +/* + * __kernel_cos( x, y ) + * N.B. from OpenCV's side: y removed, simplified to one polynomial + * + * kernel cos function on [-pi/4, pi/4], pi/4 ~ 0.785398164 + * Input x is assumed to be bounded by ~pi/4 in magnitude. + * Input y is the tail of x. + * + * Algorithm + * 1. Since cos(-x) = cos(x), we need only to consider positive x. + * 2. if x < 2^-27 (hx<0x3e400000 0), return 1 with inexact if x!=0. + * 3. cos(x) is approximated by a polynomial of degree 14 on + * [0,pi/4] + * 4 14 + * cos(x) ~ 1 - x*x/2 + C1*x + ... + C6*x + * where the remez error is + * + * | 2 4 6 8 10 12 14 | -58 + * |cos(x)-(1-.5*x +C1*x +C2*x +C3*x +C4*x +C5*x +C6*x )| <= 2 + * | | + * + * 4 6 8 10 12 14 + * 4. let r = C1*x +C2*x +C3*x +C4*x +C5*x +C6*x , then + * cos(x) = 1 - x*x/2 + r + * since cos(x+y) ~ cos(x) - sin(x)*y + * ~ cos(x) - x*y, + * a correction term is necessary in cos(x) and hence + * cos(x+y) = 1 - (x*x/2 - (r - x*y)) + * + * N.B. The following part was removed since we have enough precision + * + * For better accuracy when x > 0.3, let qx = |x|/4 with + * the last 32 bits mask off, and if x > 0.78125, let qx = 0.28125. + * Then + * cos(x+y) = (1-qx) - ((x*x/2-qx) - (r-x*y)). + * Note that 1-qx and (x*x/2-qx) is EXACT here, and the + * magnitude of the latter is at least a quarter of x*x/2, + * thus, reducing the rounding error in the subtraction. + */ + +static const float64_t +// 1/4! = 1/24 +C1 = float64_t::fromRaw( 0x3FA555555555554C ), +// -1/6! = -1/720 +C2 = float64_t::fromRaw( 0xBF56C16C16C15177 ), +// 1/8! = 1/40320 +C3 = float64_t::fromRaw( 0x3EFA01A019CB1590 ), +// -1/10! = -1/3628800 +C4 = float64_t::fromRaw( 0xBE927E4F809C52AD ), +// 1/12! = 1/479001600 +C5 = float64_t::fromRaw( 0x3E21EE9EBDB4B1C4 ), +// -1/14! 
= -1/87178291200 +C6 = float64_t::fromRaw( 0xBDA8FAE9BE8838D4 ); + +static float64_t f64_cos_kernel(float64_t x) +{ + if(x.getExp() < -27) + { + if(x != x.zero()) raiseFlags(flag_inexact); + return x.one(); + } + + float64_t z = x*x; + return mulAdd(mulAdd(z, mulAdd(z, mulAdd(z, mulAdd(z, mulAdd(z, mulAdd(z, + C6, C5), C4), C3), C2), C1), -half), z, x.one()); +} + +static void f64_sincos_reduce(const float64_t& x, float64_t& y, int& n) +{ + if(abs(x) < piby4) + { + n = 0, y = x; + } + else + { + /* argument reduction needed */ + float64_t p = f64_rem(x, pi2); + float64_t v = p - float64_t::eps().setExp(-10); + if(abs(v) <= piby4) + { + n = 0; y = p; + } + else if(abs(v) <= (float64_t(3)*piby4)) + { + n = (p > 0) ? 1 : 3; + y = (p > 0) ? p - piby2 : p + piby2; + } + else + { + n = 2; + y = (p > 0) ? p - p.pi() : p + p.pi(); + } + } +} + +/* sin(x) + * Return sine function of x. + * + * kernel function: + * __kernel_sin ... sine function on [-pi/4,pi/4] + * __kernel_cos ... cosine function on [-pi/4,pi/4] + * + * Method. + * Let S,C and T denote the sin, cos and tan respectively on + * [-PI/4, +PI/4]. Reduce the argument x to y = x-k*pi/2 + * in [-pi/4 , +pi/4], and let n = k mod 4. + * We have + * + * n sin(x) cos(x) tan(x) + * ---------------------------------------------------------- + * 0 S C T + * 1 C -S -1/T + * 2 -S -C T + * 3 -C S -1/T + * ---------------------------------------------------------- + * + * Special cases: + * Let trig be any of sin, cos, or tan. + * trig(+-INF) is NaN, with signals; + * trig(NaN) is that NaN; + * + * Accuracy: + * TRIG(x) returns trig(x) nearly rounded + */ + +static float64_t f64_sin( float64_t x ) +{ + if(x.isInf() || x.isNaN()) return x.nan(); + + float64_t y; int n; + f64_sincos_reduce(x, y, n); + switch (n) + { + case 0: return f64_sin_kernel(y); + case 1: return f64_cos_kernel(y); + case 2: return -f64_sin_kernel(y); + default: return -f64_cos_kernel(y); + } +} + +/* cos(x) + * Return cosine function of x. + * + * kernel function: + * __kernel_sin ... sine function on [-pi/4,pi/4] + * __kernel_cos ... cosine function on [-pi/4,pi/4] + * + * Method. + * Let S,C and T denote the sin, cos and tan respectively on + * [-PI/4, +PI/4]. Reduce the argument x to y = x-k*pi/2 + * in [-pi/4 , +pi/4], and let n = k mod 4. + * We have + * + * n sin(x) cos(x) tan(x) + * ---------------------------------------------------------- + * 0 S C T + * 1 C -S -1/T + * 2 -S -C T + * 3 -C S -1/T + * ---------------------------------------------------------- + * + * Special cases: + * Let trig be any of sin, cos, or tan. + * trig(+-INF) is NaN, with signals; + * trig(NaN) is that NaN; + * + * Accuracy: + * TRIG(x) returns trig(x) nearly rounded + */ + +static float64_t f64_cos( float64_t x ) +{ + if(x.isInf() || x.isNaN()) return x.nan(); + + float64_t y; int n; + f64_sincos_reduce(x, y, n); + switch (n) + { + case 0: return f64_cos_kernel(y); + case 1: return -f64_sin_kernel(y); + case 2: return -f64_cos_kernel(y); + default: return f64_sin_kernel(y); + } +} + } diff --git a/modules/core/test/test_math.cpp b/modules/core/test/test_math.cpp index 3bb4201e560d03ea41d122c081c261e346d2dea0..a54425cf556031186dcf930e80372748ea603d62 100644 --- a/modules/core/test/test_math.cpp +++ b/modules/core/test/test_math.cpp @@ -3071,12 +3071,10 @@ TEST(Core_QR_Solver, accuracy64f) softdouble naiveExp(softdouble x) { - int exponent = ((x.v >>52) & 0x7FF) - 1023; - int sign = (((uint64_t)(x.v) >> 63) != 0) ? 
-1 : 1; + int exponent = x.getExp(); + int sign = x.getSign() ? -1 : 1; if(sign < 0 && exponent >= 10) return softdouble::inf(); - softdouble mantissa; - //mantissa.v = packToF64UI(0, 1023, fracF64UI(x.v)); - mantissa.v = ((uint64_t)(1023)<<52) + (((x.v) & UINT64_C( 0x000FFFFFFFFFFFFF ))); + softdouble mantissa = x.getFrac(); //Taylor series for mantissa uint64 fac[20] = {1, 2, 6, 24, 120, 720, 5040, 40320, 362880, 3628800, 39916800, 479001600, 6227020800, 87178291200, 1307674368000, @@ -3109,34 +3107,33 @@ TEST(Core_SoftFloat, exp32) ASSERT_EQ (exp(-softfloat::inf()), softfloat::zero()); //ln(FLT_MAX) ~ 88.722 - const float ln_max = 88.722f; - vector<float> inputs; + const softfloat ln_max(88.722f); + vector<softfloat> inputs; RNG rng(0); - inputs.push_back(0); - inputs.push_back(1); - inputs.push_back(FLT_MIN); + inputs.push_back(softfloat::zero()); + inputs.push_back(softfloat::one()); + inputs.push_back(softfloat::min()); for(int i = 0; i < 50000; i++) { Cv32suf x; x.fmt.sign = rng() % 2; x.fmt.exponent = rng() % (10 + 127); //bigger exponent will produce inf x.fmt.significand = rng() % (1 << 23); - if(x.f > ln_max) - x.f = rng.uniform(0.0f, ln_max); - inputs.push_back(x.f); + if(softfloat(x.f) > ln_max) + x.f = rng.uniform(0.0f, (float)ln_max); + inputs.push_back(softfloat(x.f)); } for(size_t i = 0; i < inputs.size(); i++) { - float xf = inputs[i]; - softfloat x(xf); + softfloat x(inputs[i]); softfloat y = exp(x); ASSERT_TRUE(!y.isNaN()); ASSERT_TRUE(!y.isInf()); ASSERT_GE(y, softfloat::zero()); softfloat ygood = naiveExp(x); softfloat diff = abs(ygood - y); - const softfloat eps(FLT_EPSILON); + const softfloat eps = softfloat::eps(); if(diff > eps) { ASSERT_LE(diff/max(abs(y), abs(ygood)), eps); @@ -3152,12 +3149,12 @@ TEST(Core_SoftFloat, exp64) ASSERT_EQ (exp(-softdouble::inf()), softdouble::zero()); //ln(DBL_MAX) ~ 709.7827 - const double ln_max = 709.7827; - vector<double> inputs; + const softdouble ln_max(709.7827); + vector<softdouble> inputs; RNG rng(0); - inputs.push_back(0); - inputs.push_back(1); - inputs.push_back(DBL_MIN); + inputs.push_back(softdouble::zero()); + inputs.push_back(softdouble::one()); + inputs.push_back(softdouble::min()); for(int i = 0; i < 50000; i++) { Cv64suf x; @@ -3165,22 +3162,21 @@ uint64 exponent = rng() % (10 + 1023); //bigger exponent will produce inf uint64 mantissa = (((long long int)((unsigned int)(rng)) << 32 ) | (unsigned int)(rng)) & ((1LL << 52) - 1); x.u = (sign << 63) | (exponent << 52) | mantissa; - if(x.f > ln_max) - x.f = rng.uniform(0.0, ln_max); - inputs.push_back(x.f); + if(softdouble(x.f) > ln_max) + x.f = rng.uniform(0.0, (double)ln_max); + inputs.push_back(softdouble(x.f)); } for(size_t i = 0; i < inputs.size(); i++) { - double xf = inputs[i]; - softdouble x(xf); + softdouble x(inputs[i]); softdouble y = exp(x); ASSERT_TRUE(!y.isNaN()); ASSERT_TRUE(!y.isInf()); ASSERT_GE(y, softdouble::zero()); softdouble ygood = naiveExp(x); softdouble diff = abs(ygood - y); - const softdouble eps(DBL_EPSILON); + const softdouble eps = softdouble::eps(); if(diff > eps) { ASSERT_LE(diff/max(abs(y), abs(ygood)), softdouble(8192)*eps); @@ -3205,25 +3201,24 @@ TEST(Core_SoftFloat, log32) } ASSERT_TRUE(log(softfloat::zero()).isInf()); - vector<float> inputs; + vector<softfloat> inputs; - inputs.push_back(1); - inputs.push_back(std::exp(1.f)); - inputs.push_back(FLT_MIN); - inputs.push_back(FLT_MAX); + inputs.push_back(softfloat::one()); + inputs.push_back(softfloat(exp(softfloat::one()))); + inputs.push_back(softfloat::min()); + inputs.push_back(softfloat::max()); for(int i = 
0; i < nValues; i++) { Cv32suf x; x.fmt.sign = 0; x.fmt.exponent = rng() % 255; x.fmt.significand = rng() % (1 << 23); - inputs.push_back(x.f); + inputs.push_back(softfloat(x.f)); } for(size_t i = 0; i < inputs.size(); i++) { - float xf = inputs[i]; - softfloat x(xf); + softfloat x(inputs[i]); softfloat y = log(x); ASSERT_TRUE(!y.isNaN()); ASSERT_TRUE(!y.isInf()); @@ -3231,9 +3226,10 @@ TEST(Core_SoftFloat, log32) softfloat diff = abs(ex - x); // 88 is approx estimate of max exp() argument ASSERT_TRUE(!ex.isInf() || (y > softfloat(88))); - if(!ex.isInf() && diff > softfloat(FLT_EPSILON)) + const softfloat eps2 = softfloat().setExp(-17); + if(!ex.isInf() && diff > softfloat::eps()) { - ASSERT_LT(diff/max(abs(ex), x), softfloat(0.00001f)); + ASSERT_LT(diff/max(abs(ex), x), eps2); } } } @@ -3256,11 +3252,11 @@ TEST(Core_SoftFloat, log64) } ASSERT_TRUE(log(softdouble::zero()).isInf()); - vector<double> inputs; - inputs.push_back(1); + vector<softdouble> inputs; + inputs.push_back(softdouble::one()); inputs.push_back(exp(softdouble::one())); - inputs.push_back(DBL_MIN); - inputs.push_back(DBL_MAX); + inputs.push_back(softdouble::min()); + inputs.push_back(softdouble::max()); for(int i = 0; i < nValues; i++) { Cv64suf x; @@ -3268,13 +3264,12 @@ uint64 exponent = rng() % 2047; uint64 mantissa = (((long long int)((unsigned int)(rng)) << 32 ) | (unsigned int)(rng)) & ((1LL << 52) - 1); x.u = (sign << 63) | (exponent << 52) | mantissa; - inputs.push_back(abs(x.f)); + inputs.push_back(softdouble(x.f)); } for(size_t i = 0; i < inputs.size(); i++) { - double xf = inputs[i]; - softdouble x(xf); + softdouble x(inputs[i]); softdouble y = log(x); ASSERT_TRUE(!y.isNaN()); ASSERT_TRUE(!y.isInf()); @@ -3282,40 +3277,40 @@ softdouble diff = abs(ex - x); // 700 is approx estimate of max exp() argument ASSERT_TRUE(!ex.isInf() || (y > softdouble(700))); - if(!ex.isInf() && diff > softdouble(DBL_EPSILON)) + const softdouble eps2 = softdouble().setExp(-41); + if(!ex.isInf() && diff > softdouble::eps()) { - ASSERT_LT(diff/max(abs(ex), x), softdouble(1e-10)); + ASSERT_LT(diff/max(abs(ex), x), eps2); } } } TEST(Core_SoftFloat, cbrt32) { - vector<float> inputs; + vector<softfloat> inputs; RNG rng(0); - inputs.push_back(0); - inputs.push_back(1); - inputs.push_back(FLT_MAX); - inputs.push_back(FLT_MIN); + inputs.push_back(softfloat::zero()); + inputs.push_back(softfloat::one()); + inputs.push_back(softfloat::max()); + inputs.push_back(softfloat::min()); for(int i = 0; i < 50000; i++) { Cv32suf x; x.fmt.sign = rng() % 2; x.fmt.exponent = rng() % 255; x.fmt.significand = rng() % (1 << 23); - inputs.push_back(x.f); + inputs.push_back(softfloat(x.f)); } for(size_t i = 0; i < inputs.size(); i++) { - float xf = inputs[i]; - softfloat x(xf); + softfloat x(inputs[i]); softfloat y = cbrt(x); ASSERT_TRUE(!y.isNaN()); ASSERT_TRUE(!y.isInf()); softfloat cube = y*y*y; softfloat diff = abs(x - cube); - const softfloat eps(FLT_EPSILON); + const softfloat eps = softfloat::eps(); if(diff > eps) { ASSERT_LT(diff/max(abs(x), abs(cube)), softfloat(4)*eps); @@ -3384,10 +3379,9 @@ TEST(Core_SoftFloat, pow32) // nan ** y == nan, if y != 0 for(size_t i = 0; i < nValues; i++) { - Cv32suf x; - x.u = rng(); - if(!x.u) x.f = FLT_MIN; - softfloat x32(x.f); + unsigned u = rng(); + softfloat x32 = softfloat::fromRaw(u); + x32 = (x32 != softfloat::zero()) ? 
x32 : softfloat::min(); ASSERT_TRUE(pow(nan, x32).isNaN()); } // nan ** 0 == 1 @@ -3514,10 +3508,9 @@ TEST(Core_SoftFloat, pow64) // nan ** y == nan, if y != 0 for(size_t i = 0; i < nValues; i++) { - Cv64suf x; - x.u = ((long long int)((unsigned int)(rng)) << 32 ) | (unsigned int)(rng); - if(!x.u) x.f = DBL_MIN; - softdouble x64(x.f); + uint64 u = ((long long int)((unsigned int)(rng)) << 32 ) | (unsigned int)(rng); + softdouble x64 = softdouble::fromRaw(u); + x64 = (x64 != softdouble::zero()) ? x64 : softdouble::min(); ASSERT_TRUE(pow(nan, x64).isNaN()); } // nan ** 0 == 1 @@ -3588,4 +3581,98 @@ } } +TEST(Core_SoftFloat, sincos64) +{ + static const softdouble + two = softdouble(2), three = softdouble(3), + half = softdouble::one()/two, + zero = softdouble::zero(), one = softdouble::one(), + pi = softdouble::pi(), piby2 = pi/two, eps = softdouble::eps(), + sin45 = sqrt(two)/two, sin60 = sqrt(three)/two; + + softdouble vstdAngles[] = + //x, sin(x), cos(x) + { + zero, zero, one, + pi/softdouble(6), half, sin60, + pi/softdouble(4), sin45, sin45, + pi/three, sin60, half, + }; + vector<softdouble> stdAngles; + stdAngles.assign(vstdAngles, vstdAngles + 3*4); + + static const softdouble stdEps = eps.setExp(-39); + const size_t nStdValues = 5000; + for(size_t i = 0; i < nStdValues; i++) + { + for(size_t k = 0; k < stdAngles.size()/3; k++) + { + softdouble x = stdAngles[k*3] + pi*softdouble(2*((int)i-(int)nStdValues/2)); + softdouble s = stdAngles[k*3+1]; + softdouble c = stdAngles[k*3+2]; + ASSERT_LE(abs(sin(x) - s), stdEps); + ASSERT_LE(abs(cos(x) - c), stdEps); + //sin(x+pi/2) = cos(x) + ASSERT_LE(abs(sin(x + piby2) - c), stdEps); + //sin(x+pi) = -sin(x) + ASSERT_LE(abs(sin(x + pi) + s), stdEps); + //cos(x+pi/2) = -sin(x) + ASSERT_LE(abs(cos(x+piby2) + s), stdEps); + //cos(x+pi) = -cos(x) + ASSERT_LE(abs(cos(x+pi) + c), stdEps); + } + } + + // sin(x) is NaN iff x is NaN or Inf + ASSERT_TRUE(sin(softdouble::inf()).isNaN()); + ASSERT_TRUE(sin(softdouble::nan()).isNaN()); + + vector<int> exponents; + exponents.push_back(0); + for(int i = 1; i < 52; i++) + { + exponents.push_back( i); + exponents.push_back(-i); + } + exponents.push_back(256); exponents.push_back(-256); + exponents.push_back(512); exponents.push_back(-512); + exponents.push_back(1022); exponents.push_back(-1022); + + vector<softdouble> inputs; + RNG rng(0); + + static const size_t nValues = 1 << 18; + for(size_t i = 0; i < nValues; i++) + { + softdouble x; + uint64 mantissa = (((long long int)((unsigned int)(rng)) << 32 ) | (unsigned int)(rng)) & ((1LL << 52) - 1); + x.v = mantissa; + x = x.setSign((rng() % 2) != 0); + x = x.setExp(exponents[rng() % exponents.size()]); + inputs.push_back(x); + } + + for(size_t i = 0; i < inputs.size(); i++) + { + softdouble x = inputs[i]; + + int xexp = x.getExp(); + softdouble randEps = eps.setExp(max(xexp-52, -46)); + softdouble sx = sin(x); + softdouble cx = cos(x); + ASSERT_FALSE(sx.isInf()); ASSERT_FALSE(cx.isInf()); + ASSERT_FALSE(sx.isNaN()); ASSERT_FALSE(cx.isNaN()); + ASSERT_LE(abs(sx), one); ASSERT_LE(abs(cx), one); + ASSERT_LE(abs((sx*sx + cx*cx) - one), eps); + ASSERT_LE(abs(sin(x*two) - two*sx*cx), randEps); + ASSERT_LE(abs(cos(x*two) - (cx*cx - sx*sx)), randEps); + ASSERT_LE(abs(sin(-x) + sx), eps); + ASSERT_LE(abs(cos(-x) - cx), eps); + ASSERT_LE(abs(sin(x + piby2) - cx), randEps); + ASSERT_LE(abs(sin(x + pi) + sx), randEps); + ASSERT_LE(abs(cos(x+piby2) + sx), randEps); + ASSERT_LE(abs(cos(x+pi) + cx), randEps); + } +} + /* End of file. */
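The operations listed in the softfloat.hpp documentation above can be exercised with a short standalone program. This is a minimal usage sketch (not part of the patch itself): it assumes OpenCV is built with this change and uses only the constructors, operators and conversion helpers declared in the header.

```cpp
#include <cstdio>
#include "opencv2/core/softfloat.hpp"

int main()
{
    using namespace cv;

    // Constructors are explicit: build values from float/double or from integers.
    softfloat  a(0.1f), b(3);      // 3 goes through the int32_t constructor
    softdouble d(0.1);

    // Arithmetic and comparisons look like the built-in types, but are evaluated
    // in software, so the resulting bits are the same on every platform.
    softfloat c = a * b + softfloat::one();
    bool ordered = (a < b) && (c >= a);

    // Conversions back to hardware types and to int.
    float  cf = (float)c;          // via operator float()
    int    ci = cvRound(c);        // rounds to nearest, ties to even
    double dd = (double)d;

    printf("c = %g, cvRound(c) = %d, d = %g, ordered = %d\n", cf, ci, dd, (int)ordered);

    // As documented above, any comparison with NaN is false except operator!=.
    printf("nan == nan: %d, nan != nan: %d\n",
           (int)(softfloat::nan() == softfloat::nan()),
           (int)(softfloat::nan() != softfloat::nan()));
    return 0;
}
```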
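The get/set methods added to both types make the sign/exponent/significand decomposition explicit. A small sketch of that API, with illustrative values only:

```cpp
#include <cstdio>
#include "opencv2/core/softfloat.hpp"

int main()
{
    using namespace cv;

    softdouble x(-6.25);

    // For a normal number, x == +/- frac * 2^exp with 1 <= frac < 2.
    bool       neg  = x.getSign();
    int        e    = x.getExp();
    softdouble frac = x.getFrac();

    // Push the exponent back in to rebuild the magnitude.
    softdouble mag = frac.setExp(e);
    printf("sign=%d exp=%d frac=%g |x|=%g\n", (int)neg, e, (double)frac, (double)mag);

    // The v field holds the raw IEEE 754 bit pattern; fromRaw() goes the other way.
    printf("bits of x: 0x%016llx\n", (unsigned long long)x.v);
    softdouble y = softdouble::fromRaw(x.v);
    printf("bit-exact copy equals x: %d\n", (int)(y == x));
    return 0;
}
```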
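The remainder operator follows the IEEE definition quoted in the header (a - n*b with n the integer nearest to a/b, ties to even), so unlike fmod() its result can be negative even for positive operands. A tiny illustration:

```cpp
#include <cstdio>
#include "opencv2/core/softfloat.hpp"

int main()
{
    using namespace cv;

    softdouble a(7), b(2);
    // 7/2 = 3.5 is exactly halfway between 3 and 4, so n is the even integer 4
    // and the IEEE remainder is 7 - 4*2 = -1, whereas fmod(7, 2) would give 1.
    printf("softdouble(7) %% softdouble(2) = %g\n", (double)(a % b));
    return 0;
}
```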
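cvRound on the soft types uses the library's only rounding mode, round-to-nearest with ties to even, and the saturate_cast specializations behave as the header comment says (negative values are intentionally not clipped when casting to unsigned). A sketch:

```cpp
#include <cstdio>
#include "opencv2/core/softfloat.hpp"

int main()
{
    using namespace cv;

    softdouble a(2.5), b(-1.5);

    // Ties go to the even neighbour: 2.5 -> 2, -1.5 -> -2.
    printf("cvRound(2.5)=%d cvRound(-1.5)=%d\n", cvRound(a), cvRound(b));
    printf("cvFloor(-1.5)=%d cvCeil(-1.5)=%d cvTrunc(-1.5)=%d\n",
           cvFloor(b), cvCeil(b), cvTrunc(b));

    // Out-of-range values are clamped for the signed specializations...
    printf("saturate_cast<short>(70000) = %d\n", (int)saturate_cast<short>(softdouble(70000)));
    // ...but negative numbers are not clipped for unsigned,
    // so -1 becomes 0xffffffff as documented.
    printf("saturate_cast<unsigned>(-1) = 0x%x\n", saturate_cast<unsigned>(softdouble(-1)));
    return 0;
}
```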
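mulAdd() is the FMA described above: one rounding for (a*b)+c instead of two. The sketch below just prints both variants; for some inputs they differ in the last bit, for others they coincide.

```cpp
#include <cstdio>
#include "opencv2/core/softfloat.hpp"

int main()
{
    using namespace cv;

    softdouble a(1.0/3.0), b(3), c(-1);

    softdouble fused    = mulAdd(a, b, c);  // round((a*b) + c), a*b kept exact
    softdouble separate = a*b + c;          // round(round(a*b) + c)

    printf("mulAdd(a, b, c) = %.17g\n", (double)fused);
    printf("a*b + c         = %.17g\n", (double)separate);
    printf("same bit pattern: %d\n", (int)(fused.v == separate.v));
    return 0;
}
```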
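The special cases documented for exp(), log() and pow() can be checked directly; the following sketch mirrors a few of the bullet points from the header.

```cpp
#include <cstdio>
#include "opencv2/core/softfloat.hpp"

int main()
{
    using namespace cv;

    const softdouble zero = softdouble::zero(), one = softdouble::one();

    printf("exp(-Inf) == 0      : %d\n", (int)(exp(-softdouble::inf()) == zero));
    printf("log(0) is -Inf      : %d\n", (int)(log(zero).isInf()));
    printf("log(-1) is NaN      : %d\n", (int)(log(softdouble(-1)).isNaN()));
    printf("pow(0, 0) == 1      : %d\n", (int)(pow(zero, zero) == one));
    // Negative base with a non-integer exponent has no real result:
    printf("pow(-2, 0.5) is NaN : %d\n",
           (int)(pow(softdouble(-2), softdouble(0.5)).isNaN()));
    // Ordinary values work as expected:
    printf("exp(1) = %.17g\n", (double)exp(one));
    return 0;
}
```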
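sin() and cos() are provided for softdouble only and, as the fdlibm comments explain, reduce the argument to y = x - k*pi/2 with |y| <= pi/4 and pick the kernel by k mod 4. The quadrant table translates into the shift identities that the sincos64 test checks; a compact sketch:

```cpp
#include <cstdio>
#include "opencv2/core/softfloat.hpp"

int main()
{
    using namespace cv;

    const softdouble pi = softdouble::pi(), piby2 = pi/softdouble(2);

    // Inf and NaN arguments give NaN.
    printf("sin(Inf) is NaN: %d\n", (int)(sin(softdouble::inf()).isNaN()));

    // Identities that follow from the quadrant table (should be ~0 up to rounding):
    softdouble x(1.25);
    printf("sin(x+pi/2) - cos(x)    = %g\n", (double)(sin(x + piby2) - cos(x)));
    printf("cos(x+pi/2) + sin(x)    = %g\n", (double)(cos(x + piby2) + sin(x)));
    printf("sin(x)^2 + cos(x)^2 - 1 = %g\n",
           (double)(sin(x)*sin(x) + cos(x)*cos(x) - softdouble::one()));
    return 0;
}
```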
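The polynomial kernels evaluate their series in Horner form through nested mulAdd() calls, one rounding per step. The sketch below re-creates that structure for the sine series using the plain factorial reciprocals quoted in the comments (-1/3!, 1/5!, ...) rather than the exact fdlibm constants, so it illustrates the kernel's shape but is not a bit-exact reproduction of cv::sin().

```cpp
#include <cstdio>
#include "opencv2/core/softfloat.hpp"

int main()
{
    using namespace cv;
    typedef softdouble sd;

    // Taylor coefficients of sin around 0 (hypothetical stand-ins for S1..S6).
    sd s1 = -sd::one()/sd(6),         s2 = sd::one()/sd(120),
       s3 = -sd::one()/sd(5040),      s4 = sd::one()/sd(362880),
       s5 = -sd::one()/sd(39916800),  s6 = sd::one()/sd(6227020800.0);

    sd x(0.5);
    sd z = x*x;
    // Horner evaluation: x * (1 + z*(s1 + z*(s2 + z*(s3 + z*(s4 + z*(s5 + z*s6))))))
    sd approx = x*mulAdd(z, mulAdd(z, mulAdd(z, mulAdd(z, mulAdd(z, mulAdd(z,
                  s6, s5), s4), s3), s2), s1), sd::one());

    printf("polynomial sin(0.5) = %.17g\n", (double)approx);
    printf("cv::sin(0.5)        = %.17g\n", (double)sin(x));
    return 0;
}
```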