diff --git a/paddle/math/float16.h b/paddle/math/float16.h
index e9d4e6737dc145c3d2ab803bfe5bd84f6a7744b9..9c06b423ef61622ded752805a55be8253fba3cd9 100644
--- a/paddle/math/float16.h
+++ b/paddle/math/float16.h
@@ -23,7 +23,7 @@ limitations under the License. */
 #define USE_EIGEN
 
 #ifdef USE_EIGEN  // delete this #if macro
-#include "Eigen/src/Core/arch/CUDA/Half.h"
+#include "unsupported/Eigen/CXX11/Tensor"
 #endif
 
 #ifdef __GNUC__
@@ -126,7 +126,7 @@ struct PADDLE_ALIGN(2) float16 {
   // According to gcc, __fp16 can only be used as an argument to fp16
   // intrinsic defined in arm_neon.h or as a storage type. It cannot
   // be used as a formal function argument.
-  // TODO (kexinzhao): test it on RPI
+  // TODO(kexinzhao): test it on RPI
   PADDLE_HOSTDEVICE inline float16(const float16_t* h) {
     x = *reinterpret_cast<const uint16_t*>(h);
   }
@@ -564,7 +564,7 @@ PADDLE_HOSTDEVICE inline bool operator>=(const float16& a, const float16& b) {
 
 namespace fp16_impl {
 
-Union Bits {
+union Bits {
   float f;
   int32_t si;
   uint32_t ui;
@@ -584,7 +584,7 @@ constexpr int32_t maxC = maxN >> shift;
 constexpr int32_t minC = minN >> shift;
 constexpr int32_t sigC = sigN >> shiftSign;
 
-const int32_t mulN = 0x52000000;  //(1 << 23) / minN
+const int32_t mulN = 0x52000000;  // (1 << 23) / minN
 const int32_t mulC = 0x33800000;  // minN / (1 << (23 - shift))
 const int32_t subC = 0x003FF;     // max flt32 subnormal downshifted
 const int32_t norC = 0x00400;     // min flt32 normal downshifted
@@ -693,7 +693,7 @@ PADDLE_HOSTDEVICE inline float half_to_float(float16 h) {
   // Conversion routine adapted from
   // http://stackoverflow.com/questions/1659440/32-bit-to-16-bit-floating-point-conversion
   Bits v;
-  v.ui = x;
+  v.ui = h.x;
   int32_t sign = v.si & sigC;
   v.si ^= sign;
   sign <<= shiftSign;
@@ -711,6 +711,6 @@
 #endif
 }
 
-}  // namespace half_impl
+}  // namespace fp16_impl
 
 }  // namespace paddle
diff --git a/paddle/math/tests/test_float16.cpp b/paddle/math/tests/test_float16.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..79f63d3a806ffb016d89368d665dc6bcea885080
--- /dev/null
+++ b/paddle/math/tests/test_float16.cpp
@@ -0,0 +1,78 @@
+/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include <gtest/gtest.h>
+
+#include "paddle/math/float16.h"
+
+namespace paddle {
+
+#ifdef PADDLE_CUDA_FP16
+TEST(float16, gpu) {
+  // Conversion to and from cuda half
+  float16 v1 = half(float16(1.0f));
+  EXPECT_EQ(v1.x, 0x3c00);
+
+  // Conversion to and from Eigen::half
+  float16 v2 = Eigen::half(float16(0.5f));
+  EXPECT_EQ(v2.x, 0x3800);
+
+  // Conversion from float
+  EXPECT_EQ(float16(1.0f).x, 0x3c00);
+  EXPECT_EQ(float16(0.5f).x, 0x3800);
+  EXPECT_EQ(float16(0.33333f).x, 0x3555);
+  EXPECT_EQ(float16(0.0f).x, 0x0000);
+  EXPECT_EQ(float16(-0.0f).x, 0x8000);
+  EXPECT_EQ(float16(65504.0f).x, 0x7bff);
+  EXPECT_EQ(float16(65536.0f).x, 0x7c00);
+
+  // Conversion from double
+
+  // Conversion from int
+
+  // Conversion from bool
+}
+
+TEST(float16, arithmetic_gpu) { EXPECT_EQ(float(float16(2) + float16(2)), 4); }
+
+TEST(float16, comparison_gpu) { EXPECT_TRUE(float16(1.0f) > float16(0.5f)); }
+#endif
+
+TEST(float16, conversion_cpu) {
+  // Conversion to and from Eigen::half
+  EXPECT_EQ(float16(Eigen::half(float16(1.0f))).x, 0x3c00);
+  EXPECT_EQ(float16(Eigen::half(float16(0.5f))).x, 0x3800);
+  EXPECT_EQ(float16(Eigen::half(float16(0.33333f))).x, 0x3555);
+  EXPECT_EQ(float16(Eigen::half(float16(0.0f))).x, 0x0000);
+  EXPECT_EQ(float16(Eigen::half(float16(-0.0f))).x, 0x8000);
+  EXPECT_EQ(float16(Eigen::half(float16(65504.0f))).x, 0x7bff);
+  EXPECT_EQ(float16(Eigen::half(float16(65536.0f))).x, 0x7c00);
+
+  // Conversion from float
+  EXPECT_EQ(float16(1.0f).x, 0x3c00);
+  EXPECT_EQ(float16(0.5f).x, 0x3800);
+  EXPECT_EQ(float16(0.33333f).x, 0x3555);
+  EXPECT_EQ(float16(0.0f).x, 0x0000);
+  EXPECT_EQ(float16(-0.0f).x, 0x8000);
+  EXPECT_EQ(float16(65504.0f).x, 0x7bff);
+  EXPECT_EQ(float16(65536.0f).x, 0x7c00);
+
+  // Conversion from double
+
+  // Conversion from int
+
+  // Conversion from bool
+}
+
+TEST(float16, arithmetic_cpu) { EXPECT_EQ(float(float16(2) + float16(2)), 4); }
+
+TEST(float16, comparison_cpu) { EXPECT_TRUE(float16(1.0f) > float16(0.5f)); }
+
+}  // namespace paddle
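
Aside on the conversion routine (independent of the patch itself): the hunk at line 693 shows only the first statements of half_to_float. The function adapts the branchless technique from the StackOverflow answer cited in the code, and the sketch below reconstructs that technique as a standalone program so the constants sigC, mulC, subC, and norC above can be followed end to end. The values of shift, shiftSign, minD, and maxD are taken from that answer, since their definitions in float16.h fall outside the hunks shown; treat this as an illustration of the algorithm, not a copy of Paddle's file.

#include <cstdint>
#include <cstdio>

// Standalone sketch of the branchless half->float conversion adapted in
// float16.h, following the StackOverflow answer cited in the code. shift,
// shiftSign, minD, and maxD come from that answer, not from the hunks above.
union Bits {
  float f;
  int32_t si;
  uint32_t ui;
};

constexpr int shift = 13;      // mantissa width difference: 23 - 10
constexpr int shiftSign = 16;  // sign bit position difference: 31 - 15

constexpr int32_t infN = 0x7F800000;  // flt32 infinity
constexpr int32_t maxN = 0x477FE000;  // max flt16 normal as a flt32
constexpr int32_t minN = 0x38800000;  // min flt16 normal as a flt32
constexpr int32_t sigN = static_cast<int32_t>(0x80000000u);  // flt32 sign bit

constexpr int32_t infC = infN >> shift;
constexpr int32_t maxC = maxN >> shift;
constexpr int32_t minC = minN >> shift;
constexpr int32_t sigC = sigN >> shiftSign;  // arithmetic shift, as in the file

const int32_t mulC = 0x33800000;  // minN / (1 << (23 - shift)), i.e. 2^-24
const int32_t subC = 0x003FF;     // max flt32 subnormal downshifted
const int32_t norC = 0x00400;     // min flt32 normal downshifted

constexpr int32_t maxD = infC - maxC - 1;
constexpr int32_t minD = minC - subC - 1;

float half_to_float(uint16_t x) {
  Bits v;
  v.ui = x;  // in Paddle the argument is a float16 h, hence the v.ui = h.x fix
  int32_t sign = v.si & sigC;  // peel off the sign bit
  v.si ^= sign;
  sign <<= shiftSign;
  v.si ^= ((v.si + minD) ^ v.si) & -(v.si > subC);  // rebias normal values
  v.si ^= ((v.si + maxD) ^ v.si) & -(v.si > maxC);  // saturate inf/NaN
  Bits s;
  s.si = mulC;
  s.f *= v.si;                    // scale subnormal mantissas by 2^-24
  int32_t mask = -(norC > v.si);  // all-ones if the input was subnormal
  v.si <<= shift;
  v.si ^= (s.si ^ v.si) & mask;   // branch-free select: subnormal vs normal
  v.si |= sign;
  return v.f;
}

int main() {
  std::printf("%g %g %g\n",
              half_to_float(0x3c00),   // 1.0
              half_to_float(0x3800),   // 0.5
              half_to_float(0x7bff));  // 65504, the max finite half
  return 0;
}

The -(condition) idiom expands a bool into an all-ones or all-zero mask, so the rebias, saturate, and subnormal-select steps all run without branches, which is why the same routine is usable on both CPU and GPU.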
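
The bit patterns asserted in test_float16.cpp are plain IEEE 754 binary16 encodings: 1.0f has sign 0, exponent field 15 (bias 15), mantissa 0, hence 0x3c00; 65504.0f = (2 - 2^-10) * 2^15 is the largest finite half, 0x7bff; and 65536.0f is out of range, so it must saturate to infinity, 0x7c00. The standalone sketch below recomputes all six expected values with an explicit round-to-nearest-even conversion; float_to_half_bits is a hypothetical helper written for this note, not Paddle's converter, and can be used to derive expected values for the still-empty double/int/bool sections of the test.

#include <cstdint>
#include <cstdio>
#include <cstring>

// Hypothetical helper (not Paddle's converter): float -> IEEE 754 binary16
// bits with round-to-nearest-even, written for clarity rather than speed.
static uint16_t float_to_half_bits(float f) {
  uint32_t u;
  std::memcpy(&u, &f, sizeof(u));  // type-pun safely, like the Bits union
  uint16_t sign = static_cast<uint16_t>((u >> 16) & 0x8000u);
  int32_t exp = static_cast<int32_t>((u >> 23) & 0xffu);
  uint32_t mant = u & 0x007fffffu;

  if (exp == 0xff) {  // float inf/NaN maps to half inf/NaN
    return static_cast<uint16_t>(sign | 0x7c00u | (mant ? 0x0200u : 0u));
  }
  exp -= 112;       // re-bias exponent: float bias 127 -> half bias 15
  if (exp >= 31) {  // magnitude too large for half: saturate to infinity
    return static_cast<uint16_t>(sign | 0x7c00u);
  }
  if (exp <= 0) {  // half subnormal or zero
    if (exp < -10) return sign;  // underflow to signed zero (handles -0.0f)
    mant |= 0x00800000u;         // make the implicit leading 1 explicit
    uint32_t shift_amt = static_cast<uint32_t>(14 - exp);
    uint32_t half_mant = mant >> shift_amt;
    uint32_t rem = mant & ((1u << shift_amt) - 1u);
    uint32_t halfway = 1u << (shift_amt - 1u);
    if (rem > halfway || (rem == halfway && (half_mant & 1u))) ++half_mant;
    return static_cast<uint16_t>(sign | half_mant);
  }
  uint16_t bits = static_cast<uint16_t>(
      sign | (static_cast<uint32_t>(exp) << 10) | (mant >> 13));
  uint32_t rem = mant & 0x1fffu;  // the 13 mantissa bits that get dropped
  // Round to nearest even; a carry out of the mantissa correctly bumps the
  // exponent, and can round a just-too-large value up to infinity.
  if (rem > 0x1000u || (rem == 0x1000u && (bits & 1u))) ++bits;
  return bits;
}

int main() {
  std::printf("%#06x\n", float_to_half_bits(1.0f));      // 0x3c00
  std::printf("%#06x\n", float_to_half_bits(0.5f));      // 0x3800
  std::printf("%#06x\n", float_to_half_bits(0.33333f));  // 0x3555
  std::printf("%#06x\n", float_to_half_bits(-0.0f));     // 0x8000
  std::printf("%#06x\n", float_to_half_bits(65504.0f));  // 0x7bff, max half
  std::printf("%#06x\n", float_to_half_bits(65536.0f));  // 0x7c00, infinity
  return 0;
}

Any C++11 compiler will do; running the program prints exactly the encodings the unit test expects.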