提交 4f1aa5bc 编写于 作者: K Kexin Zhao

add test cases

上级 af37838e
......@@ -23,7 +23,7 @@ limitations under the License. */
#define USE_EIGEN
#ifdef USE_EIGEN // delete this #if macro
#include "Eigen/src/Core/arch/CUDA/Half.h"
#include "unsupported/Eigen/CXX11/Tensor"
#endif
#ifdef __GNUC__
......@@ -126,7 +126,7 @@ struct PADDLE_ALIGN(2) float16 {
// According to gcc, __fp16 can only be used as an argument to fp16
// intrinsic defined in arm_neon.h or as a storage type. It cannot
// be used as a formal function argument.
// TODO (kexinzhao): test it on RPI
// TODO(kexinzhao): test it on RPI
PADDLE_HOSTDEVICE inline float16(const float16_t* h) {
x = *reinterpret_cast<uint16_t*>(h);
}
......@@ -564,7 +564,7 @@ PADDLE_HOSTDEVICE inline bool operator>=(const float16& a, const float16& b) {
namespace fp16_impl {
Union Bits {
union Bits {
float f;
int32_t si;
uint32_t ui;
......@@ -584,7 +584,7 @@ constexpr int32_t maxC = maxN >> shift;
constexpr int32_t minC = minN >> shift;
constexpr int32_t sigC = sigN >> shiftSign;
const int32_t mulN = 0x52000000; //(1 << 23) / minN
const int32_t mulN = 0x52000000; // (1 << 23) / minN
const int32_t mulC = 0x33800000; // minN / (1 << (23 - shift))
const int32_t subC = 0x003FF; // max flt32 subnormal downshifted
const int32_t norC = 0x00400; // min flt32 normal downshifted
......@@ -693,7 +693,7 @@ PADDLE_HOSTDEVICE inline float half_to_float(float16 h) {
// Conversion routine adapted from
// http://stackoverflow.com/questions/1659440/32-bit-to-16-bit-floating-point-conversion
Bits v;
v.ui = x;
v.ui = h.x;
int32_t sign = v.si & sigC;
v.si ^= sign;
sign <<= shiftSign;
......@@ -711,6 +711,6 @@ PADDLE_HOSTDEVICE inline float half_to_float(float16 h) {
#endif
}
} // namespace half_impl
} // namespace fp16_impl
} // namespace paddle
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <gtest/gtest.h>
#include "paddle/math/float16.h"
namespace paddle {
#ifdef PADDLE_CUDA_FP16
// GPU-build-only checks: conversions routed through the CUDA half type,
// plus the same float -> float16 bit-pattern checks as the CPU tests.
TEST(float16, gpu) {
  // Round trip through the CUDA half type.
  float16 from_cuda_half = half(float16(1.0f));
  EXPECT_EQ(from_cuda_half.x, 0x3c00);

  // Round trip through Eigen::half.
  float16 from_eigen_half = Eigen::half(float16(0.5f));
  EXPECT_EQ(from_eigen_half.x, 0x3800);

  // Direct conversion from float; expected values are the binary16
  // encodings of each input.
  EXPECT_EQ(float16(1.0f).x, 0x3c00);
  EXPECT_EQ(float16(0.5f).x, 0x3800);
  EXPECT_EQ(float16(0.33333f).x, 0x3555);
  EXPECT_EQ(float16(0.0f).x, 0x0000);
  EXPECT_EQ(float16(-0.0f).x, 0x8000);
  EXPECT_EQ(float16(65504.0f).x, 0x7bff);  // largest finite binary16 value
  EXPECT_EQ(float16(65536.0f).x, 0x7c00);  // rounds up to +infinity

  // TODO: Conversion from double
  // TODO: Conversion from int
  // TODO: Conversion from bool
}

TEST(float16, arithmetic_gpu) {
  // 2 + 2 = 4 is exactly representable in binary16, so an exact
  // equality check is safe here.
  EXPECT_EQ(float(float16(2) + float16(2)), 4);
}

TEST(float16, comparison_gpu) {
  // Ordering of float16 values must match the ordering of the reals
  // they encode.
  EXPECT_TRUE(float16(1.0f) > float16(0.5f));
}
#endif
// CPU checks of float16 conversions against known IEEE 754 binary16
// bit patterns.
TEST(float16, conversion_cpu) {
  // Bit pattern after a round trip float16 -> Eigen::half -> float16;
  // the round trip must preserve the encoding exactly.
  auto eigen_round_trip_bits = [](float f) {
    return float16(Eigen::half(float16(f))).x;
  };
  // Bit pattern of a direct float -> float16 conversion.
  auto float_to_bits = [](float f) { return float16(f).x; };

  // Round trip through Eigen::half.
  EXPECT_EQ(eigen_round_trip_bits(1.0f), 0x3c00);
  EXPECT_EQ(eigen_round_trip_bits(0.5f), 0x3800);
  EXPECT_EQ(eigen_round_trip_bits(0.33333f), 0x3555);
  EXPECT_EQ(eigen_round_trip_bits(0.0f), 0x0000);
  EXPECT_EQ(eigen_round_trip_bits(-0.0f), 0x8000);
  EXPECT_EQ(eigen_round_trip_bits(65504.0f), 0x7bff);  // max finite binary16
  EXPECT_EQ(eigen_round_trip_bits(65536.0f), 0x7c00);  // +infinity

  // Direct conversion from float.
  EXPECT_EQ(float_to_bits(1.0f), 0x3c00);
  EXPECT_EQ(float_to_bits(0.5f), 0x3800);
  EXPECT_EQ(float_to_bits(0.33333f), 0x3555);
  EXPECT_EQ(float_to_bits(0.0f), 0x0000);
  EXPECT_EQ(float_to_bits(-0.0f), 0x8000);
  EXPECT_EQ(float_to_bits(65504.0f), 0x7bff);
  EXPECT_EQ(float_to_bits(65536.0f), 0x7c00);

  // TODO: Conversion from double
  // TODO: Conversion from int
  // TODO: Conversion from bool
}
TEST(float16, arithmetic_cpu) {
  // 2 + 2 = 4 is exactly representable in binary16, so comparing the
  // widened result for exact equality is safe.
  const float sum = float(float16(2) + float16(2));
  EXPECT_EQ(sum, 4);
}
TEST(float16, comparison_cpu) {
  // Ordering of float16 values must match the ordering of the reals
  // they encode.
  const float16 one(1.0f);
  const float16 one_half(0.5f);
  EXPECT_TRUE(one > one_half);
}
} // namespace paddle
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册