Unverified · Commit eeb4d165 · authored by superwinner1, committed by GitHub

【Hackathon No.55】add erf FP16 test and BF16 test (#52136)

* add erf FP16 test
Parent: ddcc1002
@@ -11,6 +11,7 @@ distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/phi/common/bfloat16.h"
#include "paddle/phi/common/float16.h"
#include "paddle/phi/kernels/funcs/eigen/eigen_function.h"
#include "paddle/phi/kernels/funcs/eigen/extensions.h"
@@ -46,10 +47,11 @@ struct EigenErfGrad<Eigen::DefaultDevice, T> {
}
};
-#define INSTANTIATION(FUNCTOR)                           \
-  template struct FUNCTOR<Eigen::DefaultDevice, float>;  \
-  template struct FUNCTOR<Eigen::DefaultDevice, double>; \
-  template struct FUNCTOR<Eigen::DefaultDevice, dtype::float16>
+#define INSTANTIATION(FUNCTOR)                                   \
+  template struct FUNCTOR<Eigen::DefaultDevice, float>;          \
+  template struct FUNCTOR<Eigen::DefaultDevice, double>;         \
+  template struct FUNCTOR<Eigen::DefaultDevice, dtype::float16>; \
+  template struct FUNCTOR<Eigen::DefaultDevice, dtype::bfloat16>
INSTANTIATION(EigenErf);
INSTANTIATION(EigenErfGrad);
#undef INSTANTIATION
......
@@ -14,6 +14,7 @@ limitations under the License. */
#ifndef _USE_MATH_DEFINES
#define _USE_MATH_DEFINES
#endif
#include "paddle/phi/common/bfloat16.h"
#include "paddle/phi/common/float16.h"
#include "paddle/phi/kernels/funcs/eigen/eigen_function.h"
#include "paddle/phi/kernels/funcs/eigen/extensions.h"
@@ -47,10 +48,11 @@ struct EigenErfGrad<Eigen::GpuDevice, T> {
}
};
-#define INSTANTIATION(FUNCTOR)                       \
-  template struct FUNCTOR<Eigen::GpuDevice, float>;  \
-  template struct FUNCTOR<Eigen::GpuDevice, double>; \
-  template struct FUNCTOR<Eigen::GpuDevice, dtype::float16>
+#define INSTANTIATION(FUNCTOR)                               \
+  template struct FUNCTOR<Eigen::GpuDevice, float>;          \
+  template struct FUNCTOR<Eigen::GpuDevice, double>;         \
+  template struct FUNCTOR<Eigen::GpuDevice, dtype::float16>; \
+  template struct FUNCTOR<Eigen::GpuDevice, dtype::bfloat16>
INSTANTIATION(EigenErf);
INSTANTIATION(EigenErfGrad);
#undef INSTANTIATION
......
@@ -15,6 +15,7 @@ limitations under the License. */
#include "paddle/phi/kernels/erf_grad_kernel.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/common/bfloat16.h"
#include "paddle/phi/common/float16.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/impl/erf_grad_kernel_impl.h"
@@ -25,4 +26,5 @@ PD_REGISTER_KERNEL(erf_grad,
                   phi::ErfGradKernel,
                   float,
                   double,
-                   phi::dtype::float16) {}
+                   phi::dtype::float16,
+                   phi::dtype::bfloat16) {}
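For reference, the gradient this kernel computes follows the standard identity d/dx erf(x) = 2/sqrt(pi) * exp(-x^2). A quick, Paddle-independent sanity check of that identity with NumPy and SciPy:

```python
import numpy as np
from scipy.special import erf

# d/dx erf(x) = 2/sqrt(pi) * exp(-x**2); compare against a central difference.
x = np.linspace(-1.0, 1.0, 5)
analytic = 2.0 / np.sqrt(np.pi) * np.exp(-(x**2))
eps = 1e-6
numeric = (erf(x + eps) - erf(x - eps)) / (2.0 * eps)
assert np.allclose(analytic, numeric, atol=1e-6)
```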
@@ -15,9 +15,16 @@ limitations under the License. */
#include "paddle/phi/kernels/erf_kernel.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/common/bfloat16.h"
#include "paddle/phi/common/float16.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/impl/erf_kernel_impl.h"
-PD_REGISTER_KERNEL(
-    erf, GPU, ALL_LAYOUT, phi::ErfKernel, float, double, phi::dtype::float16) {}
+PD_REGISTER_KERNEL(erf,
+                   GPU,
+                   ALL_LAYOUT,
+                   phi::ErfKernel,
+                   float,
+                   double,
+                   phi::dtype::float16,
+                   phi::dtype::bfloat16) {}
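With the erf and erf_grad registrations in place, the new dtype can be exercised end to end. A minimal sketch, assuming a CUDA build on a GPU with bfloat16 support (the `astype('bfloat16')` cast is assumed to be available in the installed Paddle build):

```python
import paddle

# Requires a CUDA build and a bf16-capable GPU; otherwise the kernels
# registered above are not reachable.
if paddle.is_compiled_with_cuda():
    x = paddle.uniform([11, 17], min=-1.0, max=1.0).astype('bfloat16')
    y = paddle.erf(x)  # dispatches to the newly registered bfloat16 GPU kernel
    print(y.dtype)     # paddle.bfloat16
```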
@@ -15,7 +15,7 @@
import unittest
import numpy as np
-from eager_op_test import OpTest
+from eager_op_test import OpTest, convert_float_to_uint16
from scipy.special import erf
import paddle
@@ -68,5 +68,54 @@ class TestErfLayer(unittest.TestCase):
        self.assertTrue('erf' in y.name)
+
+
+class TestErfFP16OP(OpTest):
+    def setUp(self):
+        self.op_type = "erf"
+        self.prim_op_type = "prim"
+        self.public_python_api = paddle.erf
+        self.python_api = paddle.erf
+        self.dtype = np.float16
+        self.x_shape = [11, 17]
+        x = np.random.uniform(-1, 1, size=self.x_shape).astype(self.dtype)
+        y_ref = erf(x).astype(self.dtype)
+        self.inputs = {'X': x}
+        self.outputs = {'Out': y_ref}
+
+    def test_check_output(self):
+        self.check_output()
+
+    def test_check_grad(self):
+        self.check_grad(['X'], 'Out', check_prim=True)
+
+
+@unittest.skipIf(
+    not paddle.fluid.core.is_compiled_with_cuda()
+    or not paddle.fluid.core.is_bfloat16_supported(
+        paddle.fluid.core.CUDAPlace(0)
+    ),
+    "core is not compiled with CUDA or does not support bfloat16",
+)
+class TestErfBF16OP(OpTest):
+    def setUp(self):
+        self.op_type = "erf"
+        self.prim_op_type = "prim"
+        self.public_python_api = paddle.erf
+        self.python_api = paddle.erf
+        self.dtype = np.uint16
+        self.x_shape = [11, 17]
+        x = np.random.uniform(-1, 1, size=self.x_shape).astype(np.float32)
+        y_ref = erf(x).astype(np.float32)
+        self.inputs = {'X': convert_float_to_uint16(x)}
+        self.outputs = {'Out': convert_float_to_uint16(y_ref)}
+
+    def test_check_output(self):
+        place = paddle.fluid.core.CUDAPlace(0)
+        self.check_output_with_place(place)
+
+    def test_check_grad(self):
+        place = paddle.fluid.core.CUDAPlace(0)
+        self.check_grad_with_place(place, ['X'], 'Out', check_prim=True)
+
+
if __name__ == '__main__':
    unittest.main()
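The BF16 test feeds the operator uint16 buffers because bfloat16 tensors are stored as raw 16-bit patterns: a bfloat16 value is the upper 16 bits of the corresponding float32. A simplified, truncating sketch of that round trip (not Paddle's exact `convert_float_to_uint16` helper, which may round differently):

```python
import numpy as np

# bfloat16 keeps the float32 sign, exponent, and top 7 mantissa bits,
# i.e. the upper half of the 32-bit pattern. This version truncates.
def float32_to_bf16_bits(a):
    a = np.ascontiguousarray(a, dtype=np.float32)
    return (a.view(np.uint32) >> 16).astype(np.uint16)

def bf16_bits_to_float32(bits):
    return (bits.astype(np.uint32) << 16).view(np.float32)

x = np.array([0.5, -0.25, 1.5], dtype=np.float32)  # exactly representable in bf16
assert np.array_equal(bf16_bits_to_float32(float32_to_bf16_bits(x)), x)
```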