Unverified commit 7a8635d8, authored by co63oc, committed by GitHub

Add elementwise_heaviside tests (#53549)

Parent: ee1aa69c
@@ -139,6 +139,7 @@ PD_REGISTER_KERNEL(heaviside_grad,
                    double,
                    int,
                    phi::dtype::float16,
+                   phi::dtype::bfloat16,
                    int64_t) {}
 PD_REGISTER_KERNEL(elementwise_pow_grad,
......
@@ -177,5 +177,6 @@ PD_REGISTER_KERNEL(heaviside,
                    double,
                    int,
                    float16,
+                   bfloat16,
                    int64_t) {}
 #endif
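
These two registrations add phi::dtype::bfloat16 to the GPU kernels for heaviside and heaviside_grad, so the op can now be dispatched for bfloat16 tensors. A minimal usage sketch, assuming a CUDA build of Paddle in which paddle.cast accepts the "bfloat16" dtype (the values are illustrative only, not part of this commit):

import paddle

paddle.set_device("gpu")  # assumes a CUDA build with bfloat16 support
x = paddle.to_tensor([-1.0, 0.0, 2.0])
y = paddle.to_tensor([0.5, 0.5, 0.5])

# Cast to bfloat16 so the newly registered kernels handle the computation.
xb = paddle.cast(x, "bfloat16")
yb = paddle.cast(y, "bfloat16")
out = paddle.heaviside(xb, yb)
print(paddle.cast(out, "float32").numpy())  # expected: [0. , 0.5, 1. ]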
@@ -15,15 +15,19 @@
 import unittest
 import numpy as np
-from eager_op_test import OpTest
+from eager_op_test import OpTest, convert_float_to_uint16
 import paddle
+from paddle.fluid import core


-def Heaviside_grad(x, y, dout):
-    tmp = np.zeros(x.shape).astype("float16")
+def Heaviside_grad(x, y, dout, astype="float16", is_bfloat16=False):
+    tmp = np.zeros(x.shape).astype(astype)
     dx = np.multiply(tmp, dout)
-    dy = np.multiply(np.equal(x, 0), dout).astype("float16")
+    dy = np.multiply(np.equal(x, 0), dout).astype(astype)
+    if is_bfloat16:
+        dx = convert_float_to_uint16(dx)
+        dy = convert_float_to_uint16(dy)
     return dx, dy
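
For context, heaviside(x, y) is 0 for x < 0, y for x == 0, and 1 for x > 0, so its gradient with respect to x is 0 wherever it is defined and its gradient with respect to y is 1 exactly where x == 0; Heaviside_grad above encodes this reference gradient for the fp16/bf16 tests. A standalone NumPy sketch of the same rule (illustrative only, independent of the test harness; heaviside_reference_grad is a name used just for this sketch):

import numpy as np

def heaviside_reference_grad(x, y, dout):
    # d out / d x is zero everywhere the function is differentiable;
    # d out / d y is one exactly where x == 0, so the upstream gradient
    # `dout` flows into y only at those positions.
    dx = np.zeros_like(x) * dout
    dy = np.equal(x, 0).astype(dout.dtype) * dout
    return dx, dy

x = np.array([-1.0, 0.0, 2.0])
y = np.array([0.5, 0.5, 0.5])
dout = np.ones_like(x)
print(np.heaviside(x, y))                    # [0.  0.5 1. ]
print(heaviside_reference_grad(x, y, dout))  # dx = [0. 0. 0.], dy = [0. 1. 0.]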
@@ -161,7 +165,7 @@ class TestHeavisideAPI_int32(TestHeavisideAPI_float64):
         self.dtype = "int32"


-class TestHeavisideAPI_float16(OpTest):
+class TestHeavisideFP16Op(OpTest):
     def setUp(self):
         self.dtype = np.float16
         self.op_type = "elementwise_heaviside"
@@ -185,6 +189,46 @@ class TestHeavisideAPI_float16(OpTest):
         )


+@unittest.skipIf(
+    not core.is_compiled_with_cuda()
+    or not core.is_bfloat16_supported(core.CUDAPlace(0)),
+    "core is not compiled with CUDA or not support bfloat16",
+)
+class TestHeavisideBF16Op(OpTest):
+    def setUp(self):
+        self.dtype = np.uint16
+        self.np_dtype = np.float32
+        self.op_type = "elementwise_heaviside"
+        self.python_api = paddle.heaviside
+        self.inputs = {
+            'X': np.random.uniform(1, 2, [20, 5]).astype(self.np_dtype),
+            'Y': np.random.uniform(1, 2, [20, 5]).astype(self.np_dtype),
+        }
+        self.outputs = {'Out': np.heaviside(self.inputs['X'], self.inputs['Y'])}
+        self.place = core.CUDAPlace(0)
+        self.inputs['X'] = convert_float_to_uint16(self.inputs['X'])
+        self.inputs['Y'] = convert_float_to_uint16(self.inputs['Y'])
+        self.outputs['Out'] = convert_float_to_uint16(self.outputs['Out'])
+
+    def test_check_output(self):
+        self.check_output_with_place(self.place)
+
+    def test_check_grad(self):
+        self.check_grad_with_place(
+            self.place,
+            ['X', 'Y'],
+            'Out',
+            user_defined_grads=Heaviside_grad(
+                self.inputs['X'],
+                self.inputs['Y'],
+                1 / self.inputs['X'].size,
+                self.np_dtype,
+                True,
+            ),
+        )
+
+
 class TestHeavisideError(unittest.TestCase):
     def test_input(self):
         paddle.disable_static()
......
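
TestHeavisideBF16Op stores its inputs and outputs as uint16 because OpTest represents bfloat16 tensors by their raw 16-bit pattern; convert_float_to_uint16 from eager_op_test performs that float32-to-bfloat16 packing. A minimal NumPy sketch of the idea (float32_to_bfloat16_bits is a name invented for this sketch, and it truncates rather than rounds, so it is not the helper's actual implementation):

import numpy as np

def float32_to_bfloat16_bits(a):
    # bfloat16 keeps the sign bit, all 8 exponent bits and the top 7
    # mantissa bits of float32, so the upper 16 bits of the float32
    # representation give a (truncated) bfloat16 encoding stored as uint16.
    a = np.ascontiguousarray(a, dtype=np.float32)
    return (a.view(np.uint32) >> 16).astype(np.uint16)

x = np.array([1.0, 1.5, 3.1415927], dtype=np.float32)
print(float32_to_bfloat16_bits(x))  # [16256 16320 16457]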