Unverified commit 17fec4e9, authored by cyberslack_lee, committed by GitHub

【Hackathon4 No58】empty_like fp16&bf16 API test (#52668)

Parent 3e66845f
@@ -15,6 +15,7 @@
 import unittest
 import numpy as np
+from eager_op_test import convert_uint16_to_float
 import paddle
 from paddle.fluid import core
@@ -38,7 +39,7 @@ class TestEmptyLikeAPICommon(unittest.TestCase):
                 f'shape should be {self.dst_shape}, but get {shape}',
             )
-        if data_type in ['float32', 'float64', 'int32', 'int64']:
+        if data_type in ['float16', 'float32', 'float64', 'int32', 'int64']:
             max_value = np.nanmax(out)
             min_value = np.nanmin(out)
             always_non_full_zero = max_value >= min_value
@@ -47,6 +48,16 @@ class TestEmptyLikeAPICommon(unittest.TestCase):
                 always_full_zero or always_non_full_zero,
                 'always_full_zero or always_non_full_zero.',
             )
+        elif data_type in ['uint16']:
+            uout = convert_uint16_to_float(out)
+            max_value = np.nanmax(uout)
+            min_value = np.nanmin(uout)
+            always_non_full_zero = max_value >= min_value
+            always_full_zero = max_value == 0.0 and min_value == 0.0
+            self.assertTrue(
+                always_full_zero or always_non_full_zero,
+                'always_full_zero or always_non_full_zero.',
+            )
         elif data_type in ['bool']:
             total_num = out.size
             true_num = np.sum(out)
@@ -154,16 +165,13 @@ class TestEmptyLikeAPI_Static(TestEmptyLikeAPICommon):
     def test_static_graph(self):
         paddle.enable_static()
-        dtype = 'float32'
         train_program = Program()
         startup_program = Program()
         with program_guard(train_program, startup_program):
-            x = np.random.random(self.x_shape).astype(dtype)
+            x = np.random.random(self.x_shape).astype(self.dtype)
             data_x = paddle.static.data(
-                'x', shape=self.data_x_shape, dtype=dtype
+                'x', shape=self.data_x_shape, dtype=self.dtype
             )
             out = paddle.empty_like(data_x)
@@ -176,7 +184,7 @@ class TestEmptyLikeAPI_Static(TestEmptyLikeAPICommon):
             exe = paddle.static.Executor(place)
             res = exe.run(train_program, feed={'x': x}, fetch_list=[out])
-            self.dst_dtype = dtype
+            self.dst_dtype = self.dtype
             self.dst_shape = x.shape
             self.__check_out__(res[0])
@@ -185,12 +193,80 @@ class TestEmptyLikeAPI_Static(TestEmptyLikeAPICommon):
     def init_config(self):
         self.x_shape = (200, 3)
         self.data_x_shape = [200, 3]
+        self.dtype = 'float32'
 
 
 class TestEmptyLikeAPI_Static2(TestEmptyLikeAPI_Static):
     def init_config(self):
         self.x_shape = (3, 200, 3)
         self.data_x_shape = [-1, 200, 3]
+        self.dtype = 'float32'
+
+
+class TestEmptyLikeAPI_StaticForFP16Op(TestEmptyLikeAPICommon):
+    def setUp(self):
+        self.init_config()
+
+    def init_config(self):
+        self.x_shape = (200, 3)
+        self.data_x_shape = [200, 3]
+        self.dtype = 'float16'
+
+    def test_static_graph(self):
+        paddle.enable_static()
+        if paddle.fluid.core.is_compiled_with_cuda():
+            place = paddle.CUDAPlace(0)
+            with paddle.static.program_guard(
+                paddle.static.Program(), paddle.static.Program()
+            ):
+                x = np.random.random([200, 3]).astype(self.dtype)
+                data_x = paddle.static.data(
+                    name="x", shape=[200, 3], dtype=self.dtype
+                )
+                out = paddle.empty_like(data_x)
+                exe = paddle.static.Executor(place)
+                res = exe.run(
+                    paddle.static.default_main_program(),
+                    feed={'x': x},
+                    fetch_list=[out],
+                )
+                self.dst_dtype = self.dtype
+                self.dst_shape = x.shape
+                self.__check_out__(res[0])
+
+
+class TestEmptyLikeAPI_StaticForBF16Op(TestEmptyLikeAPICommon):
+    def setUp(self):
+        self.init_config()
+
+    def init_config(self):
+        self.x_shape = (200, 3)
+        self.data_x_shape = [200, 3]
+        self.dtype = 'uint16'
+
+    def test_static_graph(self):
+        paddle.enable_static()
+        if paddle.fluid.core.is_compiled_with_cuda():
+            place = paddle.CUDAPlace(0)
+            with paddle.static.program_guard(
+                paddle.static.Program(), paddle.static.Program()
+            ):
+                x = np.random.random([200, 3]).astype(np.uint16)
+                data_x = paddle.static.data(
+                    name="x", shape=[200, 3], dtype=np.uint16
+                )
+                out = paddle.empty_like(data_x)
+                exe = paddle.static.Executor(place)
+                res = exe.run(
+                    paddle.static.default_main_program(),
+                    feed={'x': x},
+                    fetch_list=[out],
+                )
+                self.dst_dtype = self.dtype
+                self.dst_shape = x.shape
+                self.__check_out__(res[0])
+
+
 class TestEmptyError(unittest.TestCase):
...
@@ -120,6 +120,7 @@ def shape(input):
             'int64',
             'complex64',
             'complex128',
+            'uint16',
         ],
         'shape',
     )
...
@@ -1954,13 +1954,29 @@ def empty_like(x, dtype=None, name=None):
     check_variable_and_dtype(
         x,
         'x',
-        ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'],
+        [
+            'bool',
+            'float16',
+            'float32',
+            'float64',
+            'int32',
+            'int64',
+            'uint16',
+        ],
         'empty_like',
     )
     check_dtype(
         dtype,
         'dtype',
-        ['bool', 'float16', 'float32', 'float64', 'int32', 'int64'],
+        [
+            'bool',
+            'float16',
+            'float32',
+            'float64',
+            'int32',
+            'int64',
+            'uint16',
+        ],
         'empty_like',
    )
    out = helper.create_variable_for_type_inference(dtype=dtype)
...
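
As a quick reference for what the new test cases exercise, below is a minimal dygraph sketch (not part of the diff). It assumes a CUDA-enabled Paddle build, mirroring the `is_compiled_with_cuda()` guard used by the new static-graph tests; the tensor shape and dtype names here are illustrative only.

```python
import paddle

# Hypothetical standalone usage: paddle.empty_like returns an uninitialized
# tensor with the same shape and dtype as its input, so only shape and dtype
# are meaningful to check (the values themselves are arbitrary).
if paddle.is_compiled_with_cuda():
    paddle.set_device('gpu')
    x = paddle.ones([200, 3], dtype='float16')
    out = paddle.empty_like(x)  # values are uninitialized
    assert out.shape == [200, 3]
    assert out.dtype == paddle.float16
```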