From c6b7b2ad828f364d4aa151463baf2df29988fc62 Mon Sep 17 00:00:00 2001
From: Charles-hit <56987902+Charles-hit@users.noreply.github.com>
Date: Wed, 28 Jun 2023 17:19:06 +0800
Subject: [PATCH] support some prim ops zero dim part2 (#54907)

---
 test/legacy_test/test_assign_op.py       | 11 +++++++++-
 test/legacy_test/test_erf_op.py          | 10 ++++++++-
 test/legacy_test/test_expand_as_v2_op.py | 25 ++++++++++++++++++++++
 test/legacy_test/test_expand_v2_op.py    | 27 ++++++++++++++++++++++--
 test/legacy_test/test_flatten2_op.py     |  7 ++++++
 test/legacy_test/test_full_like_op.py    |  7 ++++++
 test/legacy_test/test_gather_nd_op.py    | 27 ++++++++++++++++++++++++
 test/legacy_test/test_reduce_op.py       | 23 +++++++++++++++++---
 8 files changed, 130 insertions(+), 7 deletions(-)

diff --git a/test/legacy_test/test_assign_op.py b/test/legacy_test/test_assign_op.py
index 9299b07fc21..e42d29cb0b1 100644
--- a/test/legacy_test/test_assign_op.py
+++ b/test/legacy_test/test_assign_op.py
@@ -32,10 +32,14 @@ class TestAssignOp(eager_op_test.OpTest):
         self.public_python_api = paddle.assign
         self.op_type = "assign"
         self.prim_op_type = "prim"
-        x = np.random.random(size=(100, 10)).astype('float64')
+        self.init_input_configs()
+        x = np.random.random(size=self.shape).astype('float64')
         self.inputs = {'X': x}
         self.outputs = {'Out': x}
 
+    def init_input_configs(self):
+        self.shape = (100, 10)
+
     def test_forward(self):
         paddle.enable_static()
         self.check_output()
@@ -47,6 +51,11 @@ class TestAssignOp(eager_op_test.OpTest):
         paddle.disable_static()
 
 
+class TestAssignOp_ZeroDim(TestAssignOp):
+    def init_input_configs(self):
+        self.shape = ()
+
+
 @unittest.skipIf(
     not paddle.is_compiled_with_cuda(), "FP16 test runs only on GPU"
 )
diff --git a/test/legacy_test/test_erf_op.py b/test/legacy_test/test_erf_op.py
index b560859cd41..23ccec74c23 100644
--- a/test/legacy_test/test_erf_op.py
+++ b/test/legacy_test/test_erf_op.py
@@ -30,12 +30,15 @@ class TestErfOp(OpTest):
         self.public_python_api = paddle.erf
         self.python_api = paddle.erf
         self.dtype = self._init_dtype()
-        self.x_shape = [11, 17]
+        self.init_shape()
         x = np.random.uniform(-1, 1, size=self.x_shape).astype(self.dtype)
         y_ref = erf(x).astype(self.dtype)
         self.inputs = {'X': x}
         self.outputs = {'Out': y_ref}
 
+    def init_shape(self):
+        self.x_shape = [11, 17]
+
     def _init_dtype(self):
         return "float64"
 
@@ -46,6 +49,11 @@ class TestErfOp(OpTest):
         self.check_grad(['X'], 'Out', check_prim=True)
 
 
+class TestErfOp_ZeroDim(TestErfOp):
+    def init_shape(self):
+        self.x_shape = []
+
+
 class TestErfLayer(unittest.TestCase):
     def _test_case(self, place):
         x = np.random.uniform(-1, 1, size=(11, 17)).astype(np.float64)
diff --git a/test/legacy_test/test_expand_as_v2_op.py b/test/legacy_test/test_expand_as_v2_op.py
index 990ea9be131..db866144eaf 100755
--- a/test/legacy_test/test_expand_as_v2_op.py
+++ b/test/legacy_test/test_expand_as_v2_op.py
@@ -54,6 +54,31 @@ class TestExpandAsBasic(OpTest):
         self.check_grad(['X'], 'Out', check_prim=True)
 
 
+class TestExpandAs_ZeroDim1(TestExpandAsBasic):
+    def init_inputs_and_outputs(self):
+        x = np.random.random(()).astype(self.dtype)
+        target_tensor = np.random.random(1).astype(self.dtype)
+        self.inputs = {'X': x, "Y": target_tensor}
+        self.attrs = {'target_shape': target_tensor.shape}
+        bcast_dims = [1]
+        output = np.tile(self.inputs['X'], bcast_dims)
+        self.outputs = {'Out': output}
+
+
+class TestExpandAs_ZeroDim2(TestExpandAsBasic):
+    def init_inputs_and_outputs(self):
+        x = np.random.random(()).astype(self.dtype)
+        target_tensor = np.random.random(()).astype(self.dtype)
+        self.inputs = {'X': x, "Y": target_tensor}
+        self.attrs = {'target_shape': target_tensor.shape}
+        bcast_dims = []
+        output = np.tile(self.inputs['X'], bcast_dims)
+        self.outputs = {'Out': output}
+
+    def if_enable_cinn(self):
+        self.enable_cinn = False
+
+
 @unittest.skipIf(
     not core.is_compiled_with_cuda()
     or not core.is_bfloat16_supported(core.CUDAPlace(0)),
diff --git a/test/legacy_test/test_expand_v2_op.py b/test/legacy_test/test_expand_v2_op.py
index 92cf190cb60..128bdda6da0 100644
--- a/test/legacy_test/test_expand_v2_op.py
+++ b/test/legacy_test/test_expand_v2_op.py
@@ -36,20 +36,43 @@ class TestExpandV2OpRank1(OpTest):
         self.attrs = {'shape': self.shape}
         output = np.tile(self.inputs['X'], self.expand_times)
         self.outputs = {'Out': output}
-        self.enable_cinn = True
+        self.if_enable_cinn()
 
     def init_data(self):
         self.ori_shape = [100]
         self.shape = [100]
         self.expand_times = [1]
 
+    def if_enable_cinn(self):
+        pass
+
     def test_check_output(self):
-        self.check_output(check_cinn=self.enable_cinn)
+        self.check_output(check_cinn=True)
 
     def test_check_grad(self):
         self.check_grad(['X'], 'Out', check_prim=True)
 
 
+class TestExpandV2OpRank1_ZeroDim1(TestExpandV2OpRank1):
+    def init_data(self):
+        self.ori_shape = []
+        self.shape = [10]
+        self.expand_times = [10]
+
+    def if_enable_cinn(self):
+        self.enable_cinn = False
+
+
+class TestExpandV2OpRank1_ZeroDim2(TestExpandV2OpRank1):
+    def init_data(self):
+        self.ori_shape = []
+        self.shape = []
+        self.expand_times = []
+
+    def if_enable_cinn(self):
+        pass
+
+
 class TestExpandV2OpRank2_DimExpanding(TestExpandV2OpRank1):
     def init_data(self):
         self.ori_shape = [120]
diff --git a/test/legacy_test/test_flatten2_op.py b/test/legacy_test/test_flatten2_op.py
index 1b3ca5f9c9a..1981b3f4ab3 100644
--- a/test/legacy_test/test_flatten2_op.py
+++ b/test/legacy_test/test_flatten2_op.py
@@ -44,6 +44,13 @@ class TestFlattenOp(OpTest):
         self.attrs = {"axis": self.axis}
 
 
+class TestFlattenOp_ZeroDim(TestFlattenOp):
+    def init_test_case(self):
+        self.in_shape = ()
+        self.axis = 0
+        self.new_shape = 1
+
+
 class TestFlattenOp1(TestFlattenOp):
     def init_test_case(self):
         self.in_shape = (3, 2, 5, 4)
diff --git a/test/legacy_test/test_full_like_op.py b/test/legacy_test/test_full_like_op.py
index 028b1ad8914..d0c326d7b19 100644
--- a/test/legacy_test/test_full_like_op.py
+++ b/test/legacy_test/test_full_like_op.py
@@ -142,6 +142,13 @@ class TestFullLikeOp1(OpTest):
         pass
 
 
+class TestFullLikeOp1_ZeroDim(TestFullLikeOp1):
+    def init_data(self):
+        self.fill_value = 5
+        self.shape = []
+        self.dtype = np.float32
+
+
 class TestFullLikeOp2(TestFullLikeOp1):
     def init_data(self):
         self.fill_value = 1000
diff --git a/test/legacy_test/test_gather_nd_op.py b/test/legacy_test/test_gather_nd_op.py
index 1c0526b4f1d..6102a0a8fcc 100644
--- a/test/legacy_test/test_gather_nd_op.py
+++ b/test/legacy_test/test_gather_nd_op.py
@@ -122,6 +122,33 @@ class TestGatherNdOpWithIndex1(OpTest):
         self.check_grad(['X'], 'Out', check_prim=True)
 
 
+class TestGatherNdOpWithIndex1_ZeroDim(TestGatherNdOpWithIndex1):
+    def setUp(self):
+        self.op_type = "gather_nd"
+        self.prim_op_type = "prim"
+        self.python_api = paddle.gather_nd
+        self.public_python_api = paddle.gather_nd
+        self.config_dtype()
+        self.if_enable_cinn()
+        if self.dtype == np.float64:
+            target_dtype = "float64"
+        elif self.dtype == np.float16:
+            target_dtype = "float16"
+        else:
+            target_dtype = "float32"
+        xnp = np.random.random((100,)).astype(target_dtype)
+        index = np.array([1]).astype("int32")
+        output = xnp[index[-1]]
+        if self.dtype == np.uint16:
+            xnp = convert_float_to_uint16(xnp)
+            output = convert_float_to_uint16(output)
+        self.inputs = {'X': xnp, 'Index': index}
+        self.outputs = {'Out': output}
+
+    def if_enable_cinn(self):
+        self.enable_cinn = False
+
+
 class TestGatherNdOpWithIndex1FP16(TestGatherNdOpWithIndex1):
     def config_dtype(self):
         self.dtype = np.float16
diff --git a/test/legacy_test/test_reduce_op.py b/test/legacy_test/test_reduce_op.py
index 65dfa25b0cf..5875e959c35 100644
--- a/test/legacy_test/test_reduce_op.py
+++ b/test/legacy_test/test_reduce_op.py
@@ -279,15 +279,18 @@ class TestMaxOp_ZeroDim(OpTest):
         self.python_api = paddle.max
         self.public_python_api = paddle.max
         self.if_enable_cinn()
+        self.init_inputs_and_outputs()
+
+    def if_enable_cinn(self):
+        self.enable_cinn = False
+
+    def init_inputs_and_outputs(self):
         self.inputs = {'X': np.random.random([]).astype("float64")}
         self.attrs = {'dim': []}
         self.outputs = {
             'Out': self.inputs['X'].max(axis=tuple(self.attrs['dim']))
         }
 
-    def if_enable_cinn(self):
-        self.enable_cinn = False
-
     def test_check_output(self):
         self.check_output()
 
@@ -301,6 +304,20 @@ class TestMaxOp_ZeroDim(OpTest):
     )
 
 
+class TestMaxOp_ZeroDim1(TestMaxOp_ZeroDim):
+    def init_inputs_and_outputs(self):
+        self.inputs = {'X': np.random.random([5]).astype("float64")}
+        self.attrs = {'dim': [0]}
+        self.outputs = {'Out': self.inputs['X'].max(axis=(0,))}
+
+
+class TestMaxOp_ZeroDim2(TestMaxOp_ZeroDim1):
+    def init_inputs_and_outputs(self):
+        self.inputs = {'X': np.random.random([5, 20]).astype("float64")}
+        self.attrs = {'dim': [0, 1]}
+        self.outputs = {'Out': self.inputs['X'].max(axis=(0, 1))}
+
+
 class TestMaxFP32Op(OpTest):
     """Remove Max with subgradient from gradient check to confirm the success of CI."""
 
--
GitLab
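
Note (not part of the patch above): every hunk follows the same pattern — the base OpTest case moves its shape/config setup into an init_* hook (init_shape, init_data, init_inputs_and_outputs, ...), and a *_ZeroDim subclass overrides only that hook with an empty shape. The snippet below is a minimal, self-contained Python sketch of that pattern and of the NumPy zero-dim semantics the new cases rely on; it assumes only NumPy, and the class names BaseCase/ZeroDimCase are illustrative, not part of Paddle's test machinery.

    import numpy as np

    # np.random.random(()) yields a 0-d ndarray: shape () and ndim 0.
    x = np.random.random(())
    assert x.shape == () and x.ndim == 0

    # Tiling with an empty reps list keeps the array 0-d, while [1] promotes it
    # to shape (1,) -- the expected outputs of TestExpandAs_ZeroDim2 and
    # TestExpandAs_ZeroDim1 above.
    assert np.tile(x, []).shape == ()
    assert np.tile(x, [1]).shape == (1,)

    # Reducing over an empty axis tuple performs no reduction, so a 0-d input
    # stays 0-d -- the expected output of TestMaxOp_ZeroDim above.
    assert x.max(axis=tuple([])).shape == ()


    # The subclass-override pattern used throughout the patch, sketched with
    # plain classes rather than the real OpTest base class:
    class BaseCase:
        def setUp(self):
            self.init_shape()
            self.x = np.random.random(self.shape)

        def init_shape(self):
            self.shape = (100, 10)


    class ZeroDimCase(BaseCase):
        def init_shape(self):
            self.shape = ()  # only the shape hook changes; setUp is reused


    case = ZeroDimCase()
    case.setUp()
    assert case.x.shape == ()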