From 97fe79a98280fbf7d61b0053a211cf1c25b625bb Mon Sep 17 00:00:00 2001
From: Fisher
Date: Tue, 23 May 2023 17:57:53 +0800
Subject: [PATCH] [CINN] Enable check_cinn on some tests (#53710)

* Enable check_cinn on some tests

Tests: bitwise, compare, shape, assign_value, sum, expand_v2,
lookup_table, lookup_table_v2

* Enable more CINN tests

Tests with CINN: expand_v2, matmul, matmul_v2, mul, norm, one_hot_v2

Add target selection in cinn_launch_op

* Revert test_mul_op

* Improve op unit tests

---
 paddle/fluid/operators/cinn/cinn_launch_op.cc |  6 ++
 paddle/fluid/operators/cinn/cinn_launch_op.h  |  4 ++
 .../fluid/tests/unittests/CMakeLists.txt      | 10 +++-
 .../tests/unittests/test_assign_value_op.py   |  2 +-
 .../fluid/tests/unittests/test_bitwise_op.py  |  8 +--
 .../fluid/tests/unittests/test_compare_op.py  |  4 +-
 .../tests/unittests/test_expand_v2_op.py      | 20 +++----
 .../tests/unittests/test_lookup_table_op.py   | 28 ++++-----
 .../unittests/test_lookup_table_v2_op.py      | 12 ++--
 .../fluid/tests/unittests/test_matmul_op.py   | 18 ++++--
 .../tests/unittests/test_matmul_v2_op.py      | 57 ++++++++++++++++---
 .../fluid/tests/unittests/test_norm_op.py     | 28 ++++++---
 .../tests/unittests/test_one_hot_v2_op.py     | 10 ++--
 .../fluid/tests/unittests/test_shape_op.py    |  4 +-
 .../fluid/tests/unittests/test_sum_op.py      |  8 +--
 15 files changed, 152 insertions(+), 67 deletions(-)

diff --git a/paddle/fluid/operators/cinn/cinn_launch_op.cc b/paddle/fluid/operators/cinn/cinn_launch_op.cc
index ad74d88f70e..3ab9f6ba99b 100644
--- a/paddle/fluid/operators/cinn/cinn_launch_op.cc
+++ b/paddle/fluid/operators/cinn/cinn_launch_op.cc
@@ -17,6 +17,7 @@
 #include <memory>
 #include <vector>
 
+#include "cinn/common/target.h"
 #include "cinn/hlir/framework/graph_compiler.h"
 #include "cinn/runtime/cinn_runtime.h"
 #include "cinn/runtime/flags.h"
@@ -94,6 +95,11 @@ void SetCinnRandomSeed() {
   ::cinn::runtime::RandomSeed::GetOrSet(seed);
 }
 
+void SetCinnTarget(const ::cinn::common::Target& target) {
+  VLOG(4) << "Set CINN compile target to " << target;
+  ::cinn::runtime::CurrentTarget::SetCurrentTarget(target);
+}
+
 }  // namespace details
 
 class CinnLaunchOp : public framework::OperatorWithKernel {
diff --git a/paddle/fluid/operators/cinn/cinn_launch_op.h b/paddle/fluid/operators/cinn/cinn_launch_op.h
index 59970412ea6..90751c72c60 100644
--- a/paddle/fluid/operators/cinn/cinn_launch_op.h
+++ b/paddle/fluid/operators/cinn/cinn_launch_op.h
@@ -58,6 +58,9 @@ void SetCinnRuntimeFlags();
 template <typename DeviceContext>
 void SetCinnRandomSeed();
 
+// set CINN compile target
+void SetCinnTarget(const ::cinn::common::Target& target);
+
 }  // namespace details
 
 template <typename DeviceContext, typename T>
@@ -115,6 +118,7 @@ class CinnLaunchOpKernel : public framework::OpKernel<T> {
         "Step 2. Get compilation result of the graph");
     // Step 2. Get compilation result of the graph
     auto target = details::PlaceToCinnTarget(place);
+    details::SetCinnTarget(target);
     using ClockType = std::chrono::steady_clock;
     std::chrono::time_point<ClockType> start_t, end_t;
     if (VLOG_IS_ON(1)) {
diff --git a/python/paddle/fluid/tests/unittests/CMakeLists.txt b/python/paddle/fluid/tests/unittests/CMakeLists.txt
index 257e3d1a6b7..d4c50707cbe 100755
--- a/python/paddle/fluid/tests/unittests/CMakeLists.txt
+++ b/python/paddle/fluid/tests/unittests/CMakeLists.txt
@@ -1116,7 +1116,15 @@ set(TEST_CINN_OPS
     test_tile_op
     test_roll_op
     test_sum_op
-    test_elementwise_min_op)
+    test_elementwise_min_op
+    test_bitwise_op
+    test_compare_op
+    test_shape_op
+    test_assign_value_op
+    test_lookup_table_op
+    test_lookup_table_v2_op
+    test_norm_op
+    test_one_hot_v2_op)
 
 foreach(TEST_CINN_OPS ${TEST_CINN_OPS})
   if(WITH_CINN)
diff --git a/python/paddle/fluid/tests/unittests/test_assign_value_op.py b/python/paddle/fluid/tests/unittests/test_assign_value_op.py
index 243dccc2422..7cb5dece346 100644
--- a/python/paddle/fluid/tests/unittests/test_assign_value_op.py
+++ b/python/paddle/fluid/tests/unittests/test_assign_value_op.py
@@ -49,7 +49,7 @@ class TestAssignValueOp(eager_op_test.OpTest):
         self.attrs["fp32_values"] = [float(v) for v in self.value.flat]
 
     def test_forward(self):
-        self.check_output()
+        self.check_output(check_cinn=True)
 
 
 class TestAssignValueOp2(TestAssignValueOp):
diff --git a/python/paddle/fluid/tests/unittests/test_bitwise_op.py b/python/paddle/fluid/tests/unittests/test_bitwise_op.py
index 084552e6b1a..728ea62dbf2 100644
--- a/python/paddle/fluid/tests/unittests/test_bitwise_op.py
+++ b/python/paddle/fluid/tests/unittests/test_bitwise_op.py
@@ -43,7 +43,7 @@ class TestBitwiseAnd(OpTest):
         self.outputs = {'Out': out}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_cinn=True)
 
     def test_check_grad(self):
         pass
@@ -150,7 +150,7 @@ class TestBitwiseOr(OpTest):
         self.outputs = {'Out': out}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_cinn=True)
 
     def test_check_grad(self):
         pass
@@ -258,7 +258,7 @@ class TestBitwiseXor(OpTest):
         self.outputs = {'Out': out}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_cinn=True)
 
     def test_check_grad(self):
         pass
@@ -363,7 +363,7 @@ class TestBitwiseNot(OpTest):
         self.outputs = {'Out': out}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_cinn=True)
 
     def test_check_grad(self):
         pass
diff --git a/python/paddle/fluid/tests/unittests/test_compare_op.py b/python/paddle/fluid/tests/unittests/test_compare_op.py
index 0b8c4aa8eae..2f4e12f2b4e 100755
--- a/python/paddle/fluid/tests/unittests/test_compare_op.py
+++ b/python/paddle/fluid/tests/unittests/test_compare_op.py
@@ -35,7 +35,7 @@ def create_test_class(op_type, typename, callback):
             self.op_type = op_type
 
         def test_output(self):
-            self.check_output()
+            self.check_output(check_cinn=True)
 
         def test_errors(self):
             paddle.enable_static()
@@ -460,7 +460,7 @@ def create_bf16_case(op_type, callback):
             self.outputs = {'Out': real_result}
 
         def test_check_output(self):
-            self.check_output()
+            self.check_output(check_cinn=True)
 
     cls_name = f"BF16TestCase_{op_type}"
     TestCompareOpBF16Op.__name__ = cls_name
diff --git a/python/paddle/fluid/tests/unittests/test_expand_v2_op.py b/python/paddle/fluid/tests/unittests/test_expand_v2_op.py
index 27fc92292f3..92cf190cb60 100644
--- a/python/paddle/fluid/tests/unittests/test_expand_v2_op.py
+++ b/python/paddle/fluid/tests/unittests/test_expand_v2_op.py
@@ -44,7 +44,7 @@ class TestExpandV2OpRank1(OpTest):
         self.expand_times = [1]
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_cinn=self.enable_cinn)
 
     def test_check_grad(self):
         self.check_grad(['X'], 'Out', check_prim=True)
@@ -107,10 +107,10 @@ class TestExpandV2OpRank1_tensor_attr(OpTest):
         self.infer_expand_shape = [-1]
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_cinn=True)
 
     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_cinn=True)
 
 
 class TestExpandV2OpRank2_Corner_tensor_attr(TestExpandV2OpRank1_tensor_attr):
@@ -144,10 +144,10 @@ class TestExpandV2OpRank1_tensor(OpTest):
         self.expand_shape = [2, 100]
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_cinn=True)
 
     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_cinn=True)
 
 
 # Situation 4: input x is Integer
@@ -165,7 +165,7 @@ class TestExpandV2OpInteger(OpTest):
         self.outputs = {'Out': output}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_cinn=True)
 
 
 # Situation 5: input x is Bool
@@ -181,7 +181,7 @@ class TestExpandV2OpBoolean(OpTest):
         self.outputs = {'Out': output}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_cinn=True)
 
 
 # Situation 6: input x is Integer
@@ -199,7 +199,7 @@ class TestExpandV2OpInt64_t(OpTest):
         self.outputs = {'Out': output}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_cinn=True)
 
 
 # Situation 7: input x is Float16
@@ -218,7 +218,7 @@ class TestExpandV2FP16Op(OpTest):
         self.outputs = {'Out': output}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_cinn=True)
 
     def test_check_grad(self):
         self.check_grad(['X'], 'Out', check_prim=True)
@@ -245,7 +245,7 @@ class TestExpandV2BF16Op(OpTest):
 
     def test_check_output(self):
         place = core.CUDAPlace(0)
-        self.check_output_with_place(place)
+        self.check_output_with_place(place, check_cinn=True)
 
     def test_check_grad(self):
         place = core.CUDAPlace(0)
diff --git a/python/paddle/fluid/tests/unittests/test_lookup_table_op.py b/python/paddle/fluid/tests/unittests/test_lookup_table_op.py
index de868db11fb..cd26f390747 100644
--- a/python/paddle/fluid/tests/unittests/test_lookup_table_op.py
+++ b/python/paddle/fluid/tests/unittests/test_lookup_table_op.py
@@ -39,10 +39,10 @@ class TestLookupTableOp(OpTest):
         self.outputs = {'Out': table[ids]}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_cinn=True)
 
     def test_check_grad(self):
-        self.check_grad(['W'], 'Out', no_grad_set=set('Ids'))
+        self.check_grad(['W'], 'Out', no_grad_set=set('Ids'), check_cinn=True)
 
 
 class TestLookupTableOpWithTensorIds(OpTest):
@@ -56,10 +56,10 @@ class TestLookupTableOpWithTensorIds(OpTest):
         self.outputs = {'Out': table[ids.flatten()].reshape((2, 4, 5, 31))}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_cinn=True)
 
     def test_check_grad(self):
-        self.check_grad(['W'], 'Out', no_grad_set=set('Ids'))
+        self.check_grad(['W'], 'Out', no_grad_set=set('Ids'), check_cinn=True)
 
 
 @skip_check_grad_ci(
@@ -73,7 +73,7 @@ class TestLookupTableOpWithPadding(TestLookupTableOp):
         padding_idx = np.random.choice(ids, 1)[0]
         self.outputs['Out'][ids == padding_idx] = np.zeros(31)
         self.attrs = {'padding_idx': int(padding_idx)}
-        self.check_output()
+        self.check_output(check_cinn=True)
 
 
 @skip_check_grad_ci(
@@ -88,7 +88,7 @@ class TestLookupTableOpWithTensorIdsAndPadding(TestLookupTableOpWithTensorIds):
         padding_idx = np.random.choice(flatten_idx, 1)[0]
         self.outputs['Out'][np.squeeze(ids == padding_idx)] = np.zeros(31)
         self.attrs = {'padding_idx': padding_idx}
-        self.check_output()
+        self.check_output(check_cinn=True)
 
 
 class TestLookupTableWIsSelectedRows(unittest.TestCase):
@@ -212,7 +212,7 @@ class TestLookupTableOpInt8(OpTest):
         self.outputs = {'Out': table[ids]}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_cinn=True)
 
     def test_check_grad(self):
         # since int8 type only be used in test and inference, there is
@@ -233,7 +233,7 @@ class TestLookupTableOpWithTensorIdsInt8(OpTest):
         self.outputs = {'Out': table[ids.flatten()].reshape((2, 4, 5, 31))}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_cinn=True)
 
     def test_check_grad(self):
         # since int8 type only be used in test and inference, there is
@@ -247,7 +247,7 @@ class TestLookupTableOpWithPaddingInt8(TestLookupTableOpInt8):
         padding_idx = np.random.choice(ids, 1)[0]
         self.outputs['Out'][ids == padding_idx] = np.zeros(31)
         self.attrs = {'padding_idx': int(padding_idx)}
-        self.check_output()
+        self.check_output(check_cinn=True)
 
     def test_check_grad(self):
         # Since paddings are not trainable and fixed in forward, the gradient of
@@ -264,7 +264,7 @@ class TestLookupTableOpWithTensorIdsAndPaddingInt8(
         padding_idx = np.random.choice(flatten_idx, 1)[0]
         self.outputs['Out'][np.squeeze(ids == padding_idx)] = np.zeros(31)
         self.attrs = {'padding_idx': padding_idx}
-        self.check_output()
+        self.check_output(check_cinn=True)
 
     def test_check_grad(self):
         # Since paddings are not trainable and fixed in forward, the gradient of
@@ -354,7 +354,7 @@ class TestLookupTableOpInt16(OpTest):
         self.outputs = {'Out': table[ids]}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_cinn=True)
 
 
 @skip_check_grad_ci(reason="Int16 type only be used in test and inference.")
@@ -371,7 +371,7 @@ class TestLookupTableOpWithTensorIdsInt16(OpTest):
         self.outputs = {'Out': table[ids.flatten()].reshape((2, 4, 5, 31))}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_cinn=True)
 
 
 @skip_check_grad_ci(reason="Int16 type only be used in test and inference.")
@@ -381,7 +381,7 @@ class TestLookupTableOpWithPaddingInt16(TestLookupTableOpInt16):
         padding_idx = np.random.choice(ids, 1)[0]
         self.outputs['Out'][ids == padding_idx] = np.zeros(31)
         self.attrs = {'padding_idx': int(padding_idx)}
-        self.check_output()
+        self.check_output(check_cinn=True)
 
 
 @skip_check_grad_ci(reason="Int16 type only be used in test and inference.")
@@ -394,7 +394,7 @@ class TestLookupTableOpWithTensorIdsAndPaddingInt16(
         padding_idx = np.random.choice(flatten_idx, 1)[0]
         self.outputs['Out'][np.squeeze(ids == padding_idx)] = np.zeros(31)
         self.attrs = {'padding_idx': padding_idx}
-        self.check_output()
+        self.check_output(check_cinn=True)
 
 
 class TestLookupTableWIsSelectedRowsInt16(unittest.TestCase):
diff --git a/python/paddle/fluid/tests/unittests/test_lookup_table_v2_op.py b/python/paddle/fluid/tests/unittests/test_lookup_table_v2_op.py
index 54e9992a13d..b36f914a257 100644
--- a/python/paddle/fluid/tests/unittests/test_lookup_table_v2_op.py
+++ b/python/paddle/fluid/tests/unittests/test_lookup_table_v2_op.py
@@ -56,10 +56,10 @@ class TestLookupTableOp(OpTest):
         return "int64"
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_cinn=True)
 
     def test_check_grad(self):
-        self.check_grad(['W'], 'Out', no_grad_set=set('Ids'))
+        self.check_grad(['W'], 'Out', no_grad_set=set('Ids'), check_cinn=True)
 
 
 class TestLookupTableOpInt16(OpTest):
@@ -87,10 +87,10 @@ class TestLookupTableOpWithTensorIds(OpTest):
         self.outputs = {'Out': table[ids.flatten()].reshape((2, 4, 5, 31))}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_cinn=True)
 
     def test_check_grad(self):
-        self.check_grad(['W'], 'Out', no_grad_set=set('Ids'))
+        self.check_grad(['W'], 'Out', no_grad_set=set('Ids'), check_cinn=True)
 
 
 @skip_check_grad_ci(
@@ -104,7 +104,7 @@ class TestLookupTableOpWithPadding(TestLookupTableOp):
         padding_idx = np.random.choice(ids, 1)[0]
         self.outputs['Out'][ids == padding_idx] = np.zeros(31)
         self.attrs = {'padding_idx': int(padding_idx)}
-        self.check_output()
+        self.check_output(check_cinn=True)
 
 
 @skip_check_grad_ci(
@@ -119,7 +119,7 @@ class TestLookupTableOpWithTensorIdsAndPadding(TestLookupTableOpWithTensorIds):
         padding_idx = np.random.choice(flatten_idx, 1)[0]
         self.outputs['Out'][np.squeeze(ids == padding_idx)] = np.zeros(31)
         self.attrs = {'padding_idx': padding_idx}
-        self.check_output()
+        self.check_output(check_cinn=True)
 
 
 class TestLookupTableWIsSelectedRows(unittest.TestCase):
diff --git a/python/paddle/fluid/tests/unittests/test_matmul_op.py b/python/paddle/fluid/tests/unittests/test_matmul_op.py
index 30085a841de..c7c870b3c46 100644
--- a/python/paddle/fluid/tests/unittests/test_matmul_op.py
+++ b/python/paddle/fluid/tests/unittests/test_matmul_op.py
@@ -100,19 +100,29 @@ class Generator:
         self.outputs = {'Out': Out}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_cinn=True)
 
     def test_check_grad_normal(self):
-        self.check_grad(['X', 'Y'], 'Out', max_relative_error=1e-3)
+        self.check_grad(
+            ['X', 'Y'], 'Out', max_relative_error=1e-3, check_cinn=True
+        )
 
     def test_check_grad_ignore_x(self):
         self.check_grad(
-            ['Y'], 'Out', max_relative_error=1e-3, no_grad_set=set("X")
+            ['Y'],
+            'Out',
+            max_relative_error=1e-3,
+            no_grad_set=set("X"),
+            check_cinn=True,
         )
 
     def test_check_grad_ignore_y(self):
         self.check_grad(
-            ['X'], 'Out', max_relative_error=1e-3, no_grad_set=set('Y')
+            ['X'],
+            'Out',
+            max_relative_error=1e-3,
+            no_grad_set=set('Y'),
+            check_cinn=True,
         )
 
 
diff --git a/python/paddle/fluid/tests/unittests/test_matmul_v2_op.py b/python/paddle/fluid/tests/unittests/test_matmul_v2_op.py
index a0c41b63b05..e0dcc3bfdd3 100644
--- a/python/paddle/fluid/tests/unittests/test_matmul_v2_op.py
+++ b/python/paddle/fluid/tests/unittests/test_matmul_v2_op.py
@@ -103,13 +103,28 @@ class TestMatMulV2Op(OpTest):
         self.outputs = {'Out': result}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(
+            check_cinn=self.check_cinn if hasattr(self, 'check_cinn') else True
+        )
 
     def test_check_grad(self):
         if core.is_compiled_with_rocm():
-            self.check_grad(['X', 'Y'], 'Out', max_relative_error=1e-2)
+            self.check_grad(
+                ['X', 'Y'],
+                'Out',
+                max_relative_error=1e-2,
+                check_cinn=self.check_cinn
+                if hasattr(self, 'check_cinn')
+                else True,
+            )
         else:
-            self.check_grad(['X', 'Y'], 'Out')
+            self.check_grad(
+                ['X', 'Y'],
+                'Out',
+                check_cinn=self.check_cinn
+                if hasattr(self, 'check_cinn')
+                else True,
+            )
 
 
 class TestMatMulOp2(TestMatMulV2Op):
@@ -290,6 +305,7 @@ class TestMatMulOp16(TestMatMulV2Op):
         self.y_shape = (1, 2, 2, 100, 2)
         self.trans_x = False
         self.trans_y = False
+        self.check_cinn = False
 
 
 class TestMatMulOp17(TestMatMulV2Op):
@@ -343,7 +359,13 @@ def create_test_fp16_class(parent, atol=0.001, max_relative_error=1.0):
         if core.is_compiled_with_cuda():
             place = core.CUDAPlace(0)
             if core.is_float16_supported(place):
-                self.check_output_with_place(place, atol=atol)
+                self.check_output_with_place(
+                    place,
+                    atol=atol,
+                    check_cinn=self.check_cinn
+                    if hasattr(self, 'check_cinn')
+                    else True,
+                )
 
         def test_check_grad(self):
             place = core.CUDAPlace(0)
@@ -353,6 +375,9 @@ def create_test_fp16_class(parent, atol=0.001, max_relative_error=1.0):
                 ['X', 'Y'],
                 'Out',
                 max_relative_error=max_relative_error,
+                check_cinn=self.check_cinn
+                if hasattr(self, 'check_cinn')
+                else True,
             )
 
     cls_name = "{}_{}".format(parent.__name__, "Fp16")
@@ -405,7 +430,13 @@ def create_test_bf16_class(parent, atol=0.01):
 
         def test_check_output(self):
             place = core.CUDAPlace(0)
-            self.check_output_with_place(place, atol=atol)
+            self.check_output_with_place(
+                place,
+                atol=atol,
+                check_cinn=self.check_cinn
+                if hasattr(self, 'check_cinn')
+                else True,
+            )
 
         def test_check_grad_x(self):
             place = core.CUDAPlace(0)
@@ -416,6 +447,9 @@ def create_test_bf16_class(parent, atol=0.01):
                 'Out',
                 no_grad_set={'Y'},
                 user_defined_grads=[numeric_grads],
+                check_cinn=self.check_cinn
+                if hasattr(self, 'check_cinn')
+                else True,
             )
 
         def test_check_grad_y(self):
@@ -427,6 +461,9 @@ def create_test_bf16_class(parent, atol=0.01):
                 'Out',
                 no_grad_set={'X'},
                 user_defined_grads=[numeric_grads],
+                check_cinn=self.check_cinn
+                if hasattr(self, 'check_cinn')
+                else True,
             )
 
         def test_check_grad(self):
@@ -596,7 +633,7 @@ class TestComplexMatMulOp(OpTest):
         self.grad_y = np.matmul(np.conj(self.x).T, self.grad_out)
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_cinn=False)
 
     def test_check_grad_normal(self):
         self.check_grad(
@@ -604,6 +641,7 @@ class TestComplexMatMulOp(OpTest):
             'Out',
             user_defined_grads=[self.grad_x, self.grad_y],
             user_defined_grad_outputs=[self.grad_out],
+            check_cinn=False,
         )
 
     def test_check_grad_ingore_x(self):
@@ -613,6 +651,7 @@ class TestComplexMatMulOp(OpTest):
             no_grad_set=set("X"),
             user_defined_grads=[self.grad_y],
             user_defined_grad_outputs=[self.grad_out],
+            check_cinn=False,
         )
 
     def test_check_grad_ingore_y(self):
@@ -622,6 +661,7 @@ class TestComplexMatMulOp(OpTest):
             no_grad_set=set('Y'),
             user_defined_grads=[self.grad_x],
             user_defined_grad_outputs=[self.grad_out],
+            check_cinn=False,
         )
 
 
@@ -662,7 +702,7 @@ class TestComplexMatMulOpBroadcast(OpTest):
         )
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_cinn=False)
 
     def test_check_grad_normal(self):
         self.check_grad(
@@ -670,6 +710,7 @@ class TestComplexMatMulOpBroadcast(OpTest):
             'Out',
             user_defined_grads=[self.grad_x, self.grad_y],
             user_defined_grad_outputs=[self.grad_out],
+            check_cinn=False,
         )
 
     def test_check_grad_ingore_x(self):
@@ -679,6 +720,7 @@ class TestComplexMatMulOpBroadcast(OpTest):
             no_grad_set=set("X"),
             user_defined_grads=[self.grad_y],
             user_defined_grad_outputs=[self.grad_out],
+            check_cinn=False,
         )
 
     def test_check_grad_ingore_y(self):
@@ -688,6 +730,7 @@ class TestComplexMatMulOpBroadcast(OpTest):
             no_grad_set=set('Y'),
             user_defined_grads=[self.grad_x],
             user_defined_grad_outputs=[self.grad_out],
+            check_cinn=False,
         )
 
 
diff --git a/python/paddle/fluid/tests/unittests/test_norm_op.py b/python/paddle/fluid/tests/unittests/test_norm_op.py
index f87d5250f1c..3144ec189ed 100644
--- a/python/paddle/fluid/tests/unittests/test_norm_op.py
+++ b/python/paddle/fluid/tests/unittests/test_norm_op.py
@@ -48,10 +48,10 @@ class TestNormOp(OpTest):
         self.python_out_sig = ['Out']
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_cinn=True)
 
     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_cinn=True)
 
     def init_test_case(self):
         self.shape = [2, 3, 4, 5]
@@ -109,7 +109,7 @@ class TestNormOp6(TestNormOp):
         self.dtype = "float32"
 
     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', max_relative_error=0.008)
+        self.check_grad(['X'], 'Out', max_relative_error=0.008, check_cinn=True)
 
 
 @unittest.skipIf(
@@ -120,11 +120,17 @@ class TestNormOp7(TestNormOp):
         self.dtype = "float16"
 
     def test_check_output(self):
-        self.check_output_with_place(fluid.core.CUDAPlace(0), atol=5e-2)
+        self.check_output_with_place(
+            fluid.core.CUDAPlace(0), atol=5e-2, check_cinn=True
+        )
 
     def test_check_grad(self):
         self.check_grad_with_place(
-            fluid.core.CUDAPlace(0), ['X'], 'Out', max_relative_error=0.05
+            fluid.core.CUDAPlace(0),
+            ['X'],
+            'Out',
+            max_relative_error=0.05,
+            check_cinn=True,
         )
 
 
@@ -147,7 +153,7 @@ class TestNormTestOp(OpTest):
 
     def test_check_output(self):
         # dynamic graph just supports float tensor
-        self.check_output(check_dygraph=True)
+        self.check_output(check_dygraph=True, check_cinn=True)
 
     def test_check_grad(self):
         pass
@@ -176,11 +182,17 @@ class TestNormBF16Op(OpTest):
         self.python_out_sig = ['Out']
 
     def test_check_output(self):
-        self.check_output_with_place(core.CUDAPlace(0), atol=1e-1)
+        self.check_output_with_place(
+            core.CUDAPlace(0), atol=1e-1, check_cinn=True
+        )
 
     def test_check_grad(self):
         self.check_grad_with_place(
-            core.CUDAPlace(0), ['X'], 'Out', max_relative_error=1e-2
+            core.CUDAPlace(0),
+            ['X'],
+            'Out',
+            max_relative_error=1e-2,
+            check_cinn=True,
         )
 
     def init_test_case(self):
diff --git a/python/paddle/fluid/tests/unittests/test_one_hot_v2_op.py b/python/paddle/fluid/tests/unittests/test_one_hot_v2_op.py
index 173dae20ac6..a49060e536d 100644
--- a/python/paddle/fluid/tests/unittests/test_one_hot_v2_op.py
+++ b/python/paddle/fluid/tests/unittests/test_one_hot_v2_op.py
@@ -49,7 +49,7 @@ class TestOneHotOp(OpTest):
         self.outputs = {'Out': (out, x_lod)}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_cinn=True)
 
 
 class TestOneHotOp_attr(OpTest):
@@ -57,6 +57,7 @@ class TestOneHotOp_attr(OpTest):
         self.op_type = 'one_hot_v2'
         self.python_api = one_hot_wrapper
         depth = 10
+        depth_np = np.array(10).astype('int32')
         dimension = 12
         x_lod = [[4, 1, 3, 3]]
         x = [np.random.randint(0, depth - 1) for i in range(sum(x_lod[0]))]
@@ -69,12 +70,12 @@ class TestOneHotOp_attr(OpTest):
         for i in range(np.product(x.shape)):
             out[i, 0, x[i]] = 1.0
 
-        self.inputs = {'X': (x, x_lod)}
+        self.inputs = {'X': (x, x_lod), 'depth_tensor': depth_np}
         self.attrs = {'dtype': int(core.VarDesc.VarType.FP32), 'depth': depth}
         self.outputs = {'Out': (out, x_lod)}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_cinn=True)
 
 
 class TestOneHotOp_default_dtype(OpTest):
@@ -98,7 +99,7 @@ class TestOneHotOp_default_dtype(OpTest):
         self.outputs = {'Out': (out, x_lod)}
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_cinn=True)
 
 
 class TestOneHotOp_default_dtype_attr(OpTest):
@@ -106,6 +107,7 @@ class TestOneHotOp_default_dtype_attr(OpTest):
         self.op_type = 'one_hot_v2'
         self.python_api = one_hot_wrapper
         depth = 10
+        depth_np = np.array(depth).astype('int32')
         dimension = 12
         x_lod = [[4, 1, 3, 3]]
         x = [np.random.randint(0, depth - 1) for i in range(sum(x_lod[0]))]
diff --git a/python/paddle/fluid/tests/unittests/test_shape_op.py b/python/paddle/fluid/tests/unittests/test_shape_op.py
index 3609370e73c..d9dade1cf99 100644
--- a/python/paddle/fluid/tests/unittests/test_shape_op.py
+++ b/python/paddle/fluid/tests/unittests/test_shape_op.py
@@ -36,7 +36,7 @@ class TestShapeOp(OpTest):
         self.dtype = np.float32
 
     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_cinn=True)
 
 
 class case1(TestShapeOp):
@@ -125,7 +125,7 @@ class TestShapeOpBf16(OpTest):
 
     def test_check_output(self):
        place = core.CUDAPlace(0)
-        self.check_output_with_place(place)
+        self.check_output_with_place(place, check_cinn=True)
 
 
 class case1Bf16(TestShapeOpBf16):
diff --git a/python/paddle/fluid/tests/unittests/test_sum_op.py b/python/paddle/fluid/tests/unittests/test_sum_op.py
index 99406f4599c..49e42b54342 100644
--- a/python/paddle/fluid/tests/unittests/test_sum_op.py
+++ b/python/paddle/fluid/tests/unittests/test_sum_op.py
@@ -62,10 +62,10 @@ class TestSumOp(OpTest):
         self.dtype = np.float64
 
     def test_check_output(self):
-        self.check_output(check_prim=True)
+        self.check_output(check_prim=True, check_cinn=True)
 
     def test_check_grad(self):
-        self.check_grad(['x0'], 'Out', check_prim=True)
+        self.check_grad(['x0'], 'Out', check_prim=True, check_cinn=True)
 
 
 class TestSelectedRowsSumOp(unittest.TestCase):
@@ -299,14 +299,14 @@ class TestFP16SumOp(TestSumOp):
     def test_check_output(self):
         place = core.CUDAPlace(0)
         if core.is_float16_supported(place):
-            self.check_output_with_place(place)
+            self.check_output_with_place(place, check_cinn=True)
 
     # FIXME: Because of the precision fp16, max_relative_error
     # should be 0.15 here.
     def test_check_grad(self):
         place = core.CUDAPlace(0)
         if core.is_float16_supported(place):
-            self.check_grad(['x0'], 'Out')
+            self.check_grad(['x0'], 'Out', check_cinn=True)
 
 
 def create_test_sum_fp16_class(parent):
-- 
GitLab
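For reviewers unfamiliar with the flag: passing check_cinn=True makes OpTest
additionally compile the operator through CINN, run it, and compare the
outputs (and, in check_grad, the gradients) against the reference results,
on top of the usual checks. Below is a minimal sketch of the pattern this
patch applies, using a hypothetical relu test case; the import path follows
test_assign_value_op.py above, and the shapes are illustrative only:

    import numpy as np
    from eager_op_test import OpTest


    class TestReluWithCinn(OpTest):
        def setUp(self):
            self.op_type = 'relu'
            # Keep inputs away from 0 so the numeric gradient check is stable.
            x = np.random.uniform(0.1, 1.0, (4, 16)).astype('float64')
            self.inputs = {'X': x}
            self.outputs = {'Out': np.maximum(x, 0.0)}

        def test_check_output(self):
            # Also build the op with CINN and compare its outputs.
            self.check_output(check_cinn=True)

        def test_check_grad(self):
            # Gradients are likewise cross-checked against the CINN build.
            self.check_grad(['X'], 'Out', check_cinn=True)

Cases that CINN cannot yet handle opt out per test, either by passing
check_cinn=False directly (as in TestComplexMatMulOp) or by setting an
attribute such as self.check_cinn = False that the base test reads via
hasattr (as in TestMatMulOp16).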