diff --git a/python/paddle/fluid/tests/unittests/test_activation_op.py b/python/paddle/fluid/tests/unittests/test_activation_op.py
index 2d17c3cb07c4ca302dc0a2a4a6247ad070d3b94e..0d649ced3a034ac57ed46fafba147141c5f026c0 100755
--- a/python/paddle/fluid/tests/unittests/test_activation_op.py
+++ b/python/paddle/fluid/tests/unittests/test_activation_op.py
@@ -120,7 +120,7 @@ class TestExpPrimFp32(OpTest):
         self.shape = [12, 17]
 
     def skip_cinn(self):
-        self.enable_cinn = False
+        self.enable_cinn = True
 
     def set_only_prim(self):
         pass
@@ -145,7 +145,7 @@ class TestExpPrimFp16(TestExpPrimFp32):
         self.check_grad(['X'], 'Out', check_prim=True)
 
     def skip_cinn(self):
-        self.enable_cinn = False
+        self.enable_cinn = True
 
 
 class TestExpPrim_ZeroDim(TestExpPrimFp32):
@@ -325,7 +325,7 @@ class TestSilu(TestActivation):
     def setUp(self):
         self.op_type = "silu"
         self.prim_op_type = "comp"
-        self.enable_cinn = False
+        self.enable_cinn = True
         self.python_api = paddle.nn.functional.silu
         self.init_dtype()
         self.init_shape()
@@ -349,13 +349,14 @@ class TestSilu(TestActivation):
 class TestSilu_ZeroDim(TestSilu):
     def init_shape(self):
         self.shape = []
+        self.enable_cinn = False
 
 
 class TestSiluFP16(TestActivation):
     def setUp(self):
         self.op_type = "silu"
         self.prim_op_type = "comp"
-        self.enable_cinn = False
+        self.enable_cinn = True
         self.only_prim = True
         self.python_api = paddle.nn.functional.silu
         self.init_dtype()
@@ -1199,7 +1200,7 @@ class TestSqrtPrimFp32(TestActivation):
         self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
         self.outputs = {'Out': out}
 
-        self.enable_cinn = False
+        self.enable_cinn = True
 
     def test_check_grad(self):
         if self.dtype == np.float16:
@@ -1216,11 +1217,13 @@ class TestSqrtPrimFp32(TestActivation):
 class TestSqrt_ZeroDim(TestSqrt):
     def init_shape(self):
         self.shape = []
+        self.enable_cinn = False
 
 
 class TestSqrtPrim_ZeroDim(TestSqrt):
     def init_shape(self):
         self.shape = []
+        self.enable_cinn = False
 
     def init_dtype(self):
         self.dtype = np.float32
@@ -1527,6 +1530,8 @@ class TestSin(TestActivation, TestParameter):
         self.op_type = "sin"
         self.init_dtype()
         self.init_shape()
+        # prim not support now
+        self.enable_cinn = False
 
         np.random.seed(1024)
         x = np.random.uniform(-1, 1, self.shape).astype(self.dtype)
diff --git a/python/paddle/fluid/tests/unittests/test_expand_v2_op.py b/python/paddle/fluid/tests/unittests/test_expand_v2_op.py
index 04ce818fbf6f338b85838635287f33b9ed4981b2..d2fa900d97a31801b53b51341c90f42809eed86f 100644
--- a/python/paddle/fluid/tests/unittests/test_expand_v2_op.py
+++ b/python/paddle/fluid/tests/unittests/test_expand_v2_op.py
@@ -35,7 +35,7 @@ class TestExpandV2OpRank1(OpTest):
         self.attrs = {'shape': self.shape}
         output = np.tile(self.inputs['X'], self.expand_times)
         self.outputs = {'Out': output}
-        self.enable_cinn = False
+        self.enable_cinn = True
 
     def init_data(self):
         self.ori_shape = [100]
diff --git a/python/paddle/fluid/tests/unittests/test_fill_any_like_op.py b/python/paddle/fluid/tests/unittests/test_fill_any_like_op.py
index ec76306d587c77ff1afb61770b8ef279b834dec5..6abb573af2b30267eb859517cd2167ae0fad0282 100644
--- a/python/paddle/fluid/tests/unittests/test_fill_any_like_op.py
+++ b/python/paddle/fluid/tests/unittests/test_fill_any_like_op.py
@@ -61,7 +61,7 @@ class TestFillAnyLikeOpFloat32(TestFillAnyLikeOp):
         self.value = 0.0
 
     def skip_cinn(self):
-        self.enable_cinn = False
+        self.enable_cinn = True
 
 
 @unittest.skipIf(
@@ -96,7 +96,7 @@ class TestFillAnyLikeOpValue1(TestFillAnyLikeOp):
         self.value = 1.0
 
     def skip_cinn(self):
-        self.enable_cinn = False
+        self.enable_cinn = True
 
 
 class TestFillAnyLikeOpValue2(TestFillAnyLikeOp):
@@ -104,7 +104,7 @@ class TestFillAnyLikeOpValue2(TestFillAnyLikeOp):
         self.value = 1e-10
 
     def skip_cinn(self):
-        self.enable_cinn = False
+        self.enable_cinn = True
 
 
 class TestFillAnyLikeOpValue3(TestFillAnyLikeOp):
@@ -112,7 +112,7 @@ class TestFillAnyLikeOpValue3(TestFillAnyLikeOp):
         self.value = 1e-100
 
     def skip_cinn(self):
-        self.enable_cinn = False
+        self.enable_cinn = True
 
 
 class TestFillAnyLikeOpType(TestFillAnyLikeOp):
@@ -136,7 +136,7 @@ class TestFillAnyLikeOpType(TestFillAnyLikeOp):
         self.skip_cinn()
 
     def skip_cinn(self):
-        self.enable_cinn = False
+        self.enable_cinn = True
 
 
 class TestFillAnyLikeOpFloat16(TestFillAnyLikeOp):
@@ -144,7 +144,7 @@ class TestFillAnyLikeOpFloat16(TestFillAnyLikeOp):
         self.dtype = np.float16
 
     def skip_cinn(self):
-        self.enable_cinn = False
+        self.enable_cinn = True
 
 
 if __name__ == "__main__":
diff --git a/python/paddle/fluid/tests/unittests/test_flatten_contiguous_range_op.py b/python/paddle/fluid/tests/unittests/test_flatten_contiguous_range_op.py
index a1af87b188550fa4fd69cb7c888f6e48a73c54db..86746c174e4933f408cd1d936aebb26e4a06a20e 100644
--- a/python/paddle/fluid/tests/unittests/test_flatten_contiguous_range_op.py
+++ b/python/paddle/fluid/tests/unittests/test_flatten_contiguous_range_op.py
@@ -28,15 +28,18 @@ class TestFlattenOp(OpTest):
         self.prim_op_type = "comp"
         self.start_axis = 0
         self.stop_axis = -1
+        self.skip_cinn()
         self.init_test_case()
         self.inputs = {"X": np.random.random(self.in_shape).astype("float64")}
         self.init_attrs()
-        self.enable_cinn = False
         self.outputs = {
             "Out": self.inputs["X"].reshape(self.new_shape),
             "XShape": np.random.random(self.in_shape).astype("float32"),
         }
 
+    def skip_cinn(self):
+        self.enable_cinn = True
+
     def test_check_output(self):
         self.check_output(
             no_check_set=["XShape"], check_eager=True, check_prim=True
@@ -135,6 +138,9 @@ class TestFlattenOp_6(TestFlattenOp):
         self.stop_axis = -1
         self.new_shape = (1,)
 
+    def skip_cinn(self):
+        self.enable_cinn = False
+
     def init_attrs(self):
         self.attrs = {
             "start_axis": self.start_axis,
diff --git a/python/paddle/fluid/tests/unittests/test_full_like_op.py b/python/paddle/fluid/tests/unittests/test_full_like_op.py
index 17a19121b335d6d01bb8b0076c1cf23e435217bb..76363d60ed38524565ccdffbf3aab369dc053a9a 100644
--- a/python/paddle/fluid/tests/unittests/test_full_like_op.py
+++ b/python/paddle/fluid/tests/unittests/test_full_like_op.py
@@ -145,7 +145,7 @@ class TestFullLikeOp2(TestFullLikeOp1):
         self.dtype = np.float64
 
     def skip_cinn(self):
-        self.enable_cinn = False
+        self.enable_cinn = True
 
 
 class TestFullLikeOp3(TestFullLikeOp1):
@@ -155,7 +155,7 @@ class TestFullLikeOp3(TestFullLikeOp1):
         self.dtype = np.int64
 
     def skip_cinn(self):
-        self.enable_cinn = False
+        self.enable_cinn = True
 
 
 @unittest.skipIf(
diff --git a/python/paddle/fluid/tests/unittests/test_reduce_op.py b/python/paddle/fluid/tests/unittests/test_reduce_op.py
index caaec1e7dc3f5161b16c041d73e87959cee10f7f..67bae134dc16b1bdbb2413ae16d230353f1f02a6 100644
--- a/python/paddle/fluid/tests/unittests/test_reduce_op.py
+++ b/python/paddle/fluid/tests/unittests/test_reduce_op.py
@@ -32,8 +32,7 @@ class TestSumOp(OpTest):
         self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
         self.outputs = {'Out': self.inputs['X'].sum(axis=0)}
         self.attrs = {'dim': [0]}
-        # reduce doesn't support float64 in cinn
-        self.enable_cinn = False
+        self.enable_cinn = True
 
     def test_check_output(self):
         self.check_output(check_eager=True)
@@ -55,8 +54,7 @@ class TestSumOpFp32(OpTest):
             'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
         }
         self.gradient = self.calc_gradient()
-        # error occurred in cinn
-        self.enable_cinn = False
+        self.enable_cinn = True
 
     def test_check_output(self):
         self.check_output(check_eager=True)
@@ -151,7 +149,7 @@ class TestSumOp_fp16_withInt(OpTest):
             'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
         }
         self.gradient = self.calc_gradient()
-        self.enable_cinn = False
+        self.enable_cinn = True
 
     def test_check_output(self):
         self.check_output(check_eager=True)
@@ -182,7 +180,7 @@ class TestSumOp5D(OpTest):
         self.attrs = {'dim': [0]}
         self.outputs = {'Out': self.inputs['X'].sum(axis=0)}
         # error occurred in cinn
-        self.enable_cinn = False
+        self.enable_cinn = True
 
     def test_check_output(self):
         self.check_output(check_eager=True)
@@ -202,7 +200,7 @@ class TestSumOp6D(OpTest):
         self.attrs = {'dim': [0]}
         self.outputs = {'Out': self.inputs['X'].sum(axis=0)}
         # error occurred in cinn
-        self.enable_cinn = False
+        self.enable_cinn = True
 
     def test_check_output(self):
         self.check_output(check_eager=True)
@@ -678,8 +676,7 @@ class Test1DReduce(OpTest):
         self.prim_op_type = "prim"
         self.inputs = {'X': np.random.random(120).astype("float64")}
         self.outputs = {'Out': self.inputs['X'].sum(axis=0)}
-        # reduce doesn't support float64 in cinn.
-        self.enable_cinn = False
+        self.enable_cinn = True
 
     def test_check_output(self):
         self.check_output()
@@ -696,8 +693,7 @@ class Test2DReduce0(Test1DReduce):
         self.attrs = {'dim': [0]}
         self.inputs = {'X': np.random.random((20, 10)).astype("float64")}
         self.outputs = {'Out': self.inputs['X'].sum(axis=0)}
-        # reduce doesn't support float64 in cinn.
-        self.enable_cinn = False
+        self.enable_cinn = True
 
 
 class Test2DReduce1(Test1DReduce):
@@ -710,8 +706,7 @@ class Test2DReduce1(Test1DReduce):
         self.outputs = {
             'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
         }
-        # reduce doesn't support float64 in cinn.
-        self.enable_cinn = False
+        self.enable_cinn = True
 
 
 class Test3DReduce0(Test1DReduce):
@@ -724,8 +719,7 @@ class Test3DReduce0(Test1DReduce):
         self.outputs = {
             'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
         }
-        # reduce doesn't support float64 in cinn.
-        self.enable_cinn = False
+        self.enable_cinn = True
 
 
 class Test3DReduce1(Test1DReduce):
@@ -738,8 +732,7 @@ class Test3DReduce1(Test1DReduce):
         self.outputs = {
             'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
         }
-        # reduce doesn't support float64 in cinn.
-        self.enable_cinn = False
+        self.enable_cinn = True
 
 
 class Test3DReduce2(Test1DReduce):
@@ -752,8 +745,7 @@ class Test3DReduce2(Test1DReduce):
         self.outputs = {
             'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
         }
-        # reduce doesn't support float64 in cinn.
-        self.enable_cinn = False
+        self.enable_cinn = True
 
 
 class Test3DReduce3(Test1DReduce):
@@ -766,8 +758,7 @@ class Test3DReduce3(Test1DReduce):
         self.outputs = {
             'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
         }
-        # reduce doesn't support float64 in cinn.
-        self.enable_cinn = False
+        self.enable_cinn = True
 
 
 class Test8DReduce0(Test1DReduce):
@@ -800,8 +791,7 @@ class TestKeepDimReduce(Test1DReduce):
                 axis=tuple(self.attrs['dim']), keepdims=self.attrs['keep_dim']
             )
         }
-        # reduce doesn't support float64 in cinn.
-        self.enable_cinn = False
+        self.enable_cinn = True
 
 
 class TestKeepDim8DReduce(Test1DReduce):
@@ -897,8 +887,7 @@ class TestReduceSumWithDimOne(OpTest):
                 axis=tuple(self.attrs['dim']), keepdims=True
             )
         }
-        # reduce doesn't support float64 in cinn
-        self.enable_cinn = False
+        self.enable_cinn = True
 
     def test_check_output(self):
         self.check_output()
@@ -919,8 +908,7 @@ class TestReduceSumWithNumelOne(OpTest):
                 axis=tuple(self.attrs['dim']), keepdims=False
            )
         }
-        # reduce doesn't support float64 in cinn
-        self.enable_cinn = False
+        self.enable_cinn = True
 
     def test_check_output(self):
         self.check_output()
@@ -937,8 +925,7 @@ class TestReduceAll(OpTest):
         self.inputs = {'X': np.random.random((100, 1, 1)).astype("float64")}
         self.attrs = {'reduce_all': True, 'keep_dim': False}
         self.outputs = {'Out': self.inputs['X'].sum()}
-        # reduce doesn't support float64 in cinn
-        self.enable_cinn = False
+        self.enable_cinn = True
 
     def test_check_output(self):
         self.check_output()
@@ -955,8 +942,7 @@ class TestReduceAllFp32(OpTest):
         self.inputs = {'X': np.random.random((100, 1, 1)).astype("float32")}
         self.attrs = {'reduce_all': True, 'keep_dim': False}
         self.outputs = {'Out': self.inputs['X'].sum()}
-        # reduce doesn't support float64 in cinn
-        self.enable_cinn = False
+        self.enable_cinn = True
 
     def test_check_output(self):
         self.check_output()
@@ -973,7 +959,7 @@ class Test1DReduceWithAxes1(OpTest):
         self.inputs = {'X': np.random.random(100).astype("float64")}
         self.attrs = {'dim': [0], 'keep_dim': False}
         self.outputs = {'Out': self.inputs['X'].sum(axis=0)}
-        self.enable_cinn = False
+        self.enable_cinn = True
 
     def test_check_output(self):
         self.check_output(check_prim=True)
@@ -996,6 +982,7 @@ class TestReduceWithDtype(OpTest):
                 'out_dtype': int(convert_np_dtype_to_dtype_(np.float64)),
             }
         )
+        # cinn op_mapper not support in_dtype/out_dtype attr
         self.enable_cinn = False
 
     def test_check_output(self):
@@ -1025,6 +1012,7 @@ class TestReduceWithDtype1(TestReduceWithDtype):
                 'out_dtype': int(convert_np_dtype_to_dtype_(np.float64)),
             }
         )
+        # cinn op_mapper not support in_dtype/out_dtype attr
         self.enable_cinn = False
 
     def test_check_output(self):
@@ -1048,6 +1036,7 @@ class TestReduceWithDtype2(TestReduceWithDtype):
                 'out_dtype': int(convert_np_dtype_to_dtype_(np.float64)),
             }
         )
+        # cinn op_mapper not support in_dtype/out_dtype attr
         self.enable_cinn = False
 
     def test_check_output(self):
diff --git a/python/paddle/fluid/tests/unittests/test_slice_op.py b/python/paddle/fluid/tests/unittests/test_slice_op.py
index c5f7a3e969d1a9c8913d84b6ae4788767595ea40..893ec7366cb718cccd7046efd144a2947697484b 100644
--- a/python/paddle/fluid/tests/unittests/test_slice_op.py
+++ b/python/paddle/fluid/tests/unittests/test_slice_op.py
@@ -34,7 +34,7 @@ class TestSliceOp(OpTest):
         self.op_type = "slice"
         self.prim_op_type = "prim"
         self.python_api = paddle.slice
-        self.enable_cinn = False
+        self.enable_cinn = True
         self.config()
         self.inputs = {'Input': self.input}
         self.outputs = {'Out': self.out}
@@ -74,7 +74,7 @@ class TestCase1(TestSliceOp):
 
 class TestCase2(TestSliceOp):
     def config(self):
-        self.enable_cinn = False
+        self.enable_cinn = True
         self.input = np.random.random([3, 4, 5, 6]).astype("float64")
         self.starts = [-3, 0, 2]
         self.ends = [3, 100, -1]
@@ -114,7 +114,7 @@ class TestSliceZerosShapeTensor(OpTest):
 # 1.2 with attr(decrease)
 class TestSliceOp_decs_dim(OpTest):
     def setUp(self):
-        self.enable_cinn = False
+        self.enable_cinn = True
         self.op_type = "slice"
         self.prim_op_type = "prim"
         self.python_api = paddle.slice
@@ -149,7 +149,7 @@ class TestSliceOp_decs_dim(OpTest):
 
 class TestSliceOp_decs_dim_2(TestSliceOp_decs_dim):
     def config(self):
-        self.enable_cinn = False
+        self.enable_cinn = True
         self.input = np.random.random([3, 4, 5, 6]).astype("float64")
         self.starts = [1, 0, 2]
         self.ends = [2, 1, 4]
@@ -161,7 +161,7 @@ class TestSliceOp_decs_dim_2(TestSliceOp_decs_dim):
 
 class TestSliceOp_decs_dim_3(TestSliceOp_decs_dim):
     def config(self):
-        self.enable_cinn = False
+        self.enable_cinn = True
         self.input = np.random.random([3, 4, 5, 6]).astype("float64")
         self.starts = [-1, 0, 2]
         self.ends = [1000000, 1, 4]
@@ -185,7 +185,7 @@ class TestSliceOp_decs_dim_4(TestSliceOp_decs_dim):
 
 class TestSliceOp_decs_dim_5(TestSliceOp_decs_dim):
     def config(self):
-        self.enable_cinn = False
+        self.enable_cinn = True
         self.input = np.random.random([3, 4, 5, 6]).astype("float64")
         self.starts = [-1]
         self.ends = [1000000]
@@ -198,7 +198,7 @@ class TestSliceOp_decs_dim_5(TestSliceOp_decs_dim):
 # test_6 with test_2 with test_3
 class TestSliceOp_decs_dim_6(TestSliceOp_decs_dim):
     def config(self):
-        self.enable_cinn = False
+        self.enable_cinn = True
         self.input = np.random.random([3, 4, 5, 6]).astype("float64")
         self.starts = [0, 1, 2, 3]
         self.ends = [1, 2, 3, 4]
@@ -484,7 +484,7 @@ class TestSliceOp_starts_OneTensor_ends_ListTensor(OpTest):
 )
 class TestFP16(OpTest):
     def setUp(self):
-        self.enable_cinn = False
+        self.enable_cinn = True
         self.op_type = "slice"
         self.prim_op_type = "prim"
         self.python_api = paddle.slice
diff --git a/python/paddle/fluid/tests/unittests/test_softmax_op.py b/python/paddle/fluid/tests/unittests/test_softmax_op.py
index a7b673dd1fb93417702db412f10979b373488348..44f60ce045885cbe9460039809408370d860bbe9 100644
--- a/python/paddle/fluid/tests/unittests/test_softmax_op.py
+++ b/python/paddle/fluid/tests/unittests/test_softmax_op.py
@@ -73,7 +73,7 @@ class TestSoftmaxOp(OpTest):
             'use_cudnn': self.use_cudnn,
             'use_mkldnn': self.use_mkldnn,
         }
-        self.enable_cinn = False
+        self.enable_cinn = True
 
     def init_kernel_type(self):
         pass