diff --git a/python/paddle/fluid/tests/unittests/test_cumsum_op.py b/python/paddle/fluid/tests/unittests/test_cumsum_op.py
index 1c1ee56177d9dabe1557d6335bbec68d6e159429..a4e2c6495c724f723e50f52bf6ae401594920ca7 100644
--- a/python/paddle/fluid/tests/unittests/test_cumsum_op.py
+++ b/python/paddle/fluid/tests/unittests/test_cumsum_op.py
@@ -117,7 +117,7 @@ class TestSumOp1(OpTest):
         self.op_type = "cumsum"
         self.prim_op_type = "prim"
         self.python_api = paddle.cumsum
-        self.enable_cinn = False
+        self.enable_cinn = True
         self.attrs = {'axis': 2}
         self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
         self.outputs = {'Out': self.inputs['X'].cumsum(axis=2)}
@@ -134,7 +134,7 @@ class TestSumOp2(OpTest):
         self.op_type = "cumsum"
         self.prim_op_type = "prim"
         self.python_api = paddle.cumsum
-        self.enable_cinn = False
+        self.enable_cinn = True
         self.attrs = {'axis': -1, 'reverse': True}
         self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
         self.outputs = {
@@ -155,7 +155,7 @@ class TestSumOp3(OpTest):
         self.op_type = "cumsum"
         self.prim_op_type = "prim"
         self.python_api = paddle.cumsum
-        self.enable_cinn = False
+        self.enable_cinn = True
         self.attrs = {'axis': 1}
         self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
         self.outputs = {'Out': self.inputs['X'].cumsum(axis=1)}
@@ -172,7 +172,7 @@ class TestSumOp4(OpTest):
         self.op_type = "cumsum"
         self.prim_op_type = "prim"
         self.python_api = paddle.cumsum
-        self.enable_cinn = False
+        self.enable_cinn = True
         self.attrs = {'axis': 0}
         self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
         self.outputs = {'Out': self.inputs['X'].cumsum(axis=0)}
@@ -189,7 +189,7 @@ class TestSumOp5(OpTest):
         self.op_type = "cumsum"
         self.prim_op_type = "prim"
         self.python_api = paddle.cumsum
-        self.enable_cinn = False
+        self.enable_cinn = True
         self.inputs = {'X': np.random.random((5, 20)).astype("float64")}
         self.outputs = {'Out': self.inputs['X'].cumsum(axis=1)}
 
@@ -222,7 +222,7 @@ class TestSumOp7(OpTest):
         self.op_type = "cumsum"
         self.prim_op_type = "prim"
         self.python_api = paddle.cumsum
-        self.enable_cinn = False
+        self.enable_cinn = True
         self.inputs = {'X': np.random.random((100)).astype("float64")}
         self.outputs = {'Out': self.inputs['X'].cumsum(axis=0)}
 
@@ -263,7 +263,7 @@ class TestSumOpExclusive1(OpTest):
         self.op_type = "cumsum"
         self.prim_op_type = "prim"
         self.python_api = paddle.cumsum
-        self.enable_cinn = False
+        self.enable_cinn = True
         self.attrs = {'axis': 2, "exclusive": True}
         a = np.random.random((4, 5, 20)).astype("float64")
         self.inputs = {'X': a}
@@ -289,7 +289,7 @@ class TestSumOpExclusive2(OpTest):
         self.op_type = "cumsum"
         self.prim_op_type = "prim"
         self.python_api = paddle.cumsum
-        self.enable_cinn = False
+        self.enable_cinn = True
         self.attrs = {'axis': 2, "exclusive": True}
         a = np.random.random((1, 1, 100)).astype("float64")
         self.inputs = {'X': a}
@@ -315,7 +315,7 @@ class TestSumOpExclusive3(OpTest):
         self.op_type = "cumsum"
         self.prim_op_type = "prim"
         self.python_api = paddle.cumsum
-        self.enable_cinn = False
+        self.enable_cinn = True
         self.attrs = {'axis': 2, "exclusive": True}
         a = np.random.random((4, 5, 20)).astype("float64")
         self.inputs = {'X': a}
@@ -341,7 +341,7 @@ class TestSumOpExclusive4(OpTest):
         self.op_type = "cumsum"
         self.prim_op_type = "prim"
         self.python_api = paddle.cumsum
-        self.enable_cinn = False
+        self.enable_cinn = True
         self.attrs = {'axis': 2, "exclusive": True}
         a = np.random.random((1, 1, 100)).astype("float64")
         self.inputs = {'X': a}
@@ -367,7 +367,7 @@ class TestSumOpExclusive5(OpTest):
         self.op_type = "cumsum"
         self.prim_op_type = "prim"
         self.python_api = paddle.cumsum
-        self.enable_cinn = False
+        self.enable_cinn = True
         self.attrs = {'axis': 2, "exclusive": True}
         a = np.random.random((4, 5, 40)).astype("float64")
         self.inputs = {'X': a}
@@ -419,7 +419,7 @@ class TestSumOpReverseExclusive(OpTest):
         self.op_type = "cumsum"
         self.prim_op_type = "prim"
         self.python_api = paddle.cumsum
-        self.enable_cinn = False
+        self.enable_cinn = True
         self.attrs = {'axis': 2, 'reverse': True, "exclusive": True}
         a = np.random.random((4, 5, 6)).astype("float64")
         self.inputs = {'X': a}
diff --git a/python/paddle/fluid/tests/unittests/test_elementwise_div_op.py b/python/paddle/fluid/tests/unittests/test_elementwise_div_op.py
index d71f372074dafb1fbc37ad50be83601512f7b162..8fe5070d2f47361088e72d6ad636a9b9731216cd 100644
--- a/python/paddle/fluid/tests/unittests/test_elementwise_div_op.py
+++ b/python/paddle/fluid/tests/unittests/test_elementwise_div_op.py
@@ -65,7 +65,7 @@ class ElementwiseDivOp(OpTest):
         self.grad_y = grad_y
 
     def if_enable_cinn(self):
-        pass
+        self.enable_cinn = True
 
     def init_args(self):
         self.check_dygraph = True
@@ -136,9 +136,6 @@ class TestElementwiseDivPrimOpFp32(ElementwiseDivOp):
         self.dtype = np.float32
         self.val_dtype = np.float32
 
-    def if_enable_cinn(self):
-        pass
-
 
 class TestElementwiseDivOp_ZeroDim1(ElementwiseDivOp):
     def init_shape(self):
@@ -208,6 +205,9 @@ class TestElementwiseDivOpBF16(ElementwiseDivOp):
     def if_check_prim(self):
         self.check_prim = False
 
+    def if_enable_cinn(self):
+        self.enable_cinn = False
+
 
 @skip_check_grad_ci(
     reason="[skip shape check] Use y_shape(1) to test broadcast."
@@ -375,9 +375,6 @@ class TestElementwiseDivOpXsizeLessThanYsize(ElementwiseDivOp):
     def compute_gradient_x(self, grad_out, y):
         return np.sum(grad_out / y, axis=(0, 1))
 
-    def if_enable_cinn(self):
-        self.enable_cinn = False
-
 
 class TestElementwiseDivOpInt(ElementwiseDivOp):
     def init_dtype(self):
@@ -400,7 +397,7 @@ class TestElementwiseDivOpFp16(ElementwiseDivOp):
         self.val_dtype = np.float16
 
     def if_enable_cinn(self):
-        self.enable_cinn = False
+        self.enable_cinn = True
 
 
 class TestElementwiseDivBroadcast(unittest.TestCase):