diff --git a/python/paddle/fluid/tests/unittests/test_activation_op.py b/python/paddle/fluid/tests/unittests/test_activation_op.py
index b7e76ef816e12b29f344c0dd0e151f3a3bd869a9..9b92699db5c497af7ea82b9816ac082fb9352d61 100755
--- a/python/paddle/fluid/tests/unittests/test_activation_op.py
+++ b/python/paddle/fluid/tests/unittests/test_activation_op.py
@@ -52,6 +52,7 @@ class TestSqrtOpError(unittest.TestCase):
 class TestActivation(OpTest):
     def setUp(self):
         self.op_type = "exp"
+        self.prim_op_type = "prim"
         self.init_dtype()
         self.init_shape()
         self.init_kernel_type()
@@ -131,20 +132,6 @@ class TestExpFp64_Prim(TestExpFp32_Prim):
         self.dtype = np.float64


-class TestExpFp16_Prim(TestExpFp32_Prim):
-    def init_dtype(self):
-        self.dtype = np.float16
-
-    def test_check_output(self):
-        self.check_output()
-
-    def test_check_grad(self):
-        self.check_grad(['X'], 'Out', check_prim=True, only_check_prim=True)
-
-    def if_skip_cinn(self):
-        self.enable_cinn = True
-
-
 class TestExpPrim_ZeroDim(TestExpFp32_Prim):
     def init_shape(self):
         self.shape = []
@@ -1230,7 +1217,6 @@ class TestSqrtBF16(OpTest):
             'X': OpTest.np_dtype_to_fluid_dtype(convert_float_to_uint16(x))
         }
         self.outputs = {'Out': convert_float_to_uint16(out)}
-        # TODO(wanghao107): add prim test
         self.enable_cinn = False

     def init_dtype(self):
@@ -1245,7 +1231,9 @@ class TestSqrtBF16(OpTest):

     def test_check_grad(self):
         place = core.CUDAPlace(0)
-        self.check_grad_with_place(place, ['X'], 'Out', check_eager=True)
+        self.check_grad_with_place(
+            place, ['X'], 'Out', check_eager=True, check_prim=True
+        )


 class TestRsqrt(TestActivation):
@@ -2259,27 +2247,6 @@ class TestHardSwish_ZeroDim(TestHardSwish):
         self.shape = []


-class TestHardSwishFP16(TestHardSwish):
-    def setUp(self):
-        super().setUp()
-        self.enable_cinn = False
-
-    def if_only_check_prim(self):
-        return True
-
-    def init_dtype(self):
-        self.dtype = np.float16
-
-
-class TestHardSwish_ZeroDim_FP16(TestHardSwishFP16):
-    def setUp(self):
-        super().setUp()
-        self.enable_cinn = False
-
-    def init_shape(self):
-        self.shape = []
-
-
 class TestHardswishAPI(unittest.TestCase):
     # test paddle.nn.Hardswish, paddle.nn.functional.hardswish
     def setUp(self):
@@ -3821,7 +3788,7 @@ def create_test_act_fp16_class(
     globals()[cls_name] = TestActFp16


-create_test_act_fp16_class(TestActivation)
+create_test_act_fp16_class(TestActivation, check_prim=True)
 create_test_act_fp16_class(TestExpm1)
 create_test_act_fp16_class(TestSigmoid, check_prim=True)
 create_test_act_fp16_class(TestSilu, check_prim=True)
@@ -3870,7 +3837,7 @@ create_test_act_fp16_class(TestSoftsign)
 create_test_act_fp16_class(TestThresholdedRelu)
 create_test_act_fp16_class(TestHardSigmoid)
 create_test_act_fp16_class(TestSwish, grad_atol=0.85)
-create_test_act_fp16_class(TestHardSwish)
+create_test_act_fp16_class(TestHardSwish, check_prim=True)
 create_test_act_fp16_class(TestMish, grad_atol=0.9)


diff --git a/python/paddle/fluid/tests/unittests/test_cumsum_op.py b/python/paddle/fluid/tests/unittests/test_cumsum_op.py
index 28e85dda837e9b112e9d4bfd396d5f6f4915007a..1c1ee56177d9dabe1557d6335bbec68d6e159429 100644
--- a/python/paddle/fluid/tests/unittests/test_cumsum_op.py
+++ b/python/paddle/fluid/tests/unittests/test_cumsum_op.py
@@ -205,10 +205,10 @@ class TestSumOp6(OpTest):
         self.op_type = "cumsum"
         self.prim_op_type = "prim"
         self.python_api = paddle.cumsum
-        self.enable_cinn = False
         self.attrs = {'axis': -1, 'flatten': True}
         self.inputs = {'X': np.random.random((5, 6, 5)).astype("float64")}
         self.outputs = {'Out': self.inputs['X'].cumsum()}
+        self.enable_cinn = False

     def test_check_output(self):
         self.check_output()
@@ -394,13 +394,13 @@ class TestSumOpExclusiveFP16(OpTest):
         self.prim_op_type = "prim"
         self.python_api = paddle.cumsum
         self.enable_cinn = False
-        self.attrs = {'axis': 2, "exclusive": True, "dtype": "float16"}
-        a = np.random.random((4, 5, 20)).astype("float64")
+        self.attrs = {'axis': 2, "exclusive": True}
+        a = np.random.random((4, 5, 20)).astype("float16")
         self.inputs = {'X': a}
         self.outputs = {
             'Out': np.concatenate(
                 (
-                    np.zeros((4, 5, 1), dtype=np.float64),
+                    np.zeros((4, 5, 1), dtype=np.float16),
                     a[:, :, :-1].cumsum(axis=2),
                 ),
                 axis=2,
@@ -443,13 +443,16 @@ class TestSumOpReverseExclusive(OpTest):

 class BadInputTest(unittest.TestCase):
     def test_error(self):
+        paddle.enable_static()
         with fluid.program_guard(fluid.Program()):

             def test_bad_x():
                 data = [1, 2, 4]
                 result = paddle.cumsum(data, axis=0)

-            self.assertRaises(TypeError, test_bad_x)
+            with self.assertRaises(TypeError):
+                test_bad_x()
+        paddle.disable_static()


 class TestTensorAxis(unittest.TestCase):
@@ -519,6 +522,7 @@ class TestTensorAxis(unittest.TestCase):

 class TestCumSumOpFp16(unittest.TestCase):
     def test_fp16(self):
+        paddle.enable_static()
         x_np = np.random.random((100, 100)).astype('float16')
         with paddle.static.program_guard(paddle.static.Program()):
             x = paddle.static.data(shape=[100, 100], name='x', dtype='float16')
@@ -531,6 +535,7 @@ class TestCumSumOpFp16(unittest.TestCase):
             exe = paddle.static.Executor(place)
             exe.run(paddle.static.default_startup_program())
             out = exe.run(feed={'x': x_np}, fetch_list=[y1, y2, y3, y4])
+        paddle.disable_static()


 if __name__ == '__main__':
diff --git a/python/paddle/fluid/tests/unittests/test_elementwise_div_op.py b/python/paddle/fluid/tests/unittests/test_elementwise_div_op.py
index 67891e20793f7fbf91577cca1ef8b6536a5b9690..cd767287a0f31ea735eeab8ece1efe666c0a8774 100644
--- a/python/paddle/fluid/tests/unittests/test_elementwise_div_op.py
+++ b/python/paddle/fluid/tests/unittests/test_elementwise_div_op.py
@@ -65,7 +65,7 @@ class ElementwiseDivOp(OpTest):
         self.grad_y = grad_y

     def if_skip_cinn(self):
-        self.enable_cinn = False
+        pass

     def init_args(self):
         self.check_dygraph = True
@@ -145,6 +145,9 @@ class TestElementwiseDivOp_ZeroDim1(ElementwiseDivOp):
         self.x_shape = []
         self.y_shape = []

+    def if_skip_cinn(self):
+        self.enable_cinn = False
+

 class TestElementwiseDivOp_ZeroDim2(ElementwiseDivOp):
     def init_shape(self):
@@ -160,6 +163,9 @@ class TestElementwiseDivOp_ZeroDim2(ElementwiseDivOp):
     def compute_gradient_y(self, grad_out, out, y):
         return np.sum(-1 * grad_out * out / y.reshape([1, 1]))

+    def if_skip_cinn(self):
+        self.enable_cinn = False
+

 class TestElementwiseDivOp_ZeroDim3(ElementwiseDivOp):
     def init_shape(self):
@@ -175,6 +181,9 @@ class TestElementwiseDivOp_ZeroDim3(ElementwiseDivOp):
     def compute_gradient_y(self, grad_out, out, y):
         return -1 * grad_out * out / y

+    def if_skip_cinn(self):
+        self.enable_cinn = False
+

 @unittest.skipIf(
     not core.is_compiled_with_cuda()
@@ -366,8 +375,11 @@ class TestElementwiseDivOpXsizeLessThanYsize(ElementwiseDivOp):
     def compute_gradient_x(self, grad_out, y):
         return np.sum(grad_out / y, axis=(0, 1))

+    def if_skip_cinn(self):
+        self.enable_cinn = False
+

-class TestElementwiseDivOpInt(TestElementwiseDivOpNoPrim):
+class TestElementwiseDivOpInt(ElementwiseDivOp):
     def init_dtype(self):
         self.dtype = np.int32
         self.val_dtype = np.int32
diff --git a/python/paddle/fluid/tests/unittests/test_elementwise_mul_op.py b/python/paddle/fluid/tests/unittests/test_elementwise_mul_op.py
index 9fc835a9d0e7f6a4f555f455cc21194f4fd167c7..f36ab4d3c3c21665f1b5c1c457d042359ec7a3d9 100644
--- a/python/paddle/fluid/tests/unittests/test_elementwise_mul_op.py
+++ b/python/paddle/fluid/tests/unittests/test_elementwise_mul_op.py
@@ -350,7 +350,7 @@ class TestElementwiseMulOpFp16(ElementwiseMulOp):
         self.dtype = np.float16

     def if_skip_cinn(self):
-        self.enable_cinn = False
+        pass


 class TestElementwiseMulOp_commonuse_1(ElementwiseMulOp):
diff --git a/python/paddle/fluid/tests/unittests/test_expand_v2_op.py b/python/paddle/fluid/tests/unittests/test_expand_v2_op.py
index d2fa900d97a31801b53b51341c90f42809eed86f..b6b63b14c90fae62a7a1bfc33cdf153c23780c51 100644
--- a/python/paddle/fluid/tests/unittests/test_expand_v2_op.py
+++ b/python/paddle/fluid/tests/unittests/test_expand_v2_op.py
@@ -105,10 +105,10 @@ class TestExpandV2OpRank1_tensor_attr(OpTest):
         self.infer_expand_shape = [-1]

     def test_check_output(self):
-        self.check_output(check_prim=True)
+        self.check_output()

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', check_prim=True)
+        self.check_grad(['X'], 'Out')


 class TestExpandV2OpRank2_Corner_tensor_attr(TestExpandV2OpRank1_tensor_attr):
diff --git a/python/paddle/fluid/tests/unittests/test_fill_any_like_op.py b/python/paddle/fluid/tests/unittests/test_fill_any_like_op.py
index 6abb573af2b30267eb859517cd2167ae0fad0282..20a746f20b3afe86417b583de73ae29cd06d413b 100644
--- a/python/paddle/fluid/tests/unittests/test_fill_any_like_op.py
+++ b/python/paddle/fluid/tests/unittests/test_fill_any_like_op.py
@@ -25,8 +25,6 @@ import paddle.framework.dtype as dtypes
 def fill_any_like_wrapper(x, value, out_dtype=None, name=None):
     if isinstance(out_dtype, int):
         tmp_dtype = dtypes.dtype(out_dtype)
-    elif out_dtype == np.complex64:
-        raise ValueError("Not supported dtype %s" % out_dtype)
     else:
         tmp_dtype = out_dtype
     return paddle.full_like(x, value, tmp_dtype, name)
@@ -43,7 +41,7 @@ class TestFillAnyLikeOp(OpTest):
         self.inputs = {'X': np.random.random((219, 232)).astype(self.dtype)}
         self.attrs = {'value': self.value}
         self.outputs = {'Out': self.value * np.ones_like(self.inputs["X"])}
-        self.skip_cinn()
+        self.if_skip_cinn()

     def init(self):
         pass
@@ -51,7 +49,7 @@ class TestFillAnyLikeOp(OpTest):
     def test_check_output(self):
         self.check_output(check_prim=True)

-    def skip_cinn(self):
+    def if_skip_cinn(self):
         pass


@@ -60,8 +58,8 @@ class TestFillAnyLikeOpFloat32(TestFillAnyLikeOp):
         self.dtype = np.float32
         self.value = 0.0

-    def skip_cinn(self):
-        self.enable_cinn = True
+    def if_skip_cinn(self):
+        pass


 @unittest.skipIf(
@@ -81,13 +79,13 @@ class TestFillAnyLikeOpBfloat16(OpTest):
                 self.value * np.ones_like(self.inputs["X"])
             )
         }
-        self.skip_cinn()
+        self.if_skip_cinn()

     def test_check_output(self):
         place = core.CUDAPlace(0)
         self.check_output_with_place(place, check_prim=True)

-    def skip_cinn(self):
+    def if_skip_cinn(self):
         self.enable_cinn = False


@@ -95,24 +93,24 @@ class TestFillAnyLikeOpValue1(TestFillAnyLikeOp):
     def init(self):
         self.value = 1.0

-    def skip_cinn(self):
-        self.enable_cinn = True
+    def if_skip_cinn(self):
+        pass


 class TestFillAnyLikeOpValue2(TestFillAnyLikeOp):
     def init(self):
         self.value = 1e-10

-    def skip_cinn(self):
-        self.enable_cinn = True
+    def if_skip_cinn(self):
+        pass


 class TestFillAnyLikeOpValue3(TestFillAnyLikeOp):
     def init(self):
         self.value = 1e-100

-    def skip_cinn(self):
-        self.enable_cinn = True
+    def if_skip_cinn(self):
+        pass


 class TestFillAnyLikeOpType(TestFillAnyLikeOp):
@@ -133,18 +131,18 @@ class TestFillAnyLikeOpType(TestFillAnyLikeOp):
             * np.ones_like(self.inputs["X"]).astype(np.float32)
         }
-        self.skip_cinn()
+        self.if_skip_cinn()

-    def skip_cinn(self):
-        self.enable_cinn = True
+    def if_skip_cinn(self):
+        pass


 class TestFillAnyLikeOpFloat16(TestFillAnyLikeOp):
     def init(self):
         self.dtype = np.float16

-    def skip_cinn(self):
-        self.enable_cinn = True
+    def if_skip_cinn(self):
+        pass


 if __name__ == "__main__":
diff --git a/python/paddle/fluid/tests/unittests/test_full_like_op.py b/python/paddle/fluid/tests/unittests/test_full_like_op.py
index 76363d60ed38524565ccdffbf3aab369dc053a9a..ca7a9f0197d74322ad434a13165bb27696afac80 100644
--- a/python/paddle/fluid/tests/unittests/test_full_like_op.py
+++ b/python/paddle/fluid/tests/unittests/test_full_like_op.py
@@ -27,8 +27,6 @@ from paddle.static import Program, program_guard
 def fill_any_like_wrapper(x, value, out_dtype=None, name=None):
     if isinstance(out_dtype, int):
         tmp_dtype = dtypes.dtype(out_dtype)
-    elif out_dtype == np.complex64:
-        raise ValueError("Not supported dtype %s" % out_dtype)
     else:
         tmp_dtype = out_dtype
     return paddle.full_like(x, value, tmp_dtype, name)
@@ -114,7 +112,7 @@ class TestFullLikeOp1(OpTest):
         self.prim_op_type = "comp"
         self.python_api = fill_any_like_wrapper
         self.init_data()
-        self.skip_cinn()
+        self.if_skip_cinn()

         x = np.zeros(self.shape)
         out = np.full_like(x, self.fill_value, self.dtype)
@@ -134,7 +132,7 @@ class TestFullLikeOp1(OpTest):
     def test_check_output(self):
         self.check_output(check_eager=True, check_prim=True)

-    def skip_cinn(self):
+    def if_skip_cinn(self):
         pass


@@ -144,8 +142,8 @@ class TestFullLikeOp2(TestFullLikeOp1):
         self.shape = [1024, 1024]
         self.dtype = np.float64

-    def skip_cinn(self):
-        self.enable_cinn = True
+    def if_skip_cinn(self):
+        pass


 class TestFullLikeOp3(TestFullLikeOp1):
@@ -154,8 +152,8 @@ class TestFullLikeOp3(TestFullLikeOp1):
         self.shape = [5000, 5000]
         self.dtype = np.int64

-    def skip_cinn(self):
-        self.enable_cinn = True
+    def if_skip_cinn(self):
+        pass


 @unittest.skipIf(
diff --git a/python/paddle/fluid/tests/unittests/test_gather_nd_op.py b/python/paddle/fluid/tests/unittests/test_gather_nd_op.py
index 2b0a003f72b21dcaec9ddbc0d401c96d2f5d4a4b..4916a814a230be86916d8965d9c96f4357f89376 100644
--- a/python/paddle/fluid/tests/unittests/test_gather_nd_op.py
+++ b/python/paddle/fluid/tests/unittests/test_gather_nd_op.py
@@ -88,23 +88,32 @@ class TestGatherNdOpIndex1(OpTest):
         self.op_type = "gather_nd"
         self.prim_op_type = "prim"
         self.python_api = paddle.gather_nd
-        xnp = np.random.uniform(0, 100, (10, 10)).astype("float64")
-        index = np.array([1, 2]).astype("int32")
+        self.init_input()

-        self.inputs = {'X': xnp, 'Index': index}
+        self.inputs = {'X': self.xnp, 'Index': self.index}

-        self.outputs = {'Out': xnp[tuple(index.T)]}
+        self.outputs = {'Out': self.xnp[tuple(self.index.T)]}
+        self.enable_cinn = False
+
+    def init_input(self):
+        self.xnp = np.random.uniform(0, 100, (10, 10)).astype("float64")
+        self.index = np.array([1, 2]).astype("int32")

     def test_check_output(self):
         self.check_output(check_eager=False)

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', check_eager=False)
+        self.check_grad(['X'], 'Out', check_eager=False, check_prim=True)
+
+
+class TestGatherNdOpIndex1FP16(TestGatherNdOpIndex1):
+    def init_input(self):
+        self.xnp = np.random.uniform(0, 100, (10, 10)).astype("float16")
+        self.index = np.array([1, 2]).astype("int32")


 class TestGatherNdOpWithSameIndexAsX(OpTest):
     # Index has same rank as X's rank
-
     def setUp(self):
         self.op_type = "gather_nd"
         self.prim_op_type = "prim"
diff --git a/python/paddle/fluid/tests/unittests/test_reduce_op.py b/python/paddle/fluid/tests/unittests/test_reduce_op.py
index 14c1fef545f87b93095b2d050eaeaee77bb297b3..662c59d6d112e9a4764f5f1a2771a7384da9ae20 100644
--- a/python/paddle/fluid/tests/unittests/test_reduce_op.py
+++ b/python/paddle/fluid/tests/unittests/test_reduce_op.py
@@ -199,8 +199,6 @@ class TestSumOp6D(OpTest):
         }
         self.attrs = {'dim': [0]}
         self.outputs = {'Out': self.inputs['X'].sum(axis=0)}
-        # error occurred in cinn
-        self.enable_cinn = True

     def test_check_output(self):
         self.check_output(check_eager=True)
@@ -693,7 +691,6 @@ class Test2DReduce0(Test1DReduce):
         self.attrs = {'dim': [0]}
         self.inputs = {'X': np.random.random((20, 10)).astype("float64")}
         self.outputs = {'Out': self.inputs['X'].sum(axis=0)}
-        self.enable_cinn = True


 class Test2DReduce1(Test1DReduce):
@@ -706,7 +703,6 @@ class Test2DReduce1(Test1DReduce):
         self.outputs = {
             'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
         }
-        self.enable_cinn = True


 class Test3DReduce0(Test1DReduce):
@@ -719,7 +715,6 @@ class Test3DReduce0(Test1DReduce):
         self.outputs = {
             'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
         }
-        self.enable_cinn = True


 class Test3DReduce1(Test1DReduce):
@@ -732,7 +727,6 @@ class Test3DReduce1(Test1DReduce):
         self.outputs = {
             'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
         }
-        self.enable_cinn = True


 class Test3DReduce2(Test1DReduce):
@@ -745,7 +739,6 @@ class Test3DReduce2(Test1DReduce):
         self.outputs = {
             'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
         }
-        self.enable_cinn = True


 class Test3DReduce3(Test1DReduce):
@@ -758,7 +751,6 @@ class Test3DReduce3(Test1DReduce):
         self.outputs = {
             'Out': self.inputs['X'].sum(axis=tuple(self.attrs['dim']))
         }
-        self.enable_cinn = True


 class Test8DReduce0(Test1DReduce):
@@ -791,7 +783,6 @@ class TestKeepDimReduce(Test1DReduce):
                 axis=tuple(self.attrs['dim']), keepdims=self.attrs['keep_dim']
             )
         }
-        self.enable_cinn = True


 class TestKeepDim8DReduce(Test1DReduce):
@@ -871,8 +862,7 @@ class TestKeepDimReduceSumMultiAxises(OpTest):
         self.check_output()

     def test_check_grad(self):
-        # rev_comp error
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_prim=True)


 class TestReduceSumWithDimOne(OpTest):
@@ -962,16 +952,22 @@ class Test1DReduceWithAxes1(OpTest):
         self.enable_cinn = True

     def test_check_output(self):
-        self.check_output(check_prim=True)
+        self.check_output()

     def test_check_grad(self):
         self.check_grad(['X'], 'Out', check_prim=True)


+def reduce_sum_wrapper(
+    x, axis=None, dtype_rename=None, keepdim=False, name=None
+):
+    return paddle.sum(x, axis, "float64", keepdim, name)
+
+
 class TestReduceWithDtype(OpTest):
     def setUp(self):
         self.op_type = "reduce_sum"
-        self.python_api = paddle.sum
+        self.python_api = reduce_sum_wrapper
         self.prim_op_type = "prim"
         self.inputs = {'X': np.random.random((6, 2, 10)).astype("float64")}
         self.outputs = {'Out': self.inputs['X'].sum().astype('float64')}
@@ -982,22 +978,14 @@ class TestReduceWithDtype(OpTest):
                 'out_dtype': int(convert_np_dtype_to_dtype_(np.float64)),
             }
         )
-        # cinn op_mapper not support in_dtype/out_dtype attr
-        self.enable_cinn = False

     def test_check_output(self):
-        self.check_output(check_prim=True)
+        self.check_output()

     def test_check_grad(self):
         self.check_grad(['X'], 'Out', check_prim=True)


-def reduce_sum_wrapper(
-    x, axis=None, dtype_rename=None, keepdim=False, name=None
-):
-    return paddle.sum(x, axis, "float64", keepdim, name)
-
-
 class TestReduceWithDtype1(TestReduceWithDtype):
     def setUp(self):
         self.op_type = "reduce_sum"
diff --git a/python/paddle/fluid/tests/unittests/test_slice_op.py b/python/paddle/fluid/tests/unittests/test_slice_op.py
index ae7528b49005c7a01694144828eba03de7c71c71..191eef921f999aa4e7fe8494d41477b3af7c7e93 100644
--- a/python/paddle/fluid/tests/unittests/test_slice_op.py
+++ b/python/paddle/fluid/tests/unittests/test_slice_op.py
@@ -34,7 +34,6 @@ class TestSliceOp(OpTest):
         self.op_type = "slice"
         self.prim_op_type = "prim"
         self.python_api = paddle.slice
-        self.enable_cinn = True
         self.config()
         self.inputs = {'Input': self.input}
         self.outputs = {'Out': self.out}
@@ -54,7 +53,7 @@ class TestSliceOp(OpTest):
         self.out = self.input[1:3, 0:3, 2:4, :]

     def test_check_output(self):
-        self.check_output(check_prim=True)
+        self.check_output()

     def test_check_grad_normal(self):
         self.check_grad(
@@ -74,7 +73,6 @@ class TestCase1(TestSliceOp):

 class TestCase2(TestSliceOp):
     def config(self):
-        self.enable_cinn = True
         self.input = np.random.random([3, 4, 5, 6]).astype("float64")
         self.starts = [-3, 0, 2]
         self.ends = [3, 100, -1]
@@ -139,7 +137,7 @@ class TestSliceOp_decs_dim(OpTest):
         self.out = self.input[1, 0:3, 2:4, :]

     def test_check_output(self):
-        self.check_output(check_prim=True)
+        self.check_output()

     def test_check_grad_normal(self):
         self.check_grad(
@@ -465,7 +463,6 @@ class TestSliceOp_starts_OneTensor_ends_ListTensor(OpTest):
 )
 class TestFP16(OpTest):
     def setUp(self):
-        self.enable_cinn = True
         self.op_type = "slice"
         self.prim_op_type = "prim"
         self.python_api = paddle.slice
@@ -578,6 +575,7 @@ class TestBF16(OpTest):

     def test_check_output(self):
         self.check_output()

+    # pad not support bfloat16, so we can't test prim.
     def test_check_grad_normal(self):
         self.check_grad(['Input'], 'Out')
diff --git a/python/paddle/tensor/ops.py b/python/paddle/tensor/ops.py
index e83d04f3c02684d00764afd24cde90a501321918..7e3ccf3305462ac044dcde6449fa41845080b911 100644
--- a/python/paddle/tensor/ops.py
+++ b/python/paddle/tensor/ops.py
@@ -919,7 +919,7 @@ def sqrt(x, name=None):
         return _C_ops.sqrt(x)
     else:
         check_variable_and_dtype(
-            x, 'x', ['float16', 'float32', 'float64'], 'sqrt'
+            x, 'x', ['float16', 'uint16', 'float32', 'float64'], 'sqrt'
         )
         helper = LayerHelper('sqrt', **locals())
         out = helper.create_variable_for_type_inference(dtype=x.dtype)