diff --git a/python/paddle/tensor/math.py b/python/paddle/tensor/math.py
index d620b57124207e970db4c3b234234711c189c453..02ac8b4669e7edfebbd8b05d7905b82f6a9a980c 100644
--- a/python/paddle/tensor/math.py
+++ b/python/paddle/tensor/math.py
@@ -1510,8 +1510,11 @@ def sum(x, axis=None, dtype=None, keepdim=False, name=None):
 
     dtype_flag = False
     if dtype is not None:
-        dtype_flag = True
-        dtype = convert_np_dtype_to_dtype_(dtype)
+        if paddle.ir.core._use_new_ir_api():
+            dtype = paddle.ir.core.convert_np_dtype_to_dtype_(dtype)
+        else:
+            dtype_flag = True
+            dtype = convert_np_dtype_to_dtype_(dtype)
 
     if in_dynamic_mode():
         return _C_ops.sum(x, axis, dtype, keepdim)
diff --git a/test/legacy_test/eager_op_test.py b/test/legacy_test/eager_op_test.py
index 3c50f3d6f5a6f73c5dee1f028d35e5db8b748ac1..95715c2dc84e1bb5b7ec55ac73c2ea85ebea31d7 100644
--- a/test/legacy_test/eager_op_test.py
+++ b/test/legacy_test/eager_op_test.py
@@ -1927,6 +1927,7 @@ class OpTest(unittest.TestCase):
         only_check_prim=False,
         inplace_atol=None,
         check_cinn=False,
+        check_new_ir=True,
     ):
         core._set_prim_all_enabled(False)
         core.set_prim_eager_enabled(False)
@@ -2455,6 +2456,7 @@ class OpTest(unittest.TestCase):
         if (
             self.op_type
             in new_ir_python_api_grad_white_list.new_ir_python_api_grad_white_list
+            and check_new_ir
         ):
             if (
                 type(place) is paddle.fluid.libpaddle.CPUPlace
@@ -2576,6 +2578,7 @@ class OpTest(unittest.TestCase):
         inplace_atol=None,
         check_cinn=False,
         only_check_prim=False,
+        check_new_ir=True,
     ):
         self.__class__.op_type = self.op_type
         if self.is_mkldnn_op():
@@ -2600,6 +2603,7 @@ class OpTest(unittest.TestCase):
                 only_check_prim=only_check_prim,
                 inplace_atol=inplace_atol,
                 check_cinn=check_cinn,
+                check_new_ir=check_new_ir,
             )
             if not res and only_check_prim:
                 continue
@@ -2766,6 +2770,7 @@ class OpTest(unittest.TestCase):
         only_check_prim=False,
         atol=1e-5,
         check_cinn=False,
+        check_new_ir=True,
     ):
         if hasattr(self, "use_custom_device") and self.use_custom_device:
             check_dygraph = False
@@ -2788,6 +2793,7 @@ class OpTest(unittest.TestCase):
             only_check_prim=only_check_prim,
             atol=atol,
             check_cinn=check_cinn,
+            check_new_ir=check_new_ir,
         )
 
     def check_grad_with_place(
@@ -2807,6 +2813,7 @@ class OpTest(unittest.TestCase):
         numeric_place=None,
         atol=1e-5,
         check_cinn=False,
+        check_new_ir=True,
     ):
         if hasattr(self, "use_custom_device") and self.use_custom_device:
             check_dygraph = False
@@ -3007,6 +3014,7 @@ class OpTest(unittest.TestCase):
         if (
             self.op_type
             in new_ir_python_api_grad_white_list.new_ir_python_api_grad_white_list
+            and check_new_ir
         ):
             if (
                 type(place) is paddle.fluid.libpaddle.CPUPlace
diff --git a/test/legacy_test/test_reduce_op.py b/test/legacy_test/test_reduce_op.py
index 9617f39537b4be4f4dd0ddc8b12fd76c3e49e24e..4043cb8ccba2632d795abe5a71e331a794ced4f6 100644
--- a/test/legacy_test/test_reduce_op.py
+++ b/test/legacy_test/test_reduce_op.py
@@ -1180,7 +1180,12 @@ class Test3DReduce3(Test1DReduce):
 
 
 def reduce_sum_wrapper2(x, axis=[0], dtype=None, keepdim=False):
-    return paddle._C_ops.sum(x, axis, dtype, keepdim)
+    if paddle.in_dynamic_mode():
+        return paddle._C_ops.sum(x, axis, dtype, keepdim)
+    if paddle.ir.core._use_new_ir_api():
+        return paddle._ir_ops.sum(x, axis, dtype, keepdim)
+    # Static mode with the legacy IR: keep the original behavior.
+    return paddle._C_ops.sum(x, axis, dtype, keepdim)
 
 
 class Test8DReduce0(Test1DReduce):
diff --git a/test/mkldnn/test_reduce_bf16_mkldnn_op.py b/test/mkldnn/test_reduce_bf16_mkldnn_op.py
index 7e2581d4c5933a7121b4961b0e986ff1c69e097a..5001a03372e75650993e07118086161e892d5977 100644
--- a/test/mkldnn/test_reduce_bf16_mkldnn_op.py
+++ b/test/mkldnn/test_reduce_bf16_mkldnn_op.py
@@ -40,7 +40,7 @@ class TestReduceSumDefaultBF16OneDNNOp(OpTest):
         self.attrs = {'use_mkldnn': self.use_mkldnn}
 
     def test_check_output(self):
-        self.check_output(check_dygraph=False)
+        self.check_output(check_dygraph=False, check_new_ir=False)
 
     def calculate_grads(self):
         tmp_tensor = np.zeros(self.x_fp32.shape).astype("float32")
@@ -84,6 +84,7 @@ class TestReduceDefaultWithGradBF16OneDNNOp(TestReduceSumDefaultBF16OneDNNOp):
             check_dygraph=False,
             user_defined_grads=[self.grad_X],
             user_defined_grad_outputs=[convert_float_to_uint16(self.grad_Out)],
+            check_new_ir=False,
         )
 
 
diff --git a/test/mkldnn/test_reduce_mkldnn_op.py b/test/mkldnn/test_reduce_mkldnn_op.py
index 015271dfc4bbb290f234735cc43214edb86f3ce7..8e0ae6274d92a4ae0fa084d927c0b94ab402ae84 100644
--- a/test/mkldnn/test_reduce_mkldnn_op.py
+++ b/test/mkldnn/test_reduce_mkldnn_op.py
@@ -29,12 +29,12 @@ class TestReduceSumDefaultOneDNNOp(OpTest):
         self.attrs = {'use_mkldnn': self.use_mkldnn}
 
     def test_check_output(self):
-        self.check_output(check_dygraph=False)
+        self.check_output(check_dygraph=False, check_new_ir=False)
 
 
 class TestReduceDefaultWithGradOneDNNOp(TestReduceSumDefaultOneDNNOp):
     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', check_dygraph=False)
+        self.check_grad(['X'], 'Out', check_dygraph=False, check_new_ir=False)
 
 
 class TestReduceSum4DOneDNNOp(TestReduceDefaultWithGradOneDNNOp):
diff --git a/test/white_list/new_ir_python_api_grad_white_list.py b/test/white_list/new_ir_python_api_grad_white_list.py
index 81ab325a12aeb62151112102d91841a938f07241..b96ba7c4ff939924e36741fdefa8d0bfee51b716 100644
--- a/test/white_list/new_ir_python_api_grad_white_list.py
+++ b/test/white_list/new_ir_python_api_grad_white_list.py
@@ -14,4 +14,5 @@
 
 new_ir_python_api_grad_white_list = [
     "mean",
+    "reduce_sum",
 ]