diff --git a/python/paddle/fluid/tests/unittests/CMakeLists.txt b/python/paddle/fluid/tests/unittests/CMakeLists.txt
index 272ca806747edad820f5e7f814f7c140a5ab5e0e..4a771990d91e10f6b7013aa201c85fd6e4a9f3ef 100755
--- a/python/paddle/fluid/tests/unittests/CMakeLists.txt
+++ b/python/paddle/fluid/tests/unittests/CMakeLists.txt
@@ -1077,7 +1077,7 @@ set_tests_properties(test_generator_dataloader PROPERTIES TIMEOUT 120)
 set_tests_properties(test_partial_concat_op PROPERTIES TIMEOUT 120)
 set_tests_properties(test_fuse_optimizer_pass PROPERTIES TIMEOUT 120)
 set_tests_properties(test_softmax_with_cross_entropy_op PROPERTIES TIMEOUT 120)
-set_tests_properties(test_reduce_op PROPERTIES TIMEOUT 120)
+set_tests_properties(test_reduce_op PROPERTIES TIMEOUT 500)
 set_tests_properties(test_adam_optimizer_fp32_fp64 PROPERTIES TIMEOUT 120)
 set_tests_properties(test_elementwise_nn_grad PROPERTIES TIMEOUT 120)
 set_tests_properties(test_buffer_shared_memory_reuse_pass_and_fuse_optimization_op_pass PROPERTIES TIMEOUT 120)
diff --git a/python/paddle/fluid/tests/unittests/test_reduce_op.py b/python/paddle/fluid/tests/unittests/test_reduce_op.py
index 69693f57bb2f3ca469cf7dbfd12364f4f68a294d..01d386724d1613b6523a322df97b8dcbc1cdf42a 100644
--- a/python/paddle/fluid/tests/unittests/test_reduce_op.py
+++ b/python/paddle/fluid/tests/unittests/test_reduce_op.py
@@ -26,19 +26,22 @@ from paddle.fluid.framework import convert_np_dtype_to_dtype_

 class TestSumOp(OpTest):
     def setUp(self):
+        self.python_api = paddle.sum
         self.op_type = "reduce_sum"
         self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
         self.outputs = {'Out': self.inputs['X'].sum(axis=0)}
+        self.attrs = {'dim': [0]}

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=True)


 class TestSumOp_fp16(OpTest):
     def setUp(self):
+        self.python_api = paddle.sum
         self.op_type = "reduce_sum"
         self.inputs = {
             'X': np.random.uniform(0, 0.1, (5, 6, 10)).astype("float16")
@@ -50,7 +53,7 @@ class TestSumOp_fp16(OpTest):
         self.gradient = self.calc_gradient()

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)

     def calc_gradient(self):
         x = self.inputs["X"]
@@ -58,7 +61,8 @@ class TestSumOp_fp16(OpTest):
         return grad,

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', user_defined_grads=self.gradient)
+        self.check_grad(
+            ['X'], 'Out', user_defined_grads=self.gradient, check_eager=True)


 @unittest.skipIf(not core.is_compiled_with_cuda(),
@@ -66,6 +70,7 @@ class TestSumOp_fp16(OpTest):
 class TestSumOp_bf16(OpTest):
     def setUp(self):
         np.random.seed(100)
+        self.python_api = paddle.sum
         self.op_type = "reduce_sum"
         self.dtype = np.uint16
         self.x = np.random.uniform(0, 0.1, (2, 5, 10)).astype(np.float32)
@@ -79,12 +84,15 @@

     def test_check_output(self):
         place = core.CUDAPlace(0)
-        self.check_output_with_place(place)
+        self.check_output_with_place(place, check_eager=True)

     def test_check_grad(self):
         place = core.CUDAPlace(0)
         self.check_grad_with_place(
-            place, ['X'], 'Out', user_defined_grads=self.gradient)
+            place, ['X'],
+            'Out',
+            user_defined_grads=self.gradient,
+            check_eager=True)

     def calc_gradient(self):
         x = self.x
@@ -94,6 +102,7 @@

 class TestSumOp_fp16_withInt(OpTest):
     def setUp(self):
+        self.python_api = paddle.sum
         self.op_type = "reduce_sum"
         self.inputs = {
             # ref to https://en.wikipedia.org/wiki/Half-precision_floating-point_format
@@ -107,7 +116,7 @@ class TestSumOp_fp16_withInt(OpTest):
         self.gradient = self.calc_gradient()

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)

     def calc_gradient(self):
         x = self.inputs["X"]
@@ -115,41 +124,47 @@ class TestSumOp_fp16_withInt(OpTest):
         return grad,

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', user_defined_grads=self.gradient)
+        self.check_grad(
+            ['X'], 'Out', user_defined_grads=self.gradient, check_eager=True)


 class TestSumOp5D(OpTest):
     def setUp(self):
+        self.python_api = paddle.sum
         self.op_type = "reduce_sum"
         self.inputs = {
             'X': np.random.random((1, 2, 5, 6, 10)).astype("float64")
         }
+        self.attrs = {'dim': [0]}
         self.outputs = {'Out': self.inputs['X'].sum(axis=0)}

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=True)


 class TestSumOp6D(OpTest):
     def setUp(self):
+        self.python_api = paddle.sum
         self.op_type = "reduce_sum"
         self.inputs = {
             'X': np.random.random((1, 1, 2, 5, 6, 10)).astype("float64")
         }
+        self.attrs = {'dim': [0]}
         self.outputs = {'Out': self.inputs['X'].sum(axis=0)}

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=True)


 class TestSumOp8D(OpTest):
     def setUp(self):
+        self.python_api = paddle.sum
         self.op_type = "reduce_sum"
         self.inputs = {
             'X': np.random.random((1, 3, 1, 2, 1, 4, 3, 10)).astype("float64")
@@ -158,10 +173,10 @@ class TestSumOp8D(OpTest):
         self.outputs = {'Out': self.inputs['X'].sum(axis=(0, 3))}

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=True)


 @skip_check_grad_ci(
diff --git a/python/paddle/tensor/math.py b/python/paddle/tensor/math.py
index 3408dd7ce9384d7bdef53ff97df3e0d1254dedd4..d2ed985fb86516e5f78c953c769fc53f32c47de9 100755
--- a/python/paddle/tensor/math.py
+++ b/python/paddle/tensor/math.py
@@ -904,7 +904,18 @@ def sum(x, axis=None, dtype=None, keepdim=False, name=None):
             return (False, src_type)

     dtype_flag, dtype = get_dtype(x, dtype)
-    if paddle.in_dynamic_mode():
+
+    if in_dygraph_mode():
+        if reduce_all_flag:
+            axis = range(len(x.shape))
+        else:
+            axis = axis if axis != None and axis != [] else [0]
+
+        out_dtype = convert_np_dtype_to_dtype_(dtype)
+        out = _C_ops.final_state_sum(x, axis, out_dtype, keepdim)
+        return out
+
+    if _in_legacy_dygraph():
         axis = axis if axis != None and axis != [] else [0]
         if dtype_flag:
             return _C_ops.reduce_sum(x, 'dim', axis, 'keep_dim', keepdim,
diff --git a/python/paddle/utils/code_gen/api.yaml b/python/paddle/utils/code_gen/api.yaml
index 2b0c562dbf9bdebffd84d25e898f31e194d95679..b137399b71c88054b9c426c8d73bc178707adb3b 100644
--- a/python/paddle/utils/code_gen/api.yaml
+++ b/python/paddle/utils/code_gen/api.yaml
@@ -1596,13 +1596,14 @@
   # no_need_buffer : x, y

 - api : sum
-  args : (Tensor x, int64_t[] axis={}, DataType dtype=DataType::UNDEFINED, bool keep_dim=false)
-  output : Tensor
+  args : (Tensor x, int64_t[] dims={}, DataType out_dtype=paddle::experimental::DataType::UNDEFINED, bool keep_dim=false)
+  output : Tensor(out)
   infer_meta :
     func : SumInferMeta
   kernel :
     func : sum
     data_type : x
+  backward : sum_grad

 # take_along_axis
 - api : take_along_axis
diff --git a/python/paddle/utils/code_gen/backward.yaml b/python/paddle/utils/code_gen/backward.yaml
index cbcfc02ea099207da3ed39442401e1c54ed570d9..c6951fa8fc1d4b9f25d48a28adba07e3c08fca25 100644
--- a/python/paddle/utils/code_gen/backward.yaml
+++ b/python/paddle/utils/code_gen/backward.yaml
@@ -1152,6 +1152,16 @@
   kernel :
     func : subtract_grad

+- backward_api : sum_grad
+  forward : sum (Tensor x, int64_t[] dims={}, DataType out_dtype=paddle::experimental::DataType::UNDEFINED, bool keep_dim=false) -> Tensor(out)
+  args : (Tensor x, Tensor out_grad, int64_t[] dims, bool keep_dim, bool reduce_all=false)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : sum_grad
+
 - backward_api : take_along_axis_grad
   forward : take_along_axis (Tensor x, Tensor index, int axis) -> Tensor(out)
   args : (Tensor x, Tensor index, Tensor out_grad, int axis)
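
Note on the test changes above: every reduce_sum test follows the same three-part pattern. setUp gains self.python_api = paddle.sum and an explicit self.attrs = {'dim': [0]}, and each check call gains check_eager=True, which makes OpTest replay the operator through the eager final-state path via the registered Python API. A minimal sketch of the pattern (class name hypothetical; op_test is the in-tree helper module these tests already import from):

import numpy as np
import paddle
from op_test import OpTest


class TestSumOpSketch(OpTest):
    def setUp(self):
        # check_eager=True needs a Python-level entry point to replay the op.
        self.python_api = paddle.sum
        self.op_type = "reduce_sum"
        self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
        # The eager path consumes an explicit dim list; it must match the
        # axis used to build the reference output below.
        self.attrs = {'dim': [0]}
        self.outputs = {'Out': self.inputs['X'].sum(axis=0)}

    def test_check_output(self):
        self.check_output(check_eager=True)

    def test_check_grad(self):
        self.check_grad(['X'], 'Out', check_eager=True)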
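
The new in_dygraph_mode() branch in python/paddle/tensor/math.py normalizes axis before dispatching to _C_ops.final_state_sum: a full reduction passes every dimension explicitly, while an unset or empty axis falls back to [0]. A standalone sketch of just that normalization, where reduce_all stands in for the reduce_all_flag computed earlier in paddle.sum (not shown in this hunk):

def normalize_sum_axis(ndim, axis, reduce_all):
    # Mirrors the eager branch: reduce_all expands to every dim; otherwise
    # an unset/empty axis defaults to [0], matching the legacy behavior.
    if reduce_all:
        return list(range(ndim))
    return list(axis) if axis not in (None, []) else [0]

# paddle.sum(x) on a 3-D tensor reduces every axis:
assert normalize_sum_axis(3, None, True) == [0, 1, 2]
# paddle.sum(x, axis=[1]) keeps the user-specified dims:
assert normalize_sum_axis(3, [1], False) == [1]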
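
End to end, the api.yaml entry (now pointing at backward : sum_grad) and the new backward.yaml block wire paddle.sum into the generated final-state forward and its gradient. A quick smoke test of that path, assuming _test_eager_guard (the switch tests of this era use to enable eager mode) is available:

import paddle
from paddle.fluid.framework import _test_eager_guard

with _test_eager_guard():
    x = paddle.to_tensor([[1.0, 2.0], [3.0, 4.0]], stop_gradient=False)
    y = paddle.sum(x, axis=0)  # dispatches to _C_ops.final_state_sum
    loss = paddle.sum(y)       # axis=None -> full (reduce_all) reduction
    loss.backward()            # gradient generated from the sum_grad entry
    print(x.grad)              # all-ones tensor with the shape of x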