Unverified · Commit 5936fa6e authored by From00, committed by GitHub

Add yaml for reduce_sum OP (#41295)

* Add yaml for reduce_sum OP

* Fix CI errors

* Fix CI errors

* Fix CI errors

* Fix CI errors
Parent 50f8e974
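This change adds the yaml definition that code-generates the final-state (eager) entry point for `reduce_sum`, points `paddle.sum` at `_C_ops.final_state_sum` in eager mode, and turns on `check_eager=True` plus `python_api = paddle.sum` in the op tests. A minimal sketch of the public behavior those tests verify (shapes mirror the test data; the snippet itself is illustrative, not part of the diff):

```python
import numpy as np
import paddle

x_np = np.random.random((5, 6, 10)).astype("float64")
x = paddle.to_tensor(x_np)

out = paddle.sum(x, axis=0)                     # reduce dim 0 -> shape [6, 10]
out_keep = paddle.sum(x, axis=0, keepdim=True)  # shape [1, 6, 10]

np.testing.assert_allclose(out.numpy(), x_np.sum(axis=0))
print(out.shape, out_keep.shape)
```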
@@ -1077,7 +1077,7 @@ set_tests_properties(test_generator_dataloader PROPERTIES TIMEOUT 120)
 set_tests_properties(test_partial_concat_op PROPERTIES TIMEOUT 120)
 set_tests_properties(test_fuse_optimizer_pass PROPERTIES TIMEOUT 120)
 set_tests_properties(test_softmax_with_cross_entropy_op PROPERTIES TIMEOUT 120)
-set_tests_properties(test_reduce_op PROPERTIES TIMEOUT 120)
+set_tests_properties(test_reduce_op PROPERTIES TIMEOUT 500)
 set_tests_properties(test_adam_optimizer_fp32_fp64 PROPERTIES TIMEOUT 120)
 set_tests_properties(test_elementwise_nn_grad PROPERTIES TIMEOUT 120)
 set_tests_properties(test_buffer_shared_memory_reuse_pass_and_fuse_optimization_op_pass PROPERTIES TIMEOUT 120)
......
@@ -26,19 +26,22 @@ from paddle.fluid.framework import convert_np_dtype_to_dtype_

 class TestSumOp(OpTest):
     def setUp(self):
+        self.python_api = paddle.sum
         self.op_type = "reduce_sum"
         self.inputs = {'X': np.random.random((5, 6, 10)).astype("float64")}
         self.outputs = {'Out': self.inputs['X'].sum(axis=0)}
         self.attrs = {'dim': [0]}

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=True)


 class TestSumOp_fp16(OpTest):
     def setUp(self):
+        self.python_api = paddle.sum
         self.op_type = "reduce_sum"
         self.inputs = {
             'X': np.random.uniform(0, 0.1, (5, 6, 10)).astype("float16")
@@ -50,7 +53,7 @@ class TestSumOp_fp16(OpTest):
         self.gradient = self.calc_gradient()

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)

     def calc_gradient(self):
         x = self.inputs["X"]
@@ -58,7 +61,8 @@ class TestSumOp_fp16(OpTest):
         return grad,

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', user_defined_grads=self.gradient)
+        self.check_grad(
+            ['X'], 'Out', user_defined_grads=self.gradient, check_eager=True)


 @unittest.skipIf(not core.is_compiled_with_cuda(),
@@ -66,6 +70,7 @@ class TestSumOp_fp16(OpTest):
 class TestSumOp_bf16(OpTest):
     def setUp(self):
         np.random.seed(100)
+        self.python_api = paddle.sum
         self.op_type = "reduce_sum"
         self.dtype = np.uint16
         self.x = np.random.uniform(0, 0.1, (2, 5, 10)).astype(np.float32)
@@ -79,12 +84,15 @@ class TestSumOp_bf16(OpTest):
     def test_check_output(self):
         place = core.CUDAPlace(0)
-        self.check_output_with_place(place)
+        self.check_output_with_place(place, check_eager=True)

     def test_check_grad(self):
         place = core.CUDAPlace(0)
         self.check_grad_with_place(
-            place, ['X'], 'Out', user_defined_grads=self.gradient)
+            place, ['X'],
+            'Out',
+            user_defined_grads=self.gradient,
+            check_eager=True)

     def calc_gradient(self):
         x = self.x
@@ -94,6 +102,7 @@ class TestSumOp_bf16(OpTest):
 class TestSumOp_fp16_withInt(OpTest):
     def setUp(self):
+        self.python_api = paddle.sum
         self.op_type = "reduce_sum"
         self.inputs = {
             # ref to https://en.wikipedia.org/wiki/Half-precision_floating-point_format
@@ -107,7 +116,7 @@ class TestSumOp_fp16_withInt(OpTest):
         self.gradient = self.calc_gradient()

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)

     def calc_gradient(self):
         x = self.inputs["X"]
@@ -115,41 +124,47 @@ class TestSumOp_fp16_withInt(OpTest):
         return grad,

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out', user_defined_grads=self.gradient)
+        self.check_grad(
+            ['X'], 'Out', user_defined_grads=self.gradient, check_eager=True)


 class TestSumOp5D(OpTest):
     def setUp(self):
+        self.python_api = paddle.sum
         self.op_type = "reduce_sum"
         self.inputs = {
             'X': np.random.random((1, 2, 5, 6, 10)).astype("float64")
         }
         self.attrs = {'dim': [0]}
         self.outputs = {'Out': self.inputs['X'].sum(axis=0)}

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=True)


 class TestSumOp6D(OpTest):
     def setUp(self):
+        self.python_api = paddle.sum
         self.op_type = "reduce_sum"
         self.inputs = {
             'X': np.random.random((1, 1, 2, 5, 6, 10)).astype("float64")
         }
         self.attrs = {'dim': [0]}
         self.outputs = {'Out': self.inputs['X'].sum(axis=0)}

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=True)


 class TestSumOp8D(OpTest):
     def setUp(self):
+        self.python_api = paddle.sum
         self.op_type = "reduce_sum"
         self.inputs = {
             'X': np.random.random((1, 3, 1, 2, 1, 4, 3, 10)).astype("float64")
@@ -158,10 +173,10 @@ class TestSumOp8D(OpTest):
         self.outputs = {'Out': self.inputs['X'].sum(axis=(0, 3))}

     def test_check_output(self):
-        self.check_output()
+        self.check_output(check_eager=True)

     def test_check_grad(self):
-        self.check_grad(['X'], 'Out')
+        self.check_grad(['X'], 'Out', check_eager=True)


 @skip_check_grad_ci(
......
@@ -904,7 +904,18 @@ def sum(x, axis=None, dtype=None, keepdim=False, name=None):
         return (False, src_type)

     dtype_flag, dtype = get_dtype(x, dtype)
-    if paddle.in_dynamic_mode():
+    if in_dygraph_mode():
+        if reduce_all_flag:
+            axis = range(len(x.shape))
+        else:
+            axis = axis if axis != None and axis != [] else [0]
+
+        out_dtype = convert_np_dtype_to_dtype_(dtype)
+        out = _C_ops.final_state_sum(x, axis, out_dtype, keepdim)
+        return out
+
+    if _in_legacy_dygraph():
+        axis = axis if axis != None and axis != [] else [0]
         if dtype_flag:
             return _C_ops.reduce_sum(x, 'dim', axis, 'keep_dim', keepdim,
......
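In the eager branch above, a reduce-all request maps `axis` to every dimension, while an empty or `None` axis otherwise defaults to `[0]` before the call into `_C_ops.final_state_sum`. A standalone sketch of that normalization (the helper name is ours, for illustration):

```python
def _normalize_sum_axis(axis, ndim, reduce_all):
    # Hypothetical helper mirroring the eager-mode branch: reduce over
    # every dim when reduce_all is set; otherwise an empty/None axis
    # falls back to [0], matching the legacy reduce_sum convention.
    if reduce_all:
        return list(range(ndim))
    return axis if axis not in (None, []) else [0]

assert _normalize_sum_axis(None, 3, reduce_all=True) == [0, 1, 2]
assert _normalize_sum_axis([], 3, reduce_all=False) == [0]
assert _normalize_sum_axis([1, 2], 3, reduce_all=False) == [1, 2]
```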
@@ -1596,13 +1596,14 @@
 # no_need_buffer : x, y

 - api : sum
-  args : (Tensor x, int64_t[] axis={}, DataType dtype=DataType::UNDEFINED, bool keep_dim=false)
-  output : Tensor
+  args : (Tensor x, int64_t[] dims={}, DataType out_dtype=paddle::experimental::DataType::UNDEFINED, bool keep_dim=false)
+  output : Tensor(out)
   infer_meta :
     func : SumInferMeta
   kernel :
     func : sum
     data_type : x
+  backward : sum_grad

 # take_along_axis
 - api : take_along_axis
......
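This yaml entry is what the code generator consumes: the renamed `dims`/`out_dtype` arguments and the named `Tensor(out)` output define the generated `final_state_sum` signature, and `backward : sum_grad` links it to the backward entry below. A hedged illustration of the `out_dtype` slot through the public API (not the generated call itself):

```python
import paddle

x = paddle.ones([2, 3], dtype="int32")
# Passing dtype casts the result, which is what the out_dtype argument
# in the yaml signature carries down to the kernel.
out = paddle.sum(x, axis=1, dtype="int64")
print(out.dtype)  # paddle.int64, values all 3
```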
@@ -1152,6 +1152,16 @@
   kernel :
     func : subtract_grad

+- backward_api : sum_grad
+  forward : sum (Tensor x, int64_t[] dims={}, DataType out_dtype=paddle::experimental::DataType::UNDEFINED, bool keep_dim=false) -> Tensor(out)
+  args : (Tensor x, Tensor out_grad, int64_t[] dims, bool keep_dim, bool reduce_all=false)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : sum_grad
+
 - backward_api : take_along_axis_grad
   forward : take_along_axis (Tensor x, Tensor index, int axis) -> Tensor(out)
   args : (Tensor x, Tensor index, Tensor out_grad, int axis)
......
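`sum_grad` uses `UnchangedInferMeta` parameterized on `[x]`, i.e. `x_grad` inherits x's shape and dtype. That matches the math: the gradient of a reduce-sum broadcasts `out_grad` back across the reduced dims, since every input element contributes with weight 1 to its sum. A NumPy sketch of that gradient (illustrative, not the kernel):

```python
import numpy as np

def sum_grad(x, out_grad, dims, keep_dim):
    # Re-insert the reduced axes (if they were squeezed away), then
    # broadcast out_grad to x's shape.
    if not keep_dim:
        out_grad = np.expand_dims(out_grad, tuple(dims))
    return np.broadcast_to(out_grad, x.shape)

x = np.random.random((5, 6, 10))
g = np.ones((6, 10))
assert sum_grad(x, g, dims=[0], keep_dim=False).shape == (5, 6, 10)
```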