Unverified · Commit d6b5d91c · Authored by Charles-hit · Committed by GitHub

Support cumsum, flip and reverse backward by reusing the forward op (#45892)

Parent bc2265f8
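(Reader's note, not part of the original commit message.) The YAML changes below drop the standalone `infer_meta`/`kernel` registrations for `cumsum_grad`, `flip_grad` and `reverse_grad` and instead define each backward by `invoke`-ing its forward op on `out_grad`; the new Python tests then add double- and triple-gradient checks for the three ops. For cumsum the reused-forward rule is `invoke : cumsum(out_grad, axis, flatten, exclusive, !reverse)`. The NumPy sketch below (hypothetical, independent of Paddle) checks the underlying identity: the vector-Jacobian product of a cumulative sum is a cumulative sum of the upstream gradient taken in the opposite direction.

```python
# Sketch (not part of the PR): the VJP of y = cumsum(x) equals a reversed
# cumsum of the upstream gradient, matching
# `invoke : cumsum(out_grad, axis, flatten, exclusive, !reverse)`.
import numpy as np

n = 5
g = np.random.rand(n)                 # upstream gradient dL/dy

# Jacobian of cumsum is a lower-triangular matrix of ones, so dL/dx = J^T g.
J = np.tril(np.ones((n, n)))
grad_ref = J.T @ g

# Reused forward: cumulative sum of g taken in reverse order.
grad_invoke = np.cumsum(g[::-1])[::-1]

np.testing.assert_allclose(grad_invoke, grad_ref)
```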
@@ -592,9 +592,6 @@
 - backward_api : cumsum_grad
   forward : cumsum(Tensor x, Scalar axis, bool flatten, bool exclusive, bool reverse) -> Tensor(out)
-  infer_meta :
-    func : UnchangedInferMeta
-    param : [x]
   args : (Tensor out_grad, Scalar axis, bool flatten, bool exclusive, bool reverse)
   output : Tensor(x_grad)
   invoke : cumsum(out_grad, axis, flatten, exclusive, !reverse)
@@ -884,11 +881,7 @@
   forward : flip (Tensor x, int[] axis) -> Tensor(out)
   args : (Tensor out_grad, int[] axis)
   output : Tensor(x_grad)
-  infer_meta :
-    func : UnchangedInferMeta
-    param: [out_grad]
-  kernel :
-    func : flip
+  invoke : flip(out_grad, axis)

 - backward_api : floor_grad
   forward : floor(Tensor x) -> Tensor(out)
@@ -1971,8 +1964,6 @@
   forward : reverse (Tensor x, IntArray axis) -> Tensor(out)
   args : (Tensor out_grad, IntArray axis)
   output : Tensor(x_grad)
-  infer_meta :
-    func : ReverseInferMeta
   invoke : reverse(out_grad, axis)

 - backward_api : roi_align_grad
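(Reader's note.) For `flip_grad` and `reverse_grad` above, the backward is simply the forward applied to `out_grad`: flipping or reversing along a fixed set of axes is a linear, self-inverse permutation, so its Jacobian is its own transpose. A small NumPy sketch (hypothetical, not from the PR) of that adjoint identity:

```python
# Sketch (not part of the PR): flipping is its own inverse, so the VJP of
# y = flip(x, axes) is flip(out_grad, axes), matching `invoke : flip(out_grad, axis)`.
import numpy as np

axes = (0, 1)
x = np.random.rand(3, 2, 2)
g = np.random.rand(3, 2, 2)            # upstream gradient dL/dy

# <flip(x), g> == <x, flip(g)>, hence dL/dx = flip(g).
lhs = np.sum(np.flip(x, axes) * g)
rhs = np.sum(x * np.flip(g, axes))
np.testing.assert_allclose(lhs, rhs)
```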
@@ -24,6 +24,9 @@ import paddle.fluid.core as core
 import paddle.fluid as fluid
 from paddle.fluid import compiler, Program, program_guard
 import paddle.inference as paddle_infer
+import gradient_checker
+from decorator_helper import prog_scope
+import paddle.fluid.layers as layers


 class TestCumsumOp(unittest.TestCase):
@@ -380,5 +383,79 @@ class TestTensorAxis(unittest.TestCase):
         np.testing.assert_allclose(static_out[0], infer_out)
+
+
+class TestCumsumDoubleGradCheck(unittest.TestCase):
+
+    def cumsum_wrapper(self, x):
+        return paddle.cumsum(x[0], 0)
+
+    @prog_scope()
+    def func(self, place):
+        # The shape of the input variable must be specified explicitly and may not include -1.
+        eps = 0.005
+        dtype = np.float64
+        data = layers.data('data', [3, 4], False, dtype)
+        data.persistable = True
+        out = paddle.cumsum(data, 0)
+        data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype)
+
+        gradient_checker.double_grad_check([data], out, x_init=[data_arr],
+                                           place=place, eps=eps)
+        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
+        gradient_checker.double_grad_check_for_dygraph(
+            self.cumsum_wrapper, [data], out, x_init=[data_arr], place=place)
+
+    def test_grad(self):
+        paddle.enable_static()
+        places = [fluid.CPUPlace()]
+        if core.is_compiled_with_cuda():
+            places.append(fluid.CUDAPlace(0))
+        for p in places:
+            self.func(p)
+
+
+class TestCumsumTripleGradCheck(unittest.TestCase):
+
+    def cumsum_wrapper(self, x):
+        return paddle.cumsum(x[0], 0)
+
+    @prog_scope()
+    def func(self, place):
+        # The shape of the input variable must be specified explicitly and may not include -1.
+        eps = 0.005
+        dtype = np.float32
+        data = layers.data('data', [2, 3], False, dtype)
+        data.persistable = True
+        out = paddle.cumsum(data, 0)
+        data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype)
+
+        gradient_checker.triple_grad_check([data], out, x_init=[data_arr],
+                                           place=place, eps=eps)
+        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
+        gradient_checker.triple_grad_check_for_dygraph(
+            self.cumsum_wrapper, [data], out, x_init=[data_arr], place=place)
+
+    def test_grad(self):
+        paddle.enable_static()
+        places = [fluid.CPUPlace()]
+        if core.is_compiled_with_cuda():
+            places.append(fluid.CUDAPlace(0))
+        for p in places:
+            self.func(p)


 if __name__ == '__main__':
     unittest.main()
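(Reader's note.) The tests above rely on Paddle's `gradient_checker` utilities under static graph mode. As a quick sanity check of the same first-order gradient in dygraph mode, the sketch below (hypothetical, assuming a Paddle 2.x install) compares `paddle.cumsum`'s analytic gradient against a central finite difference; the helper `finite_diff_grad` is ad hoc, not a Paddle API.

```python
# Sketch (not part of the PR): spot-check paddle.cumsum's analytic gradient
# against a central finite difference of loss = sum(cumsum(x, axis=0)).
import numpy as np
import paddle


def finite_diff_grad(f, x, eps=1e-4):
    # Numerically estimate d(sum(f(x)))/dx element by element.
    grad = np.zeros_like(x)
    for i in range(x.size):
        xp, xm = x.copy(), x.copy()
        xp.flat[i] += eps
        xm.flat[i] -= eps
        grad.flat[i] = (f(xp).sum() - f(xm).sum()) / (2 * eps)
    return grad


x_np = np.random.uniform(-1, 1, (3, 4)).astype('float64')

x = paddle.to_tensor(x_np, stop_gradient=False)
paddle.cumsum(x, axis=0).sum().backward()

numeric = finite_diff_grad(lambda a: np.cumsum(a, axis=0), x_np)
np.testing.assert_allclose(x.grad.numpy(), numeric, rtol=1e-6, atol=1e-8)
```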
@@ -21,6 +21,9 @@ import paddle.fluid as fluid
 import paddle.fluid.core as core
 from paddle.fluid import Program, program_guard
 from op_test import OpTest
+import gradient_checker
+from decorator_helper import prog_scope
+import paddle.fluid.layers as layers


 class TestFlipOp_API(unittest.TestCase):
@@ -137,6 +140,80 @@ class TestFlipOpNegAxis(TestFlipOp):
         self.axis = [-1]
+
+
+class TestFlipDoubleGradCheck(unittest.TestCase):
+
+    def flip_wrapper(self, x):
+        return paddle.flip(x[0], [0, 1])
+
+    @prog_scope()
+    def func(self, place):
+        # The shape of the input variable must be specified explicitly and may not include -1.
+        eps = 0.005
+        dtype = np.float32
+        data = layers.data('data', [3, 2, 2], False, dtype)
+        data.persistable = True
+        out = paddle.flip(data, [0, 1])
+        data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype)
+
+        gradient_checker.double_grad_check([data], out, x_init=[data_arr],
+                                           place=place, eps=eps)
+        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
+        gradient_checker.double_grad_check_for_dygraph(
+            self.flip_wrapper, [data], out, x_init=[data_arr], place=place)
+
+    def test_grad(self):
+        paddle.enable_static()
+        places = [fluid.CPUPlace()]
+        if core.is_compiled_with_cuda():
+            places.append(fluid.CUDAPlace(0))
+        for p in places:
+            self.func(p)
+
+
+class TestFlipTripleGradCheck(unittest.TestCase):
+
+    def flip_wrapper(self, x):
+        return paddle.flip(x[0], [0, 1])
+
+    @prog_scope()
+    def func(self, place):
+        # The shape of the input variable must be specified explicitly and may not include -1.
+        eps = 0.005
+        dtype = np.float32
+        data = layers.data('data', [3, 2, 2], False, dtype)
+        data.persistable = True
+        out = paddle.flip(data, [0, 1])
+        data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype)
+
+        gradient_checker.triple_grad_check([data], out, x_init=[data_arr],
+                                           place=place, eps=eps)
+        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
+        gradient_checker.triple_grad_check_for_dygraph(
+            self.flip_wrapper, [data], out, x_init=[data_arr], place=place)
+
+    def test_grad(self):
+        paddle.enable_static()
+        places = [fluid.CPUPlace()]
+        if core.is_compiled_with_cuda():
+            places.append(fluid.CUDAPlace(0))
+        for p in places:
+            self.func(p)


 if __name__ == "__main__":
     paddle.enable_static()
     unittest.main()
@@ -21,6 +21,9 @@ from op_test import OpTest
 import paddle
 import paddle.fluid as fluid
 from paddle.fluid import core
+import gradient_checker
+from decorator_helper import prog_scope
+import paddle.fluid.layers as layers
 from paddle.fluid.framework import program_guard, Program
 from test_attribute_var import UnittestBase
@@ -267,6 +270,80 @@ class TestReverseAxisListTensor(TestReverseAxisTensor):
         return out
+
+
+class TestReverseDoubleGradCheck(unittest.TestCase):
+
+    def reverse_wrapper(self, x):
+        return fluid.layers.reverse(x[0], [0, 1])
+
+    @prog_scope()
+    def func(self, place):
+        # The shape of the input variable must be specified explicitly and may not include -1.
+        eps = 0.005
+        dtype = np.float64
+        data = layers.data('data', [3, 4], False, dtype)
+        data.persistable = True
+        out = fluid.layers.reverse(data, [0, 1])
+        data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype)
+
+        gradient_checker.double_grad_check([data], out, x_init=[data_arr],
+                                           place=place, eps=eps)
+        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
+        gradient_checker.double_grad_check_for_dygraph(
+            self.reverse_wrapper, [data], out, x_init=[data_arr], place=place)
+
+    def test_grad(self):
+        paddle.enable_static()
+        places = [fluid.CPUPlace()]
+        if core.is_compiled_with_cuda():
+            places.append(fluid.CUDAPlace(0))
+        for p in places:
+            self.func(p)
+
+
+class TestReverseTripleGradCheck(unittest.TestCase):
+
+    def reverse_wrapper(self, x):
+        return fluid.layers.reverse(x[0], [0, 1])
+
+    @prog_scope()
+    def func(self, place):
+        # The shape of the input variable must be specified explicitly and may not include -1.
+        eps = 0.005
+        dtype = np.float32
+        data = layers.data('data', [2, 3], False, dtype)
+        data.persistable = True
+        out = fluid.layers.reverse(data, [0, 1])
+        data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype)
+
+        gradient_checker.triple_grad_check([data], out, x_init=[data_arr],
+                                           place=place, eps=eps)
+        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
+        gradient_checker.triple_grad_check_for_dygraph(
+            self.reverse_wrapper, [data], out, x_init=[data_arr], place=place)
+
+    def test_grad(self):
+        paddle.enable_static()
+        places = [fluid.CPUPlace()]
+        if core.is_compiled_with_cuda():
+            places.append(fluid.CUDAPlace(0))
+        for p in places:
+            self.func(p)


 if __name__ == '__main__':
     paddle.enable_static()
     unittest.main()