Unverified commit bf44034c, authored by chentianyu03, committed by GitHub

[Yaml]Tile and expand double grad (#42680)

* add tile double_grad yaml and test case

* add expand double yaml and test case

* add clip double grad yaml and test case

* add concat double grad yaml and test case
Parent: ba71fbea
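All four ops are (piecewise) linear, so none of the new double-grad entries needs a dedicated kernel: tile, expand, and concat apply their forward kernel to grad_x_grad, and clip reuses clip_grad as the in-range mask. A minimal dygraph sketch of the tile case (illustrative only, not part of this commit; assumes a Paddle build that includes the yaml entries added below):

import paddle

# tile is linear, so differentiating its backward (tile_grad, a
# tiled-sum) w.r.t. grad_out dispatches back to the forward tile
# kernel -- which is what the tile_double_grad entry below declares.
x = paddle.randn([3, 12])
x.stop_gradient = False
gy = paddle.randn([12, 108])  # tile of [3, 12] by [4, 9] -> [12, 108]
gy.stop_gradient = False

y = paddle.tile(x, repeat_times=[4, 9])
(gx,) = paddle.grad(y, x, grad_outputs=gy, create_graph=True)

# Second order: seeding d(gx) with ggx yields tile(ggx).
ggx = paddle.ones([3, 12])
(ggy,) = paddle.grad(gx, gy, grad_outputs=ggx)
print(ggy.shape)  # [12, 108]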
@@ -157,6 +157,9 @@ class TestExpandDoubleGradCheck(unittest.TestCase):
class TestTileDoubleGradCheck(unittest.TestCase):
    def tile_wrapper(self, x):
        return paddle.tile(x[0], [4, 9])

    @prog_scope()
    def func(self, place):
        x_shape = [3, 12]
@@ -171,6 +174,8 @@ class TestTileDoubleGradCheck(unittest.TestCase):
        gradient_checker.double_grad_check(
            [x], out, x_init=x_arr, place=place, eps=eps)
        gradient_checker.double_grad_check_for_dygraph(
            self.tile_wrapper, [x], out, x_init=x_arr, place=place)

    def test_grad(self):
        places = [fluid.CPUPlace()]
@@ -181,6 +186,9 @@ class TestTileDoubleGradCheck(unittest.TestCase):
class TestExpandV2DoubleGradCheck(unittest.TestCase):
    def expand_wrapper(self, x):
        return paddle.expand(x[0], [4, 12])

    @prog_scope()
    def func(self, place):
        x_shape = [1, 12]
@@ -195,6 +203,8 @@ class TestExpandV2DoubleGradCheck(unittest.TestCase):
        gradient_checker.double_grad_check(
            [x], out, x_init=x_arr, place=place, eps=eps)
        gradient_checker.double_grad_check_for_dygraph(
            self.expand_wrapper, [x], out, x_init=x_arr, place=place)

    def test_grad(self):
        places = [fluid.CPUPlace()]
@@ -253,6 +263,9 @@ class TestUnsqueezeDoubleGradCheck(unittest.TestCase):
class TestClipDoubleGradCheck(unittest.TestCase):
    def clip_wrapper(self, x):
        return paddle.clip(x[0], min=-1., max=1.)

    @prog_scope()
    def func(self, place):
        x_shape = [2, 4, 10]
@@ -264,6 +277,8 @@ class TestClipDoubleGradCheck(unittest.TestCase):
        x_arr = np.random.uniform(-5., 5., x_shape).astype(dtype)
        gradient_checker.double_grad_check([x], out, x_init=x_arr, place=place)
        gradient_checker.double_grad_check_for_dygraph(
            self.clip_wrapper, [x], out, x_init=x_arr, place=place)

    def test_grad(self):
        places = [fluid.CPUPlace()]
@@ -357,6 +372,9 @@ class TestConstantPadDoubleGradCheckCase1(TestConstantPadDoubleGradCheck):
class TestConcatDoubleGradCheck(unittest.TestCase):
    def concat_wrapper(self, x):
        return paddle.concat(x, axis=0)

    @prog_scope()
    def func(self, place):
        x_shape = [2, 3, 4, 5]
@@ -373,6 +391,11 @@ class TestConcatDoubleGradCheck(unittest.TestCase):
        gradient_checker.double_grad_check(
            [x1, x2], out, x_init=[x1_arr, x2_arr], place=place)
        gradient_checker.double_grad_check_for_dygraph(
            self.concat_wrapper, [x1, x2],
            out,
            x_init=[x1_arr, x2_arr],
            place=place)

    def test_grad(self):
        places = [fluid.CPUPlace()]
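A note on the harness (a description of the checker as we understand it, not part of this diff): gradient_checker.double_grad_check verifies the second-order gradient numerically in static graph mode, while double_grad_check_for_dygraph additionally runs the op in eager mode through the wrapper passed as its first argument and compares the two results. The wrapper receives the inputs as a list, which is why each wrapper above indexes x[0] (or forwards the whole list, for concat) and must repeat the op's attributes. Assuming the file is the usual python/paddle/fluid/tests/unittests/test_nn_grad.py, one check can be run standalone:

python -m unittest test_nn_grad.TestTileDoubleGradCheck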
@@ -251,6 +251,16 @@
  kernel :
    func : cholesky_solve_grad

- backward_api : clip_double_grad
  forward : clip_grad (Tensor x, Tensor grad_out, Scalar min = 0., Scalar max = 0.) -> Tensor(grad_x)
  args : (Tensor x, Tensor grad_x_grad, Scalar min = 0., Scalar max = 0.)
  output : Tensor(grad_out_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : clip_grad

- backward_api : clip_grad
  forward : clip (Tensor x, Scalar min, Scalar max) -> Tensor(out)
  args : (Tensor x, Tensor out_grad, Scalar min = 0., Scalar max = 0.)
@@ -260,6 +270,18 @@
    param : [x]
  kernel :
    func : clip_grad
  backward : clip_double_grad

- backward_api : concat_double_grad
  forward : concat_grad (Tensor[] x, Tensor grad_out, Scalar axis) -> Tensor[](grad_x)
  args : (Tensor[] grad_x_grad, Scalar axis = 0)
  output : Tensor(grad_out_grad)
  infer_meta :
    func : ConcatInferMeta
    param : [grad_x_grad, axis]
  kernel :
    func : concat
  no_need_buffer : x

- backward_api : concat_grad
  forward : concat (Tensor[] x, Scalar axis) -> Tensor(out)
@@ -271,6 +293,7 @@
  kernel :
    func : concat_grad
  no_need_buffer : x
  backward : concat_double_grad

- backward_api : conj_grad
  forward : conj (Tensor x) -> Tensor(out)
@@ -582,6 +605,15 @@
    func : expand_as_grad
  no_need_buffer : x

- backward_api : expand_double_grad
  forward : expand_grad (Tensor x, Tensor grad_out, IntArray shape) -> Tensor(grad_x)
  args : (Tensor grad_x_grad, IntArray shape)
  output : Tensor(grad_out_grad)
  infer_meta :
    func : ExpandInferMeta
  kernel :
    func : expand

- backward_api : expand_grad
  forward : expand (Tensor x, IntArray shape) -> Tensor(out)
  args : (Tensor x, Tensor out_grad, IntArray shape)
@@ -592,6 +624,7 @@
  kernel :
    func : expand_grad
  no_need_buffer : x
  backward : expand_double_grad

- backward_api : expm1_grad
  forward : expm1 (Tensor x) -> Tensor(out)
@@ -1881,6 +1914,15 @@
  kernel :
    func : thresholded_relu_grad

- backward_api : tile_double_grad
  forward : tile_grad (Tensor x, Tensor grad_out, IntArray repeat_times) -> Tensor(grad_x)
  args : (Tensor grad_x_grad, IntArray repeat_times)
  output : Tensor(grad_out_grad)
  infer_meta :
    func : TileInferMeta
  kernel :
    func : tile

- backward_api : tile_grad
  forward : tile (Tensor x, IntArray repeat_times) -> Tensor(out)
  args : (Tensor x, Tensor out_grad, IntArray repeat_times)
@@ -1891,6 +1933,7 @@
  kernel :
    func : tile_grad
  no_need_buffer : x
  backward : tile_double_grad

- backward_api : top_k_grad
  forward : top_k (Tensor x, Scalar k, int axis = -1, bool largest = true, bool sorted = true) -> Tensor(out), Tensor(indices)
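On the yaml mechanics: each *_double_grad entry records under forward : the signature of the first-order backward API it differentiates, and the backward : field added to the first-order entry links the chain so the code generator wires up the higher-order graph. clip is the only op here whose double grad still needs x, since the (min < x < max) mask depends on it; the others consume only grad_x_grad. A dygraph sketch of the clip identity (illustrative only; paddle.uniform and paddle.logical_and are standard Paddle APIs, but the check presumes a build with these entries):

import paddle

x = paddle.uniform([2, 4, 10], min=-5., max=5.)
x.stop_gradient = False
gy = paddle.randn([2, 4, 10])
gy.stop_gradient = False

y = paddle.clip(x, min=-1., max=1.)
(gx,) = paddle.grad(y, x, grad_outputs=gy, create_graph=True)

# grad_out_grad is grad_x_grad masked to the in-range region, i.e.
# clip_grad applied to ggx -- matching the entry's "func : clip_grad".
ggx = paddle.randn([2, 4, 10])
(ggy,) = paddle.grad(gx, gy, grad_outputs=ggx)
mask = paddle.cast(paddle.logical_and(x > -1., x < 1.), 'float32')
print(float((ggy - ggx * mask).abs().max()))  # ~0.0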