Unverified commit 440b96d3, authored by Charles-hit, committed by GitHub

support sign op backward refuse forward (#46002)

Parent 491e4df3
@@ -2096,6 +2096,12 @@
   optional : grad_grad_out_grad
   inplace : (grad_grad_x -> fwd_grad_out_grad)
 
+- backward_op : sign_grad
+  forward : sign (Tensor x) -> Tensor(out)
+  args : (Tensor out_grad)
+  output : Tensor(x_grad)
+  invoke : scale(out_grad, 0.0, 0.0, true)
+
 - backward_op : silu_grad
   forward : silu (Tensor x) -> Tensor(out)
   args : (Tensor x, Tensor out_grad)
......
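Since d(sign(x))/dx is zero wherever it is defined, the sign_grad entry added above needs no forward tensors at all ("backward refuse forward"): it simply invokes scale(out_grad, 0.0, 0.0, true), which yields a zero-filled tensor shaped like out_grad. A minimal dygraph sketch of the resulting behavior, assuming a Paddle build that includes this patch (this snippet is illustrative and not part of the diff):

import paddle

x = paddle.uniform([1, 4], min=-1.0, max=1.0)
x.stop_gradient = False
y = paddle.sign(x)
# The registered backward reduces to scale(out_grad, 0.0, 0.0, true),
# so the gradient of sign is identically zero.
(x_grad,) = paddle.grad(y, x)
print(x_grad)  # zeros with shape [1, 4]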
@@ -2377,6 +2377,7 @@
     func : UnchangedInferMeta
   kernel :
     func : sign
+  backward : sign_grad
 
 - op : silu
   args : (Tensor x)
......
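With backward : sign_grad registered on the forward op, appending a backward pass through paddle.sign in a static-graph program no longer fails for lack of a grad op. A small static-graph sketch, again assuming a build with this patch:

import numpy as np
import paddle

paddle.enable_static()
main, startup = paddle.static.Program(), paddle.static.Program()
with paddle.static.program_guard(main, startup):
    x = paddle.static.data('x', [1, 4], dtype='float32')
    x.stop_gradient = False
    y = paddle.sign(x)
    # paddle.static.gradients builds the grad graph through the newly
    # registered sign_grad (which reduces to a zero fill).
    (x_grad,) = paddle.static.gradients([y], [x])

exe = paddle.static.Executor(paddle.CPUPlace())
exe.run(startup)
res, = exe.run(main,
               feed={'x': np.random.uniform(-1, 1, [1, 4]).astype('float32')},
               fetch_list=[x_grad])
print(res)  # all zeros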
@@ -19,7 +19,11 @@
 import numpy as np
 from op_test import OpTest
 import paddle
 import paddle.fluid as fluid
+import paddle.fluid.core as core
 from paddle.fluid import Program, program_guard
+import gradient_checker
+from decorator_helper import prog_scope
+import paddle.fluid.layers as layers
 
 class TestSignOp(OpTest):
@@ -91,6 +95,80 @@ class TestSignAPI(unittest.TestCase):
             paddle.sign(input4)
 
 
+class TestSignDoubleGradCheck(unittest.TestCase):
+
+    def sign_wrapper(self, x):
+        return paddle.sign(x[0])
+
+    @prog_scope()
+    def func(self, place):
+        # The shape of the input variable must be specified explicitly
+        # and must not include -1.
+        eps = 0.005
+        dtype = np.float32
+
+        data = layers.data('data', [1, 4], False, dtype)
+        data.persistable = True
+        out = paddle.sign(data)
+        data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype)
+
+        gradient_checker.double_grad_check([data],
+                                           out,
+                                           x_init=[data_arr],
+                                           place=place,
+                                           eps=eps)
+        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
+        gradient_checker.double_grad_check_for_dygraph(self.sign_wrapper,
+                                                       [data],
+                                                       out,
+                                                       x_init=[data_arr],
+                                                       place=place)
+
+    def test_grad(self):
+        paddle.enable_static()
+        places = [fluid.CPUPlace()]
+        if core.is_compiled_with_cuda():
+            places.append(fluid.CUDAPlace(0))
+        for p in places:
+            self.func(p)
+
+
+class TestSignTripleGradCheck(unittest.TestCase):
+
+    def sign_wrapper(self, x):
+        return paddle.sign(x[0])
+
+    @prog_scope()
+    def func(self, place):
+        # The shape of the input variable must be specified explicitly
+        # and must not include -1.
+        eps = 0.005
+        dtype = np.float32
+
+        data = layers.data('data', [1, 4], False, dtype)
+        data.persistable = True
+        out = paddle.sign(data)
+        data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype)
+
+        gradient_checker.triple_grad_check([data],
+                                           out,
+                                           x_init=[data_arr],
+                                           place=place,
+                                           eps=eps)
+        fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
+        gradient_checker.triple_grad_check_for_dygraph(self.sign_wrapper,
+                                                       [data],
+                                                       out,
+                                                       x_init=[data_arr],
+                                                       place=place)
+
+    def test_grad(self):
+        paddle.enable_static()
+        places = [fluid.CPUPlace()]
+        if core.is_compiled_with_cuda():
+            places.append(fluid.CUDAPlace(0))
+        for p in places:
+            self.func(p)
+
+
 if __name__ == "__main__":
     paddle.enable_static()
     unittest.main()