Unverified commit e862753c, authored by xiaoguoguo626807 and committed by GitHub

revert_tanh_double_grad (#54062)

Parent: 4fa8a676
@@ -66,7 +66,6 @@ black_ops_list = [
 # kernel performs same to it.
 prim_white_list = [
     "matmul_double_grad",
-    "tanh_double_grad",
     "subtract_double_grad",
     "silu_double_grad",
 ]
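Note on the hunk above: this diff does not show how `prim_white_list` is consumed by the code generator, so the snippet below is only a hypothetical illustration of the effect of dropping `"tanh_double_grad"` from such a list; the helper `takes_prim_path` is invented for this sketch and is not a Paddle API.

```python
prim_white_list = [
    "matmul_double_grad",
    "subtract_double_grad",
    "silu_double_grad",
]

def takes_prim_path(op_name: str) -> bool:
    # Hypothetical gate: only white-listed double-grad ops keep the
    # special-cased handling in the generated backward code.
    return op_name in prim_white_list

# After this commit, "tanh_double_grad" is no longer special-cased:
assert not takes_prim_path("tanh_double_grad")
assert takes_prim_path("matmul_double_grad")
```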
@@ -2017,6 +2017,7 @@
     func : tanh_double_grad
   composite : tanh_double_grad(out, grad_out, grad_x_grad, out_grad, grad_out_grad)
   inplace : (grad_x_grad -> grad_out_grad)
+  backward : tanh_triple_grad

 - backward_op : tanh_grad
   forward : tanh (Tensor x) -> Tensor(out)
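Re-adding `backward : tanh_triple_grad` to the `tanh_double_grad` entry restores the chain needed for third-order differentiation of `tanh`. A minimal usage sketch (the values in the trailing comment are the analytic derivatives of tanh at x = 0.5, given for orientation, not output captured from this build):

```python
import paddle

x = paddle.to_tensor([0.5], dtype="float64", stop_gradient=False)
y = paddle.tanh(x)

# dy/dx = 1 - tanh(x)^2
(g1,) = paddle.grad(y, x, create_graph=True)
# d2y/dx2 = -2 * tanh(x) * (1 - tanh(x)^2)
(g2,) = paddle.grad(g1, x, create_graph=True)
# d3y/dx3 = (1 - tanh(x)^2) * (6 * tanh(x)^2 - 2); this third call is the one
# that relies on tanh_triple_grad being registered behind tanh_double_grad.
(g3,) = paddle.grad(g2, x)

print(g1.item(), g2.item(), g3.item())  # ~0.7864, ~-0.7269, ~-0.5652
```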
@@ -2042,6 +2043,18 @@
     func : tanh_shrink_grad
   inplace : (out_grad -> x_grad)

+- backward_op : tanh_triple_grad
+  forward : tanh_double_grad (Tensor out, Tensor grad_out_forward, Tensor grad_x_grad_forward) -> Tensor(grad_out_new), Tensor(grad_out_grad)
+  args : (Tensor out, Tensor grad_out_forward, Tensor grad_x_grad_forward, Tensor grad_out_new_grad, Tensor grad_out_grad_grad)
+  output : Tensor(out_grad), Tensor(grad_out_forward_grad), Tensor(grad_x_grad_forward_grad)
+  infer_meta :
+    func : GeneralTernaryGradInferMeta
+    param : [out, out, grad_x_grad_forward]
+  kernel :
+    func : tanh_triple_grad
+  inplace : (grad_x_grad_forward -> grad_out_forward_grad)
+  optional : grad_out_new_grad, grad_out_grad_grad
+
 - backward_op : temporal_shift_grad
   forward : temporal_shift(Tensor x, int seg_num, float shift_ratio = 0.25f, str data_format = "NCHW") -> Tensor(out)
   args : (Tensor out_grad, int seg_num, float shift_ratio, str data_format)
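For readers of the `tanh_triple_grad` entry above: with y = out, g = grad_out_forward, d = grad_x_grad_forward, and incoming cotangents u = grad_out_new_grad, v = grad_out_grad_grad, differentiating the double-grad outputs by hand gives the relations below. This is a derivation for orientation only; the authoritative formulas are in Paddle's activation grad kernels.

```latex
\begin{aligned}
\text{double grad:}\quad & \text{grad\_out\_new} = -2\,y\,g\,d,
  \qquad \text{grad\_out\_grad} = (1 - y^{2})\,d,\\[4pt]
\text{triple grad:}\quad & \text{out\_grad} = -2\,d\,(g\,u + y\,v),\\
  & \text{grad\_out\_forward\_grad} = -2\,y\,d\,u,\\
  & \text{grad\_x\_grad\_forward\_grad} = (1 - y^{2})\,v \;-\; 2\,y\,g\,u.
\end{aligned}
```

Because the three outputs match the shapes of `out`, `out`, and `grad_x_grad_forward` respectively, the entry uses `GeneralTernaryGradInferMeta` with `param : [out, out, grad_x_grad_forward]`.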
@@ -2323,7 +2323,7 @@
   attrs : [bool use_mkldnn = false, bool use_cudnn = false]

 - op : tanh
-  backward : tanh_grad, tanh_double_grad (tanh_grad_grad)
+  backward : tanh_grad, tanh_double_grad (tanh_grad_grad), tanh_triple_grad
   inputs :
     x : X
   outputs :