BaiXuePrincess / Paddle (forked from PaddlePaddle / Paddle)
Commit c01bcbf6
Authored Mar 19, 2022 by phlrain
Parent: 111ee988

fix bugs

Showing 11 changed files with 289 additions and 46 deletions (+289 -46). Whitespace-only changes are hidden in the diffs below.
Changed files:

python/paddle/fluid/layers/layer_function_generator.py  +2 -1
python/paddle/fluid/layers/loss.py  +1 -1
python/paddle/fluid/layers/metric_op.py  +3 -3
python/paddle/fluid/tests/unittests/op_test.py  +1 -0
python/paddle/fluid/tests/unittests/test_filter_by_instag_op.py  +1 -0
python/paddle/fluid/tests/unittests/test_hash_op.py  +2 -0
python/paddle/fluid/tests/unittests/test_split_op.py  +1 -0
python/paddle/metric/metrics.py  +3 -3
python/paddle/tensor/manipulation.py  +2 -3
python/paddle/utils/code_gen/api.yaml  +132 -10
python/paddle/utils/code_gen/backward.yaml  +141 -25
python/paddle/fluid/layers/layer_function_generator.py (+2 -1)

@@ -260,7 +260,8 @@ def generate_activation_fn(op_type):
     if in_dygraph_mode():
         if _in_eager_mode():
             op = getattr(_C_ops, "final_state_" + op_type)
-            return op(x)
+            if op:
+                return op(x)
         op = getattr(_C_ops, op_type)
         return op(x)
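The guard added here only helps if the getattr lookup can come back empty, e.g. when the final-state kernel for an op has not been generated yet. A minimal sketch of the intended dispatch, using an illustrative stand-in for _C_ops (not Paddle's real extension module) and a lookup that yields None when the op is absent:

# hypothetical stand-in for Paddle's _C_ops extension module
class _FakeCOps:
    def relu(self, x):
        return max(x, 0.0)  # legacy kernel entry

_C_ops = _FakeCOps()
op_type = "relu"

# look up the final-state kernel first; fall back to the legacy one
op = getattr(_C_ops, "final_state_" + op_type, None)
if op:
    print(op(1.5))
else:
    print(getattr(_C_ops, op_type)(1.5))  # prints 1.5 via the legacy path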
python/paddle/fluid/layers/loss.py (+1 -1)

@@ -21,7 +21,7 @@ from paddle.utils import deprecated
 from . import nn
 from .layer_function_generator import templatedoc
 from ..layer_helper import LayerHelper
-from ..framework import Variable, in_dygraph_mode, static_only, in_dygraph_mode
+from ..framework import Variable, in_dygraph_mode, static_only, in_dygraph_mode, _in_eager_mode
 from .. import core
 from ..data_feeder import check_variable_and_dtype, check_type
 from ..param_attr import ParamAttr
python/paddle/fluid/layers/metric_op.py (+3 -3)

@@ -87,9 +87,9 @@ def accuracy(input, label, k=1, correct=None, total=None):
         _k = k.numpy().item(0) if isinstance(k, Variable) else k
         topk_out, topk_indices = _C_ops.top_k_v2(input, 'k', _k, 'sorted',
                                                  False)
-        if _in_eager_mode():
-            _acc = _C_ops.final_state_accuracy(topk_out, topk_indices, label)
-            return _acc
+        # if _in_eager_mode():
+        #     _acc = _C_ops.final_state_accuracy(topk_out, topk_indices, label)
+        #     return _acc
         _acc, _, _ = _C_ops.accuracy(topk_out, topk_indices, label, correct,
                                      total)
         return _acc
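With the eager fast path commented out (here and in python/paddle/metric/metrics.py below), every dygraph call goes through the legacy _C_ops.accuracy kernel again. For reference, a NumPy sketch of the top-k accuracy those kernels compute; names and shapes are illustrative, not Paddle's API:

import numpy as np

def topk_accuracy(logits, labels, k=1):
    # a sample counts as correct if its label is among the k highest logits
    topk = np.argsort(-logits, axis=1)[:, :k]
    hit = (topk == labels.reshape(-1, 1)).any(axis=1)
    return hit.mean()

logits = np.array([[0.1, 0.7, 0.2], [0.3, 0.4, 0.3]])
labels = np.array([1, 0])
print(topk_accuracy(logits, labels, k=1))  # 0.5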
python/paddle/fluid/tests/unittests/op_test.py (+1 -0)

@@ -1501,6 +1501,7 @@ class OpTest(unittest.TestCase):
                     .recursive_sequence_lengths(), expect[1],
                     "Output (" + out_name + ") has different lod at " +
                     str(place) + " in eager dygraph mode")
+        if check_eager:
             with fluid.dygraph.base.guard():
                 with _test_eager_guard():
                     self.assertListEqual(
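check_eager now gates the eager-mode comparison that follows. The nested guard pattern it wraps is the usual one in these tests; a sketch, assuming the 2.3-era import path for _test_eager_guard (the exact path may vary by Paddle version):

import paddle.fluid as fluid
from paddle.fluid.framework import _test_eager_guard  # assumed import path

def run_eager_checks(check_eager, checks):
    if check_eager:                       # the newly added gate
        with fluid.dygraph.base.guard():  # enter dygraph mode
            with _test_eager_guard():     # route dygraph through the eager branch
                checks()                  # e.g. the assertListEqual above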
python/paddle/fluid/tests/unittests/test_filter_by_instag_op.py (+1 -0)

@@ -285,4 +285,5 @@ class TestFilterByInstagOp7(OpTest):
 if __name__ == '__main__':
+    paddle.enable_static()
     unittest.main()
python/paddle/fluid/tests/unittests/test_hash_op.py (+2 -0)

@@ -16,6 +16,7 @@ import unittest
 import numpy as np
 from op_test import OpTest
 import paddle.fluid as fluid
+import paddle

 class TestHashOp(OpTest):

@@ -140,4 +141,5 @@ class TestHashOpError(unittest.TestCase):
 if __name__ == "__main__":
+    paddle.enable_static()
     unittest.main()
python/paddle/fluid/tests/unittests/test_split_op.py (+1 -0)

@@ -460,4 +460,5 @@ class API_TestDygraphSplit(unittest.TestCase):
 if __name__ == '__main__':
+    paddle.enable_static()
     unittest.main()
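All three test files gain the same preamble: OpTest-style tests build static-graph programs, but Paddle 2.x starts in dynamic (dygraph) mode, so the mode has to be switched before unittest.main() runs. A minimal sketch of the switch:

import paddle

print(paddle.in_dynamic_mode())  # True: Paddle 2.x defaults to dygraph
paddle.enable_static()           # switch to static graph mode for OpTest
print(paddle.in_dynamic_mode())  # False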
python/paddle/metric/metrics.py (+3 -3)

@@ -798,9 +798,9 @@ def accuracy(input, label, k=1, correct=None, total=None, name=None):
         total = _varbase_creator(dtype="int32")
         topk_out, topk_indices = paddle.topk(input, k=k)
-        if _in_eager_mode():
-            _acc = _C_ops.final_state_accuracy(topk_out, topk_indices, label)
-            return _acc
+        # if _in_eager_mode():
+        #     _acc = _C_ops.final_state_accuracy(topk_out, topk_indices, label)
+        #     return _acc
         _acc, _, _ = _C_ops.accuracy(topk_out, topk_indices, label, correct,
                                      total)
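The public entry point is unchanged for callers. A short usage sketch of paddle.metric.accuracy with illustrative values:

import paddle

predictions = paddle.to_tensor([[0.1, 0.7, 0.2],
                                [0.3, 0.4, 0.3]], dtype='float32')
label = paddle.to_tensor([[1], [2]], dtype='int64')
acc = paddle.metric.accuracy(input=predictions, label=label, k=1)
print(acc.numpy())  # [0.5]: only the first sample's argmax matches its label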
python/paddle/tensor/manipulation.py (+2 -3)

@@ -948,9 +948,8 @@ def split(x, num_or_sections, axis=0, name=None):
             print(out1.shape)  # [3, 3, 5]
             print(out2.shape)  # [3, 3, 5]
     """
-    if paddle.in_dygraph_mode():
-        if _in_eager_mode():
-            return _C_ops.final_state_split(x, num_or_sections, dim)
+    if paddle.in_dynamic_mode() and _in_eager_mode():
+        return _C_ops.final_state_split(x, num_or_sections, dim)
     return paddle.fluid.layers.split(
         input=x, num_or_sections=num_or_sections, dim=axis, name=name)
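For context, the behavior both branches must preserve, mirroring the docstring shown in the diff:

import paddle

x = paddle.rand([3, 9, 5])
out0, out1, out2 = paddle.split(x, num_or_sections=3, axis=1)
print(out0.shape)  # [3, 3, 5]
print(out1.shape)  # [3, 3, 5]
print(out2.shape)  # [3, 3, 5]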
python/paddle/utils/code_gen/api.yaml (+132 -10)

@@ -166,6 +166,7 @@
   kernel :
     func : relu
   inplace : (x -> out)
+  backward : relu_grad

 - api : scale
   args : (Tensor x, Scalar scale, float bias, bool bias_after_scale)
@@ -191,7 +192,8 @@
   infer_meta :
     func : SoftmaxInferMeta
   kernel :
-    func : sotfmax
+    func : softmax
+  backward : softmax_grad

 - api : split
   args : (Tensor x, ScalarArray num_or_sections, Scalar axis)
@@ -342,15 +344,15 @@
   backward : segment_pool_grad

 # accuracy
-- api : accuracy
-  args : (Tensor x, Tensor indices, Tensor label)
-  output : Tensor(accuracy), Tensor(correct), Tensor(total)
-  infer_meta :
-    func : AccuracyInferMeta
-  kernel :
-    func : accuracy
-    dtype : x
+# - api : accuracy
+#   args : (Tensor x, Tensor indices, Tensor label)
+#   output : Tensor(accuracy), Tensor(correct), Tensor(total)
+#   infer_meta :
+#     func : AccuracyInferMeta
+#   kernel :
+#     func : accuracy
+#     dtype : x

 # sin
 - api : sin
@@ -475,6 +477,126 @@
     func : sigmoid
   backward : sigmoid_grad

+# tan
+- api : tan
+  args : (Tensor x)
+  output : Tensor
+  infer_meta :
+    func : UnchangedInferMeta
+  kernel :
+    func : tan
+  backward : tan_grad
+
+# tanh_shrink
+- api : tanh_shrink
+  args : (Tensor x)
+  output : Tensor
+  infer_meta :
+    func : UnchangedInferMeta
+  kernel :
+    func : tanh_shrink
+  backward : tanh_shrink_grad
+
+# silu
+- api : silu
+  args : (Tensor x)
+  output : Tensor
+  infer_meta :
+    func : UnchangedInferMeta
+  kernel :
+    func : silu
+  backward : silu_grad
+
+# logsigmoid
+- api : logsigmoid
+  args : (Tensor x)
+  output : Tensor
+  infer_meta :
+    func : UnchangedInferMeta
+  kernel :
+    func : logsigmoid
+  backward : logsigmoid_grad
+
+# leaky_relu
+- api : leaky_relu
+  args : (Tensor x, float alpha)
+  output : Tensor
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : leaky_relu
+  backward : leaky_relu_grad
+
+# thresholded_relu
+- api : thresholded_relu
+  args : (Tensor x, float threshold)
+  output : Tensor
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : thresholded_relu
+  backward : thresholded_relu_grad
+
+# soft_shrink
+- api : soft_shrink
+  args : (Tensor x, float lambda)
+  output : Tensor
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : soft_shrink
+  backward : soft_shrink_grad
+
+# hard_shrink
+- api : hard_shrink
+  args : (Tensor x, float threshold)
+  output : Tensor
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : hard_shrink
+  backward : hard_shrink_grad
+
+# elu
+- api : elu
+  args : (Tensor x, float alpha)
+  output : Tensor
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : elu
+  backward : elu_grad
+
+# brelu
+- api : brelu
+  args : (Tensor x, float t_min, float t_max)
+  output : Tensor
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : brelu
+  backward : brelu_grad
+
+# hard_sigmoid
+- api : hard_sigmoid
+  args : (Tensor x, float slope, float offset)
+  output : Tensor
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : hard_sigmoid
+  backward : hard_sigmoid_grad
+
 # arg_min # int64 ???? dtype
 - api : argmin
   args : (Tensor x, int64 axis, bool keepdims, bool flatten, int dtype)
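Entries like these drive Paddle's API code generator under python/paddle/utils/code_gen/. A minimal sketch of reading one entry, assuming PyYAML is available (illustrative, not the real generator):

import yaml

entry = """
- api : tan
  args : (Tensor x)
  output : Tensor
  infer_meta :
    func : UnchangedInferMeta
  kernel :
    func : tan
  backward : tan_grad
"""
(api,) = yaml.safe_load(entry)
# YAML strips the space before ':', so the keys read naturally
print(api['api'], api['kernel']['func'], api['backward'])  # tan tan tan_grad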
python/paddle/utils/code_gen/backward.yaml (+141 -25)

@@ -75,11 +75,11 @@
   kernel :
     func : diagonal_grad

-# - backward_api : split_grad
-#   forward : split (Tensor x, ScalarArray num_or_sections, Scalar axis) -> Tensor[](out)
-#   args : (Tensor[] out_grad, Scalar axis)
-#   output : Tensor(x_grad)
-#   invoke : concat( out_grad, axis)
+- backward_api : split_grad
+  forward : split (Tensor x, ScalarArray num_or_sections, Scalar axis) -> Tensor[](out)
+  args : (Tensor[] out_grad, Scalar axis)
+  output : Tensor(x_grad)
+  invoke : concat( out_grad, axis)

 # TODO(zhangyunfei) The config of double grad and triple grad will be supported in the future.
 # - backward_api : matmul_triple_grad
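split_grad can be uncommented because the backward of split is simply a concat of the piece gradients along the same axis, which is what invoke : concat( out_grad, axis) expresses. A NumPy sketch of that identity:

import numpy as np

x = np.arange(12.0).reshape(3, 4)
parts = np.split(x, 2, axis=1)            # forward: split into two pieces
grads = [np.ones_like(p) for p in parts]  # upstream gradient per piece
x_grad = np.concatenate(grads, axis=1)    # backward: concat(out_grad, axis)
assert x_grad.shape == x.shape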
@@ -165,11 +165,11 @@
 - backward_api : cos_grad
   forward : cos (Tensor x) -> Tensor(out)
-  args : (Tensor out, Tensor out_grad)
+  args : (Tensor x, Tensor out_grad)
   output : Tensor(x_grad)
   infer_meta :
     func : UnchangedInferMeta
-    param : [out]
+    param : [x]
   kernel :
     func : cos_grad
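This args fix matters because the derivative of cos depends on the forward input, not its output: d/dx cos(x) = -sin(x). A NumPy sketch; the same reasoning applies to the eight trigonometric grads in the next hunk:

import numpy as np

x = np.array([0.0, 0.5, 1.0])
out = np.cos(x)                 # the forward output alone cannot form the gradient
out_grad = np.ones_like(x)
x_grad = -np.sin(x) * out_grad  # needs x, hence args : (Tensor x, Tensor out_grad)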
@@ -185,91 +185,91 @@
 - backward_api : acos_grad
   forward : acos (Tensor x) -> Tensor(out)
-  args : (Tensor out, Tensor out_grad)
+  args : (Tensor x, Tensor out_grad)
   output : Tensor(x_grad)
   infer_meta :
     func : UnchangedInferMeta
-    param : [out]
+    param : [x]
   kernel :
     func : acos_grad

 - backward_api : sin_grad
   forward : sin (Tensor x) -> Tensor(out)
-  args : (Tensor out, Tensor out_grad)
+  args : (Tensor x, Tensor out_grad)
   output : Tensor(x_grad)
   infer_meta :
     func : UnchangedInferMeta
-    param : [out]
+    param : [x]
   kernel :
     func : sin_grad

 - backward_api : asin_grad
   forward : asin (Tensor x) -> Tensor(out)
-  args : (Tensor out, Tensor out_grad)
+  args : (Tensor x, Tensor out_grad)
   output : Tensor(x_grad)
   infer_meta :
     func : UnchangedInferMeta
-    param : [out]
+    param : [x]
   kernel :
     func : asin_grad

 - backward_api : atan_grad
   forward : atan (Tensor x) -> Tensor(out)
-  args : (Tensor out, Tensor out_grad)
+  args : (Tensor x, Tensor out_grad)
   output : Tensor(x_grad)
   infer_meta :
     func : UnchangedInferMeta
-    param : [out]
+    param : [x]
   kernel :
     func : atan_grad

 - backward_api : sinh_grad
   forward : sinh (Tensor x) -> Tensor(out)
-  args : (Tensor out, Tensor out_grad)
+  args : (Tensor x, Tensor out_grad)
   output : Tensor(x_grad)
   infer_meta :
     func : UnchangedInferMeta
-    param : [out]
+    param : [x]
   kernel :
     func : sinh_grad

 - backward_api : cosh_grad
   forward : cosh (Tensor x) -> Tensor(out)
-  args : (Tensor out, Tensor out_grad)
+  args : (Tensor x, Tensor out_grad)
   output : Tensor(x_grad)
   infer_meta :
     func : UnchangedInferMeta
-    param : [out]
+    param : [x]
   kernel :
     func : cosh_grad

 - backward_api : asinh_grad
   forward : asinh (Tensor x) -> Tensor(out)
-  args : (Tensor out, Tensor out_grad)
+  args : (Tensor x, Tensor out_grad)
   output : Tensor(x_grad)
   infer_meta :
     func : UnchangedInferMeta
-    param : [out]
+    param : [x]
   kernel :
     func : asinh_grad

 - backward_api : acosh_grad
   forward : acosh (Tensor x) -> Tensor(out)
-  args : (Tensor out, Tensor out_grad)
+  args : (Tensor x, Tensor out_grad)
   output : Tensor(x_grad)
   infer_meta :
     func : UnchangedInferMeta
-    param : [out]
+    param : [x]
   kernel :
     func : acosh_grad

 - backward_api : atanh_grad
   forward : atanh (Tensor x) -> Tensor(out)
-  args : (Tensor out, Tensor out_grad)
+  args : (Tensor x, Tensor out_grad)
   output : Tensor(x_grad)
   infer_meta :
     func : UnchangedInferMeta
-    param : [out]
+    param : [x]
   kernel :
     func : atanh_grad
@@ -293,6 +293,122 @@
   kernel :
     func : sigmoid_grad

+- backward_api : tan_grad
+  forward : tan (Tensor x) -> Tensor(out)
+  args : (Tensor x, Tensor out_grad)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : relu_grad
+
+- backward_api : tanh_shrink_grad
+  forward : tanh_shrink (Tensor x) -> Tensor(out)
+  args : (Tensor x, Tensor out_grad)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : tanh_shrink_grad
+
+- backward_api : silu_grad
+  forward : silu (Tensor x) -> Tensor(out)
+  args : (Tensor x, Tensor out_grad)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : silu_grad
+
+- backward_api : logsigmoid_grad
+  forward : logsigmoid (Tensor x) -> Tensor(out)
+  args : (Tensor x, Tensor out_grad)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : logsigmoid_grad
+
+- backward_api : leaky_relu_grad
+  forward : leaky_relu (Tensor x, float alpha) -> Tensor(out)
+  args : (Tensor x, Tensor out_grad, float alpha)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : leaky_relu_grad
+
+- backward_api : thresholded_relu_grad
+  forward : thresholded_relu (Tensor x, float threshold) -> Tensor(out)
+  args : (Tensor x, Tensor out_grad, float threshold)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : thresholded_relu_grad
+
+- backward_api : soft_shrink_grad
+  forward : soft_shrink (Tensor x, float lambda) -> Tensor(out)
+  args : (Tensor x, Tensor out_grad, float lambda)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : soft_shrink_grad
+
+- backward_api : hard_shrink_grad
+  forward : hard_shrink (Tensor x, float threshold) -> Tensor(out)
+  args : (Tensor x, Tensor out_grad, float threshold)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : hard_shrink_grad
+
+- backward_api : elu_grad
+  forward : elu (Tensor x, float alpha) -> Tensor(out)
+  args : (Tensor x, Tensor out, Tensor out_grad, float alpha)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : elu_grad
+
+- backward_api : brelu_grad
+  forward : brelu (Tensor x, float t_min, float t_max) -> Tensor(out)
+  args : (Tensor x, Tensor out_grad, float t_min, float t_max)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : brelu_grad
+
+- backward_api : hard_sigmoid_grad
+  forward : hard_sigmoid (Tensor x, float slope, float offset) -> Tensor(out)
+  args : (Tensor out, Tensor out_grad, float slope, float offset)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [out]
+  kernel :
+    func : hard_sigmoid_grad
+
 - backward_api : argsort_grad
   forward : argsort (Tensor x, int axis, bool descending) -> Tensor(out), Tensor(indices)
   args : (Tensor indices, Tensor x, Tensor out_grad, int axis, bool descending)
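One detail worth noting from the entries above: elu_grad takes both x and out. For x <= 0, elu(x) = alpha * (exp(x) - 1), so the derivative alpha * exp(x) equals out + alpha and can reuse the forward output. A NumPy sketch:

import numpy as np

alpha = 1.0
x = np.array([-1.0, 0.5])
out = np.where(x > 0, x, alpha * (np.exp(x) - 1.0))  # forward elu
x_grad = np.where(x > 0, 1.0, out + alpha)           # = alpha * exp(x) for x <= 0
print(x_grad)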