Commit fcd32950 (unverified)
Repository: PaddlePaddle / Paddle
Authored by zyfncg on Jun 15, 2022; committed via GitHub on Jun 15, 2022
Parent: 346efe96

Rename yaml (#43470)

* rename yaml file
* fix merge conflict
* fix infrt
Showing 12 changed files with 4,724 additions and 4,712 deletions.
paddle/fluid/eager/auto_code_generator/final_state_generator/CMakeLists.txt (+2, -2)
paddle/phi/api/lib/CMakeLists.txt (+18, -18)
python/paddle/utils/code_gen/api.yaml (+0, -2357)
python/paddle/utils/code_gen/backward.yaml (+0, -2328)
python/paddle/utils/code_gen/cross_validate.py (+1, -1)
python/paddle/utils/code_gen/legacy_api.yaml (+2357, -0)
python/paddle/utils/code_gen/legacy_backward.yaml (+2317, -0)
python/paddle/utils/code_gen/new_api.yaml (+0, -0)
python/paddle/utils/code_gen/new_backward.yaml (+0, -0)
tools/infrt/generate_phi_kernel_dialect.py (+11, -3)
tools/infrt/get_phi_kernel_function.sh (+1, -1)
tools/infrt/get_phi_kernel_info.py (+17, -2)
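As a quick cross-check of the summary (purely illustrative, not part of the commit), the per-file counts above add up to the totals in the "Showing 12 changed files" line:

```python
# Sanity check: the per-file additions/deletions should sum to the stated totals.
changes = {
    "paddle/fluid/eager/auto_code_generator/final_state_generator/CMakeLists.txt": (2, 2),
    "paddle/phi/api/lib/CMakeLists.txt": (18, 18),
    "python/paddle/utils/code_gen/api.yaml": (0, 2357),
    "python/paddle/utils/code_gen/backward.yaml": (0, 2328),
    "python/paddle/utils/code_gen/cross_validate.py": (1, 1),
    "python/paddle/utils/code_gen/legacy_api.yaml": (2357, 0),
    "python/paddle/utils/code_gen/legacy_backward.yaml": (2317, 0),
    "python/paddle/utils/code_gen/new_api.yaml": (0, 0),
    "python/paddle/utils/code_gen/new_backward.yaml": (0, 0),
    "tools/infrt/generate_phi_kernel_dialect.py": (11, 3),
    "tools/infrt/get_phi_kernel_function.sh": (1, 1),
    "tools/infrt/get_phi_kernel_info.py": (17, 2),
}
added = sum(a for a, _ in changes.values())
deleted = sum(d for _, d in changes.values())
assert (added, deleted) == (4724, 4712)
```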
paddle/fluid/eager/auto_code_generator/final_state_generator/CMakeLists.txt

```diff
 set(api_yaml_path
-    "${PADDLE_SOURCE_DIR}/python/paddle/utils/code_gen/api.yaml,${PADDLE_SOURCE_DIR}/python/paddle/utils/code_gen/new_api.yaml,${PADDLE_SOURCE_DIR}/python/paddle/utils/code_gen/sparse_api.yaml"
+    "${PADDLE_SOURCE_DIR}/python/paddle/utils/code_gen/api.yaml,${PADDLE_SOURCE_DIR}/python/paddle/utils/code_gen/legacy_api.yaml,${PADDLE_SOURCE_DIR}/python/paddle/utils/code_gen/sparse_api.yaml"
 )
 set(backward_yaml_path
-    "${PADDLE_SOURCE_DIR}/python/paddle/utils/code_gen/backward.yaml,${PADDLE_SOURCE_DIR}/python/paddle/utils/code_gen/new_backward.yaml,${PADDLE_SOURCE_DIR}/python/paddle/utils/code_gen/sparse_bw_api.yaml"
+    "${PADDLE_SOURCE_DIR}/python/paddle/utils/code_gen/backward.yaml,${PADDLE_SOURCE_DIR}/python/paddle/utils/code_gen/legacy_backward.yaml,${PADDLE_SOURCE_DIR}/python/paddle/utils/code_gen/sparse_bw_api.yaml"
 )
 set(tmp_forwards_cc_path
     "${PADDLE_SOURCE_DIR}/paddle/fluid/eager/api/generated/eager_generated/forwards/tmp_dygraph_functions.cc"
```
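The api_yaml_path and backward_yaml_path variables above are comma-joined lists of YAML files that are handed to the eager code generator. As a rough illustration only (this is not Paddle's actual generator code, and the flag name is an assumption), such a comma-separated argument would typically be split and loaded like this:

```python
# Hypothetical sketch: split a comma-joined "--api_yaml_path" argument,
# as assembled by the CMake above, and load each YAML file with PyYAML.
import argparse
import yaml  # pip install pyyaml

parser = argparse.ArgumentParser()
parser.add_argument("--api_yaml_path", type=str,
                    help="comma-separated list of api yaml files")
args = parser.parse_args()

apis = []
for path in args.api_yaml_path.split(","):
    with open(path) as f:
        apis.extend(yaml.safe_load(f) or [])  # each file is a list of api entries

print(f"loaded {len(apis)} api definitions")
```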
paddle/phi/api/lib/CMakeLists.txt

```diff
@@ -22,8 +22,8 @@ set(api_gen_base ${CMAKE_SOURCE_DIR}/python/paddle/utils/code_gen/api_base.py)
 # forward api file
 set(api_gen_file ${CMAKE_SOURCE_DIR}/python/paddle/utils/code_gen/api_gen.py)
 set(api_yaml_file ${CMAKE_SOURCE_DIR}/python/paddle/utils/code_gen/api.yaml)
-set(new_api_yaml_file
-    ${CMAKE_SOURCE_DIR}/python/paddle/utils/code_gen/new_api.yaml)
+set(legacy_api_yaml_file
+    ${CMAKE_SOURCE_DIR}/python/paddle/utils/code_gen/legacy_api.yaml)
 set(api_header_file ${CMAKE_SOURCE_DIR}/paddle/phi/api/include/api.h)
 set(api_source_file ${CMAKE_SOURCE_DIR}/paddle/phi/api/lib/api.cc)
 set(api_header_file_tmp ${api_header_file}.tmp)
@@ -34,8 +34,8 @@ set(bw_api_gen_file
     ${CMAKE_SOURCE_DIR}/python/paddle/utils/code_gen/backward_api_gen.py)
 set(bw_api_yaml_file
     ${CMAKE_SOURCE_DIR}/python/paddle/utils/code_gen/backward.yaml)
-set(new_bw_api_yaml_file
-    ${CMAKE_SOURCE_DIR}/python/paddle/utils/code_gen/new_backward.yaml)
+set(legacy_bw_api_yaml_file
+    ${CMAKE_SOURCE_DIR}/python/paddle/utils/code_gen/legacy_backward.yaml)
 set(bw_api_header_file
     ${CMAKE_SOURCE_DIR}/paddle/phi/api/backward/backward_api.h)
 set(bw_api_source_file ${CMAKE_SOURCE_DIR}/paddle/phi/api/lib/backward_api.cc)
@@ -111,21 +111,21 @@ set(generated_argument_mapping_path
 message(
   "parse api yamls:
 - ${api_yaml_file}
-- ${new_api_yaml_file}
+- ${legacy_api_yaml_file}
 - ${bw_api_yaml_file}
-- ${new_bw_api_yaml_file}")
+- ${legacy_bw_api_yaml_file}")
 execute_process(
   WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}/python/paddle/utils/code_gen
   COMMAND ${CMAKE_COMMAND} -E make_directory ${parsed_api_dir}
   COMMAND ${PYTHON_EXECUTABLE} parse_api.py --api_yaml_path ./api.yaml
           --output_path ./parsed_apis/api.parsed.yaml
-  COMMAND ${PYTHON_EXECUTABLE} parse_api.py --api_yaml_path ./new_api.yaml
-          --output_path ./parsed_apis/new_api.parsed.yaml
+  COMMAND ${PYTHON_EXECUTABLE} parse_api.py --api_yaml_path ./legacy_api.yaml
+          --output_path ./parsed_apis/legacy_api.parsed.yaml
   COMMAND ${PYTHON_EXECUTABLE} parse_api.py --api_yaml_path ./backward.yaml
           --output_path ./parsed_apis/backward_api.parsed.yaml --backward
   COMMAND
-    ${PYTHON_EXECUTABLE} parse_api.py --api_yaml_path ./new_backward.yaml
-    --output_path ./parsed_apis/new_backward_api.parsed.yaml --backward
+    ${PYTHON_EXECUTABLE} parse_api.py --api_yaml_path ./legacy_backward.yaml
+    --output_path ./parsed_apis/legacy_backward_api.parsed.yaml --backward
   RESULTS_VARIABLE _results)
 foreach(_result in ${_results})
   if(${_result})
@@ -141,9 +141,9 @@ execute_process(
   WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}/python/paddle/utils/code_gen
   COMMAND
     ${PYTHON_EXECUTABLE} cross_validate.py --forward_yaml_paths
-    ./parsed_apis/api.parsed.yaml ./parsed_apis/new_api.parsed.yaml
+    ./parsed_apis/api.parsed.yaml ./parsed_apis/legacy_api.parsed.yaml
     --backward_yaml_paths ./parsed_apis/backward_api.parsed.yaml
-    ./parsed_apis/new_backward_api.parsed.yaml
+    ./parsed_apis/legacy_backward_api.parsed.yaml
   RESULT_VARIABLE _result)
 if(${_result})
   message(FATAL_ERROR "api validation failed, exiting.")
@@ -158,8 +158,8 @@ execute_process(
   WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}/python/paddle/utils/code_gen
   COMMAND
     ${PYTHON_EXECUTABLE} generate_op.py --api_yaml_path
-    ./parsed_apis/new_api.parsed.yaml --backward_api_yaml_path
-    ./parsed_apis/new_backward_api.parsed.yaml --output_op_path
+    ./parsed_apis/api.parsed.yaml --backward_api_yaml_path
+    ./parsed_apis/backward_api.parsed.yaml --output_op_path
     "${generated_op_path}.tmp" --output_arg_map_path
     "${generated_argument_mapping_path}.tmp"
   RESULT_VARIABLE _result)
@@ -209,7 +209,7 @@ add_custom_command(
   COMMAND ${PYTHON_EXECUTABLE} -m pip install pyyaml
   COMMAND
     ${PYTHON_EXECUTABLE} ${api_gen_file} --api_yaml_path ${api_yaml_file}
-    ${new_api_yaml_file} --api_header_path ${api_header_file_tmp}
+    ${legacy_api_yaml_file} --api_header_path ${api_header_file_tmp}
     --api_header_path ${api_header_file_tmp} --api_source_path
     ${api_source_file_tmp}
   COMMAND ${CMAKE_COMMAND} -E copy_if_different ${api_header_file_tmp}
@@ -226,7 +226,7 @@ add_custom_command(
     ${bw_api_source_file_tmp}
   COMMAND
     ${PYTHON_EXECUTABLE} ${bw_api_gen_file} --backward_yaml_path
-    ${bw_api_yaml_file} ${new_bw_api_yaml_file} --backward_header_path
+    ${bw_api_yaml_file} ${legacy_bw_api_yaml_file} --backward_header_path
     ${bw_api_header_file_tmp} --backward_source_path ${bw_api_source_file_tmp}
   COMMAND ${CMAKE_COMMAND} -E copy_if_different ${bw_api_header_file_tmp}
           ${bw_api_header_file}
@@ -293,7 +293,7 @@ add_custom_command(
   OUTPUT ${dygraph_api_header_file} ${dygraph_api_source_file}
   COMMAND
     ${PYTHON_EXECUTABLE} ${im_api_gen_file} --api_yaml_path ${api_yaml_file}
-    ${new_api_yaml_file} --sparse_api_yaml_path ${sparse_api_yaml_file}
+    ${legacy_api_yaml_file} --sparse_api_yaml_path ${sparse_api_yaml_file}
     --dygraph_api_header_path ${dygraph_api_header_file_tmp}
     --dygraph_api_source_path ${dygraph_api_source_file_tmp}
   COMMAND ${CMAKE_COMMAND} -E copy_if_different ${dygraph_api_header_file_tmp}
@@ -309,7 +309,7 @@ add_custom_command(
   OUTPUT ${wrapped_infermeta_header_file} ${wrapped_infermeta_source_file}
   COMMAND
     ${PYTHON_EXECUTABLE} ${wrapped_infermeta_gen_file} --api_yaml_path
-    ${api_yaml_file} ${new_api_yaml_file} --wrapped_infermeta_header_path
+    ${api_yaml_file} ${legacy_api_yaml_file} --wrapped_infermeta_header_path
     ${wrapped_infermeta_header_file} --wrapped_infermeta_source_path
     ${wrapped_infermeta_source_file}
   DEPENDS ${api_yaml_file} ${wrapped_infermeta_gen_file} ${api_gen_base}
```
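The parse_api.py and cross_validate.py steps above first normalize each YAML file into parsed_apis/*.parsed.yaml and then validate the forward and backward definitions against each other. Purely as a hypothetical sketch of that kind of cross-file check (the real scripts in python/paddle/utils/code_gen do considerably more, and the duplicate-name rule below is an assumption):

```python
# Hypothetical sketch of a cross-file consistency check over the parsed yamls.
# File names follow the CMake snippet above; the checks themselves are assumed.
import yaml

def load_entries(*paths):
    entries = []
    for path in paths:
        with open(path) as f:
            entries.extend(yaml.safe_load(f) or [])
    return entries

forward = load_entries("parsed_apis/api.parsed.yaml",
                       "parsed_apis/legacy_api.parsed.yaml")
backward = load_entries("parsed_apis/backward_api.parsed.yaml",
                        "parsed_apis/legacy_backward_api.parsed.yaml")

# An api name should be defined only once across the new-style and legacy files.
names = [e["api"] for e in forward]
dupes = {n for n in names if names.count(n) > 1}
assert not dupes, f"duplicate api definitions: {sorted(dupes)}"
print(f"{len(forward)} forward and {len(backward)} backward apis look consistent")
```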
python/paddle/utils/code_gen/api.yaml (entire file removed)
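Each entry in this file describes one operator-level API: its name (api), a C++-style signature (args), its outputs, the InferMeta function used for shape and dtype inference, the PHI kernel it dispatches to, and optionally a backward API, inplace/view relations, and kernel-selection hints such as data_type, backend, and param. The removed listing follows; first, a minimal PyYAML sketch (the file path is assumed) of how entries in this schema can be read:

```python
# Minimal sketch: iterate the api entries of this yaml schema.
# The path is an assumption; field names are taken from the listing below.
import yaml

with open("python/paddle/utils/code_gen/api.yaml") as f:
    api_list = yaml.safe_load(f)

for entry in api_list:
    name = entry["api"]                               # e.g. "abs"
    args = entry["args"]                              # e.g. "(Tensor x)"
    infer_meta = entry.get("infer_meta", {}).get("func")
    kernel = entry.get("kernel", {}).get("func")
    backward = entry.get("backward")                  # e.g. "abs_grad", optional
    print(name, args, infer_meta, kernel, backward)
```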
```yaml
- api : abs
  args : (Tensor x)
  output : Tensor
  infer_meta :
    func : RealAndImagInferMeta
  kernel :
    func : abs
  backward : abs_grad

# accuracy
- api : accuracy
  args : (Tensor x, Tensor indices, Tensor label)
  output : Tensor(accuracy), Tensor(correct), Tensor(total)
  infer_meta :
    func : AccuracyInferMeta
  kernel :
    func : accuracy
    dtype : x

# acos
- api : acos
  args : (Tensor x)
  output : Tensor
  infer_meta :
    func : UnchangedInferMeta
  kernel :
    func : acos
  backward : acos_grad

# acosh
- api : acosh
  args : (Tensor x)
  output : Tensor
  infer_meta :
    func : UnchangedInferMeta
  kernel :
    func : acosh
  backward : acosh_grad

- api : adadelta
  args : (Tensor param, Tensor grad, Tensor avg_squared_grad, Tensor avg_squared_update, float rho, float epsilon)
  output : Tensor(param_out), Tensor(moment_out), Tensor(inf_norm_out)
  infer_meta :
    func : AdadeltaInferMeta
  kernel :
    func : adadelta

- api : adam
  args : (Tensor param, Tensor grad, Tensor learning_rate, Tensor moment1, Tensor moment2, Tensor beta1_pow, Tensor beta2_pow, Tensor master_param, Tensor skip_update, Scalar beta1, Scalar beta2, Scalar epsilon, bool lazy_mode, int64_t min_row_size_to_use_multithread, bool multi_precision, bool use_global_beta_pow)
  output : Tensor(param_out), Tensor(moment1_out), Tensor(moment2_out), Tensor(beta1_pow_out), Tensor(beta2_pow_out), Tensor(master_param_outs)
  optional : master_param, skip_update
  invoke : adam_impl(param, grad, learning_rate, moment1, moment2, beta1_pow, beta2_pow, master_param, skip_update, beta1, beta2, epsilon, lazy_mode, min_row_size_to_use_multithread, multi_precision, use_global_beta_pow)

- api : adamax
  args : (Tensor param, Tensor grad, Tensor learning_rate, Tensor moment, Tensor inf_norm, Tensor beta1_pow, float beta1, float beta2, float epsilon)
  output : Tensor(param_out), Tensor(avg_squared_grad_out), Tensor(avg_squared_update_out)
  infer_meta :
    func : AdamaxInferMeta
  kernel :
    func : adamax

- api : adamw
  args : (Tensor param, Tensor grad, Tensor learning_rate, Tensor moment1, Tensor moment2, Tensor beta1_pow, Tensor beta2_pow, Tensor master_param, Tensor skip_update, Scalar beta1, Scalar beta2, Scalar epsilon, float lr_ratio, float coeff, bool with_decay, bool lazy_mode, int64_t min_row_size_to_use_multithread, bool multi_precision, bool use_global_beta_pow)
  output : Tensor(param_out), Tensor(moment1_out), Tensor(moment2_out), Tensor(beta1_pow_out), Tensor(beta2_pow_out), Tensor(master_param_outs)
  optional : master_param, skip_update
  invoke : adamw_impl(param, grad, learning_rate, moment1, moment2, beta1_pow, beta2_pow, master_param, skip_update, beta1, beta2, epsilon, lr_ratio, coeff, with_decay, lazy_mode, min_row_size_to_use_multithread, multi_precision, use_global_beta_pow)

- api : add
  args : (Tensor x, Tensor y)
  output : Tensor
  infer_meta :
    func : ElementwiseInferMeta
  kernel :
    func : add
  backward : add_grad

- api : add_n
  args : (Tensor[] x)
  output : Tensor
  infer_meta :
    func : AddNInferMeta
  kernel :
    func : add_n
  backward : add_n_grad

- api : addmm
  args : (Tensor input, Tensor x, Tensor y, float alpha, float beta)
  output : Tensor
  infer_meta :
    func : AddmmInferMeta
  kernel :
    func : addmm
  backward : addmm_grad

- api : all
  args : (Tensor x, int64_t[] dims={}, bool keep_dim=false)
  output : Tensor(out)
  infer_meta :
    func : ReduceInferMeta
  kernel :
    func : all

- api : allclose
  args : (Tensor x, Tensor y, Scalar rtol, Scalar atol, bool equal_nan)
  output : Tensor(out)
  infer_meta :
    func : AllValueCompareInferMeta
    param : [x, y]
  kernel :
    func : allclose

- api : any
  args : (Tensor x, int64_t[] dims={}, bool keep_dim=false)
  output : Tensor(out)
  infer_meta :
    func : ReduceInferMeta
  kernel :
    func : any

- api : arange
  args : (Tensor start, Tensor end, Tensor step, DataType dtype, Place place={})
  output : Tensor
  infer_meta :
    func : ArangeInferMeta
    param : [start, end, step]
  kernel :
    func : arange
    param : [start, end, step]
    data_type : dtype
    backend : place
  data_transform :
    support_trans_dtype : start, end, step

# arg_max
- api : argmax
  args : (Tensor x, int64_t axis, bool keepdims, bool flatten, int dtype)
  output : Tensor
  infer_meta :
    func : ArgMinMaxInferMeta
  kernel :
    func : arg_max

# arg_min
- api : argmin
  args : (Tensor x, int64_t axis, bool keepdims, bool flatten, int dtype)
  output : Tensor
  infer_meta :
    func : ArgMinMaxInferMeta
  kernel :
    func : arg_min

# argsort
- api : argsort
  args : (Tensor x, int axis, bool descending)
  output : Tensor(out), Tensor(indices)
  infer_meta :
    func : ArgsortInferMeta
  kernel :
    func : argsort
  backward : argsort_grad

# asin
- api : asin
  args : (Tensor x)
  output : Tensor
  infer_meta :
    func : UnchangedInferMeta
  kernel :
    func : asin
  backward : asin_grad

# asinh
- api : asinh
  args : (Tensor x)
  output : Tensor
  infer_meta :
    func : UnchangedInferMeta
  kernel :
    func : asinh
  backward : asinh_grad

# assign
- api : assign
  args : (Tensor x)
  output : Tensor
  infer_meta :
    func : UnchangedInferMeta
  kernel :
    func : assign
  backward : assign_grad

- api : assign_out_
  args : (Tensor x, Tensor output)
  output : Tensor(out)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : assign
    param : [x]
  inplace : (output -> out)
  backward : assign_out__grad

# atan
- api : atan
  args : (Tensor x)
  output : Tensor
  infer_meta :
    func : UnchangedInferMeta
  kernel :
    func : atan
  backward : atan_grad

- api : atan2
  args : (Tensor x, Tensor y)
  output : Tensor
  infer_meta :
    func : Atan2InferMeta
  kernel :
    func : atan2
  backward : atan2_grad

# atanh
- api : atanh
  args : (Tensor x)
  output : Tensor
  infer_meta :
    func : UnchangedInferMeta
  kernel :
    func : atanh
  backward : atanh_grad

# auc
- api : auc
  args : (Tensor x, Tensor label, Tensor stat_pos, Tensor stat_neg, str curve, int num_thresholds, int slide_steps)
  output : Tensor(auc), Tensor(stat_pos_out), Tensor(stat_neg_out)
  infer_meta :
    func : AucInferMeta
  kernel :
    func : auc

# batch_norm
- api : batch_norm
  args : (Tensor x, Tensor scale, Tensor bias, Tensor mean, Tensor variance, float momentum, float epsilon, str data_layout, bool is_test, bool use_global_stats, bool trainable_statistics, bool fuse_with_relu)
  output : Tensor(out), Tensor(mean_out), Tensor(variance_out), Tensor(saved_mean), Tensor(saved_variance), Tensor(reserve_space)
  invoke : batch_norm_impl(x, scale, bias, mean, variance, momentum, epsilon, data_layout, is_test, use_global_stats, trainable_statistics, fuse_with_relu)
  backward : batch_norm_grad

- api : bce_loss
  args : (Tensor input, Tensor label)
  output : Tensor
  infer_meta :
    func : BCELossInferMeta
  kernel :
    func : bce_loss
  backward : bce_loss_grad

# bernoulli
- api : bernoulli
  args : (Tensor x)
  output : Tensor
  infer_meta :
    func : UnchangedInferMeta
  kernel :
    func : bernoulli

# bitwise_and
- api : bitwise_and
  args : (Tensor x, Tensor y)
  output : Tensor
  infer_meta :
    func : ElementwiseInferMeta
  kernel :
    func : bitwise_and

# bitwise_not
- api : bitwise_not
  args : (Tensor x)
  output : Tensor
  infer_meta :
    func : UnchangedInferMeta
  kernel :
    func : bitwise_not

# bitwise_or
- api : bitwise_or
  args : (Tensor x, Tensor y)
  output : Tensor
  infer_meta :
    func : ElementwiseInferMeta
  kernel :
    func : bitwise_or

# bitwise_xor
- api : bitwise_xor
  args : (Tensor x, Tensor y)
  output : Tensor
  infer_meta :
    func : ElementwiseInferMeta
  kernel :
    func : bitwise_xor
```
# brelu
-
api
:
brelu
args
:
(Tensor x, float t_min, float t_max)
output
:
Tensor
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
x
]
kernel
:
func
:
brelu
backward
:
brelu_grad
-
api
:
cast
args
:
(Tensor x, DataType out_dtype)
output
:
Tensor
infer_meta
:
func
:
CastInferMeta
kernel
:
func
:
cast
param
:
[
x
,
out_dtype
]
data_type
:
x
backward
:
cast_grad
-
api
:
ceil
args
:
(Tensor x)
output
:
Tensor(out)
infer_meta
:
func
:
UnchangedInferMeta
kernel
:
func
:
ceil
backward
:
ceil_grad
-
api
:
celu
args
:
(Tensor x, float alpha)
output
:
Tensor(out)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
x
]
kernel
:
func
:
celu
backward
:
celu_grad
# cholesky
-
api
:
cholesky
args
:
(Tensor x, bool upper)
output
:
Tensor
infer_meta
:
func
:
CholeskyInferMeta
kernel
:
func
:
cholesky
backward
:
cholesky_grad
# cholesky_solve
-
api
:
cholesky_solve
args
:
(Tensor x, Tensor y, bool upper)
output
:
Tensor
infer_meta
:
func
:
CholeskySolveInferMeta
kernel
:
func
:
cholesky_solve
backward
:
cholesky_solve_grad
-
api
:
clip
args
:
(Tensor x, Scalar(float) min, Scalar(float) max)
output
:
Tensor(out)
inplace
:
(x -> out)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
x
]
kernel
:
func
:
clip
backward
:
clip_grad
-
api
:
concat
args
:
(Tensor[] x, Scalar(int64_t) axis)
output
:
Tensor
infer_meta
:
func
:
ConcatInferMeta
param
:
[
x
,
axis
]
kernel
:
func
:
concat
backward
:
concat_grad
-
api
:
conj
args
:
(Tensor x)
output
:
Tensor
infer_meta
:
func
:
UnchangedInferMeta
kernel
:
func
:
conj
backward
:
conj_grad
-
api
:
conv2d
args
:
(Tensor input, Tensor filter, int[] strides, int[] paddings, str paddding_algorithm, int groups, int[] dilations, str data_format, bool use_addto, int workspace_size_MB, bool exhaustive_search)
output
:
Tensor
invoke
:
conv2d_impl(input, filter, strides, paddings, paddding_algorithm, groups, dilations, data_format, use_addto, workspace_size_MB, exhaustive_search)
backward
:
conv2d_grad
-
api
:
conv2d_transpose
args
:
(Tensor x, Tensor filter, int[] strides, int[] paddings, int[] output_padding, int[] output_size, str padding_algorithm, int groups, int[] dilations, str data_format)
output
:
Tensor(out)
infer_meta
:
func
:
ConvTransposeInferMeta
kernel
:
func
:
conv2d_transpose
use_gpudnn
:
true
backward
:
conv2d_transpose_grad
-
api
:
conv3d
args
:
(Tensor input, Tensor filter, int[] strides, int[] paddings, str paddding_algorithm, int groups, int[] dilations, str data_format, bool use_addto, int workspace_size_MB, bool exhaustive_search)
output
:
Tensor
invoke
:
conv3d_impl(input, filter, strides, paddings, paddding_algorithm, groups, dilations, data_format, use_addto, workspace_size_MB, exhaustive_search)
backward
:
conv3d_grad
-
api
:
conv3d_transpose
args
:
(Tensor x, Tensor filter, int[] strides, int[] paddings, int[] output_padding, int[] output_size, str padding_algorithm, int groups, int[] dilations, str data_format)
output
:
Tensor(out)
infer_meta
:
func
:
ConvTransposeInferMeta
kernel
:
func
:
conv3d_transpose
use_gpudnn
:
true
backward
:
conv3d_transpose_grad
-
api
:
copy_to
args
:
(Tensor x, Place place, bool blocking)
output
:
Tensor
invoke
:
copy_to_impl(x, place, blocking)
# cos
-
api
:
cos
args
:
(Tensor x)
output
:
Tensor
infer_meta
:
func
:
UnchangedInferMeta
kernel
:
func
:
cos
backward
:
cos_grad
# cosh
-
api
:
cosh
args
:
(Tensor x)
output
:
Tensor
infer_meta
:
func
:
UnchangedInferMeta
kernel
:
func
:
cosh
backward
:
cosh_grad
-
api
:
cross
args
:
(Tensor x, Tensor y, int axis = 9)
output
:
Tensor
infer_meta
:
func
:
CrossInferMeta
kernel
:
func
:
cross
backward
:
cross_grad
# Part of python API paddle.nn.functional.cross_entropy
-
api
:
cross_entropy_with_softmax
args
:
(Tensor input, Tensor label, bool soft_label, bool use_softmax, bool numeric_stable_mode, int ignore_index, int axis)
output
:
Tensor(softmax), Tensor(loss)
infer_meta
:
func
:
CrossEntropyWithSoftmaxInferMeta
kernel
:
func
:
cross_entropy_with_softmax
data_type
:
input
backward
:
cross_entropy_with_softmax_grad
-
api
:
cumprod
args
:
(Tensor x, int dim)
output
:
Tensor(out)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
x
]
kernel
:
func
:
cumprod
backward
:
cumprod_grad
-
api
:
cumsum
args
:
(Tensor x, int axis, bool flatten, bool exclusive, bool reverse)
output
:
Tensor(out)
infer_meta
:
func
:
CumInferMeta
kernel
:
func
:
cumsum
backward
:
cumsum_grad
-
api
:
deformable_conv
args
:
(Tensor x, Tensor offset, Tensor filter, Tensor mask, int[] strides, int[] paddings, int[] dilations, int deformable_groups, int groups, int im2col_step)
output
:
Tensor(out)
infer_meta
:
func
:
DeformableConvInferMeta
kernel
:
func
:
deformable_conv
data_type
:
x
optional
:
mask
backward
:
deformable_conv_grad
-
api
:
depthwise_conv2d
args
:
(Tensor x, Tensor filter, int[] strides, int[] paddings, str padding_algorithm, int groups, int[] dilations, str data_format, bool use_addto, int workspace_size_MB, bool exhaustive_search, bool fuse_relu, bool use_gpudnn)
output
:
Tensor(out)
infer_meta
:
func
:
ConvInferMeta
param
:
[
x
,
filter
,
strides
,
paddings
,
padding_algorithm
,
groups
,
dilations
,
data_format
,
use_addto
,
workspace_size_MB
,
exhaustive_search
]
kernel
:
func
:
depthwise_conv2d
param
:
[
x
,
filter
,
strides
,
paddings
,
padding_algorithm
,
groups
,
dilations
,
data_format
,
use_addto
,
workspace_size_MB
,
exhaustive_search
,
fuse_relu
]
use_gpudnn
:
use_gpudnn
backward
:
depthwise_conv2d_grad
-
api
:
depthwise_conv2d_transpose
args
:
(Tensor x, Tensor filter, int[] strides, int[] paddings, int[] output_padding, int[] output_size, str padding_algorithm, int groups, int[] dilations, str data_format)
output
:
Tensor(out)
infer_meta
:
func
:
ConvTransposeInferMeta
kernel
:
func
:
depthwise_conv2d_transpose
backward
:
depthwise_conv2d_transpose_grad
-
api
:
det
args
:
(Tensor x)
output
:
Tensor
infer_meta
:
func
:
UnchangedInferMeta
kernel
:
func
:
determinant
backward
:
det_grad
-
api
:
diag
args
:
(Tensor x, int offset, float padding_value)
output
:
Tensor
infer_meta
:
func
:
DiagInferMeta
kernel
:
func
:
diag
-
api
:
diagonal
args
:
(Tensor x, int offset, int axis1, int axis2)
output
:
Tensor
infer_meta
:
func
:
DiagonalInferMeta
kernel
:
func
:
diagonal
backward
:
diagonal_grad
-
api
:
digamma
args
:
(Tensor x)
output
:
Tensor
infer_meta
:
func
:
UnchangedInferMeta
kernel
:
func
:
digamma
backward
:
digamma_grad
-
api
:
dist
args
:
(Tensor x, Tensor y, float p)
output
:
Tensor
infer_meta
:
func
:
DistInferMeta
kernel
:
func
:
dist
backward
:
dist_grad
-
api
:
divide
args
:
(Tensor x, Tensor y)
output
:
Tensor
infer_meta
:
func
:
ElementwiseInferMeta
kernel
:
func
:
divide
backward
:
divide_grad
-
api
:
dot
args
:
(Tensor x, Tensor y)
output
:
Tensor
infer_meta
:
func
:
DotInferMeta
kernel
:
func
:
dot
-
api
:
dropout
args
:
(Tensor x, Tensor seed_tensor, float p, bool is_test, str mode, int seed, bool fix_seed)
output
:
Tensor(out), Tensor(mask)
infer_meta
:
func
:
DropoutInferMeta
kernel
:
func
:
dropout
data_type
:
x
optional
:
seed_tensor
backward
:
dropout_grad
# eigh
-
api
:
eigh
args
:
(Tensor x, str uplo)
output
:
Tensor(out_w), Tensor(out_v)
infer_meta
:
func
:
EighInferMeta
kernel
:
func
:
eigh
backward
:
eigh_grad
-
api
:
einsum
args
:
(Tensor[] x, str equation)
output
:
Tensor, Tensor[]{x.size()}, Tensor[]{x.size()}
infer_meta
:
func
:
EinsumInferMeta
param
:
[
x
,
equation
]
kernel
:
func
:
einsum
backward
:
einsum_grad
-
api
:
elementwise_pow
args
:
(Tensor x, Tensor y)
output
:
Tensor(out)
infer_meta
:
func
:
ElementwiseInferMeta
kernel
:
func
:
elementwise_pow
backward
:
elementwise_pow_grad
# elu
-
api
:
elu
args
:
(Tensor x, float alpha)
output
:
Tensor
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
x
]
kernel
:
func
:
elu
backward
:
elu_grad
-
api
:
embedding
args
:
(Tensor x, Tensor weight, int64_t padding_idx=-1, bool sparse=false)
output
:
Tensor
invoke
:
embedding_impl(x, weight, padding_idx, sparse)
backward
:
embedding_grad
-
api
:
empty
args
:
(IntArray shape, DataType dtype=DataType::FLOAT32, Place place=CPUPlace())
output
:
Tensor
infer_meta
:
func
:
CreateInferMeta
param
:
[
shape
,
dtype
]
kernel
:
func
:
empty
param
:
[
shape
,
dtype
]
data_type
:
dtype
backend
:
place
-
api
:
empty_like
args
:
(Tensor x, DataType dtype = DataType::UNDEFINED, Place place = {})
output
:
Tensor
infer_meta
:
func
:
CreateLikeInferMeta
param
:
[
x
,
dtype
]
kernel
:
func
:
empty_like
param
:
[
x
,
dtype
]
data_type
:
dtype > x
backend
:
place > x
-
api
:
equal
args
:
(Tensor x, Tensor y, int axis = -1)
output
:
Tensor
infer_meta
:
func
:
CompareInferMeta
kernel
:
func
:
equal
-
api
:
equal_all
args
:
(Tensor x, Tensor y)
output
:
Tensor
infer_meta
:
func
:
CompareAllInferMeta
kernel
:
func
:
equal_all
# erf
-
api
:
erf
args
:
(Tensor x)
output
:
Tensor
infer_meta
:
func
:
UnchangedInferMeta
kernel
:
func
:
erf
backward
:
erf_grad
# erfinv
-
api
:
erfinv
args
:
(Tensor x)
output
:
Tensor
infer_meta
:
func
:
UnchangedInferMeta
kernel
:
func
:
erfinv
backward
:
erfinv_grad
# exp
-
api
:
exp
args
:
(Tensor x)
output
:
Tensor
infer_meta
:
func
:
UnchangedInferMeta
kernel
:
func
:
exp
backward
:
exp_grad
# expand
-
api
:
expand
args
:
(Tensor x, IntArray shape)
output
:
Tensor
infer_meta
:
func
:
ExpandInferMeta
kernel
:
func
:
expand
backward
:
expand_grad
# expand_as
-
api
:
expand_as
args
:
(Tensor x, Tensor y, int[] target_shape)
output
:
Tensor
infer_meta
:
func
:
ExpandAsInferMeta
kernel
:
func
:
expand_as
optional
:
y
backward
:
expand_as_grad
-
api
:
expm1
args
:
(Tensor x)
output
:
Tensor
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
x
]
kernel
:
func
:
expm1
backward
:
expm1_grad
-
api
:
eye
args
:
(int64_t num_rows, int64_t num_columns, DataType dtype=DataType::FLOAT32, Place place={})
output
:
Tensor(out)
infer_meta
:
func
:
EyeInferMeta
param
:
[
num_rows
,
num_columns
,
dtype
]
kernel
:
func
:
eye
param
:
[
num_rows
,
num_columns
,
dtype
]
data_type
:
dtype
backend
:
place
-
api
:
flatten
args
:
(Tensor x, int start_axis, int stop_axis)
output
:
Tensor(out), Tensor(xshape)
infer_meta
:
func
:
FlattenWithXShapeInferMeta
kernel
:
func
:
flatten_with_xshape
backend
:
x
inplace
:
(x -> out)
view
:
(x -> out)
intermediate
:
xshape
backward
:
flatten_grad
# flip
-
api
:
flip
args
:
(Tensor x, int[] axis)
output
:
Tensor
infer_meta
:
func
:
FlipInferMeta
kernel
:
func
:
flip
backward
:
flip_grad
-
api
:
floor
args
:
(Tensor x)
output
:
Tensor(out)
infer_meta
:
func
:
UnchangedInferMeta
kernel
:
func
:
floor
backward
:
floor_grad
-
api
:
floor_divide
args
:
(Tensor x, Tensor y)
output
:
Tensor(out)
infer_meta
:
func
:
ElementwiseInferMeta
kernel
:
func
:
floor_divide
-
api
:
fmax
args
:
(Tensor x, Tensor y, int axis)
output
:
Tensor(out)
infer_meta
:
param
:
[
x
,
y
]
func
:
ElementwiseInferMeta
kernel
:
func
:
fmax
backward
:
fmax_grad
-
api
:
fmin
args
:
(Tensor x, Tensor y, int axis)
output
:
Tensor(out)
infer_meta
:
param
:
[
x
,
y
]
func
:
ElementwiseInferMeta
kernel
:
func
:
fmin
backward
:
fmin_grad
-
api
:
frobenius_norm
args
:
(Tensor x, int64_t[] axis, bool keep_dim, bool reduce_all)
output
:
Tensor(out)
infer_meta
:
func
:
ReduceInferMetaBase
kernel
:
func
:
frobenius_norm
backward
:
frobenius_norm_grad
-
api
:
full
args
:
(IntArray shape, Scalar value, DataType dtype=DataType::FLOAT32, Place place=CPUPlace())
output
:
Tensor
infer_meta
:
func
:
CreateInferMeta
param
:
[
shape
,
dtype
]
kernel
:
func
:
full
param
:
[
shape
,
value
,
dtype
]
data_type
:
dtype
backend
:
place
-
api
:
full_batch_size_like
args
:
(Tensor input, int[] shape, DataType dtype, Scalar value, int input_dim_idx, int output_dim_idx, Place place=CPUPlace())
output
:
Tensor
infer_meta
:
func
:
FullBatchSizeLikeInferMeta
param
:
[
input
,
shape
,
value
,
dtype
,
input_dim_idx
,
output_dim_idx
]
kernel
:
func
:
full_batch_size_like
param
:
[
input
,
shape
,
value
,
dtype
,
input_dim_idx
,
output_dim_idx
]
data_type
:
dtype
backend
:
place
-
api
:
full_like
args
:
(Tensor x, Scalar value, DataType dtype = DataType::UNDEFINED, Place place = {})
output
:
Tensor
infer_meta
:
func
:
CreateLikeInferMeta
param
:
[
x
,
dtype
]
kernel
:
func
:
full_like
param
:
[
x
,
value
,
dtype
]
data_type
:
dtype > x
backend
:
place > x
data_transform
:
skip_transform
:
x
-
api
:
gather
args
:
(Tensor x, Tensor index, Scalar(int) axis=0)
output
:
Tensor(out)
infer_meta
:
func
:
GatherInferMeta
kernel
:
func
:
gather
data_type
:
x
backward
:
gather_grad
-
api
:
gather_nd
args
:
(Tensor x, Tensor index)
output
:
Tensor
infer_meta
:
func
:
GatherNdInferMeta
kernel
:
func
:
gather_nd
data_type
:
x
backward
:
gather_nd_grad
-
api
:
gather_tree
args
:
(Tensor ids, Tensor parents)
output
:
Tensor
infer_meta
:
func
:
GatherTreeMeta
kernel
:
func
:
gather_tree
-
api
:
gaussian_random
args
:
(IntArray shape, float mean, float std, int seed, DataType dtype, Place place={})
output
:
Tensor
infer_meta
:
func
:
GaussianRandomInferMeta
param
:
[
shape
,
mean
,
std
,
seed
,
dtype
]
kernel
:
func
:
gaussian_random
param
:
[
shape
,
mean
,
std
,
seed
,
dtype
]
data_type
:
dtype
backend
:
place
-
api
:
gelu
args
:
(Tensor x, bool approximate)
output
:
Tensor(out)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
x
]
kernel
:
func
:
gelu
backward
:
gelu_grad
-
api
:
graph_send_recv
args
:
(Tensor x, Tensor src_index, Tensor dst_index, str pool_type = "SUM", int64_t out_size = 0)
output
:
Tensor(out), Tensor(dst_count)
infer_meta
:
func
:
GraphSendRecvInferMeta
kernel
:
func
:
graph_send_recv
data_type
:
x
intermediate
:
dst_count
backward
:
graph_send_recv_grad
-
api
:
greater_equal
args
:
(Tensor x, Tensor y, int axis = -1)
output
:
Tensor
infer_meta
:
func
:
CompareInferMeta
kernel
:
func
:
greater_equal
-
api
:
greater_than
args
:
(Tensor x, Tensor y, int axis = -1)
output
:
Tensor
infer_meta
:
func
:
CompareInferMeta
kernel
:
func
:
greater_than
-
api
:
group_norm
args
:
(Tensor x, Tensor scale, Tensor bias, float epsilon, int groups, str data_layout)
output
:
Tensor(y), Tensor(mean), Tensor(variance)
infer_meta
:
func
:
GroupNormInferMeta
kernel
:
func
:
group_norm
optional
:
scale, bias
intermediate
:
mean, variance
backward
:
group_norm_grad
-
api
:
gumbel_softmax
args
:
(Tensor x, float temperature, bool hard, int axis)
output
:
Tensor
infer_meta
:
func
:
GumbelSoftmaxInferMeta
kernel
:
func
:
gumbel_softmax
backward
:
gumbel_softmax_grad
# hard_shrink
-
api
:
hard_shrink
args
:
(Tensor x, float threshold)
output
:
Tensor
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
x
]
kernel
:
func
:
hard_shrink
backward
:
hard_shrink_grad
# hard_sigmoid
-
api
:
hard_sigmoid
args
:
(Tensor x, float slope, float offset)
output
:
Tensor
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
x
]
kernel
:
func
:
hard_sigmoid
backward
:
hard_sigmoid_grad
-
api
:
hard_swish
args
:
(Tensor x, float threshold = 6.0, float scale = 6.0, float offset = 3.0)
output
:
Tensor
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
x
]
kernel
:
func
:
hard_swish
backward
:
hard_swish_grad
# histogram
-
api
:
histogram
args
:
(Tensor x, int64_t bins, int min, int max)
output
:
Tensor
infer_meta
:
func
:
HistogramInferMeta
kernel
:
func
:
histogram
-
api
:
huber_loss
args
:
(Tensor input, Tensor label, float delta)
output
:
Tensor(out), Tensor(residual)
infer_meta
:
func
:
HuberLossInferMeta
kernel
:
func
:
huber_loss
backward
:
huber_loss_grad
-
api
:
imag
args
:
(Tensor x)
output
:
Tensor
infer_meta
:
func
:
RealAndImagInferMeta
kernel
:
func
:
imag
backward
:
imag_grad
# increment
-
api
:
increment
args
:
(Tensor x, float value)
output
:
Tensor
infer_meta
:
func
:
IncrementInferMeta
kernel
:
func
:
increment
-
api
:
index_sample
args
:
(Tensor x, Tensor index)
output
:
Tensor
infer_meta
:
func
:
IndexSampleInferMeta
kernel
:
func
:
index_sample
data_type
:
x
backward
:
index_sample_grad
-
api
:
index_select
args
:
(Tensor x, Tensor index, int dim)
output
:
Tensor(out)
infer_meta
:
func
:
IndexSelectInferMeta
kernel
:
func
:
index_select
data_type
:
x
backward
:
index_select_grad
-
api
:
instance_norm
args
:
(Tensor x, Tensor scale, Tensor bias, float epsilon)
output
:
Tensor(y), Tensor(saved_mean), Tensor(saved_variance)
infer_meta
:
func
:
InstanceNormInferMeta
kernel
:
func
:
instance_norm
data_type
:
x
optional
:
scale, bias
intermediate
:
saved_mean, saved_variance
backward
:
instance_norm_grad
# is_empty
-
api
:
is_empty
args
:
(Tensor x)
output
:
Tensor
infer_meta
:
func
:
IsEmptyInferMeta
kernel
:
func
:
is_empty
-
api
:
isclose
args
:
(Tensor x, Tensor y, Scalar rtol, Scalar atol, bool equal_nan)
output
:
Tensor(out)
infer_meta
:
func
:
ValueCompareInferMeta
param
:
[
x
,
y
]
kernel
:
func
:
isclose
# isfinite
-
api
:
isfinite
args
:
(Tensor x)
output
:
Tensor
infer_meta
:
func
:
IsfiniteInferMeta
kernel
:
func
:
isfinite, infinite_sr
# isinf
-
api
:
isinf
args
:
(Tensor x)
output
:
Tensor
infer_meta
:
func
:
IsfiniteInferMeta
kernel
:
func
:
isinf, isinf_sr
# isnan
-
api
:
isnan
args
:
(Tensor x)
output
:
Tensor
infer_meta
:
func
:
IsfiniteInferMeta
kernel
:
func
:
isnan, isnan_sr
-
api
:
kldiv_loss
args
:
(Tensor x, Tensor label, str reduction)
output
:
Tensor(out)
infer_meta
:
func
:
KLDivInferMeta
kernel
:
func
:
kldiv_loss
data_type
:
x
backward
:
kldiv_loss_grad
-
api
:
kron
args
:
(Tensor x, Tensor y)
output
:
Tensor
infer_meta
:
func
:
KronInferMeta
kernel
:
func
:
kron
backward
:
kron_grad
-
api
:
kthvalue
args
:
(Tensor x, int k, int axis, bool keepdim)
output
:
Tensor(out), Tensor(indices)
infer_meta
:
func
:
KthvalueInferMeta
kernel
:
func
:
kthvalue
backward
:
kthvalue_grad
# label_smooth
-
api
:
label_smooth
args
:
(Tensor label, Tensor prior_dist, float epsilon)
output
:
Tensor
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
label
]
kernel
:
func
:
label_smooth
data_type
:
label
optional
:
prior_dist
backward
:
label_smooth_grad
-
api
:
layer_norm
args
:
(Tensor x, Tensor scale, Tensor bias, float epsilon, int begin_norm_axis, bool is_test)
output
:
Tensor(out), Tensor(mean), Tensor(variance)
infer_meta
:
func
:
LayerNormInferMeta
kernel
:
func
:
layer_norm
data_type
:
x
backward
:
layer_norm_grad
optional
:
scale, bias
# leaky_relu
-
api
:
leaky_relu
args
:
(Tensor x, float alpha)
output
:
Tensor
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
x
]
kernel
:
func
:
leaky_relu
backward
:
leaky_relu_grad
-
api
:
lerp
args
:
(Tensor x, Tensor y, Tensor weight)
output
:
Tensor
infer_meta
:
func
:
LerpInferMeta
kernel
:
func
:
lerp
backward
:
lerp_grad
-
api
:
less_equal
args
:
(Tensor x, Tensor y, int axis = -1)
output
:
Tensor
infer_meta
:
func
:
CompareInferMeta
kernel
:
func
:
less_equal
-
api
:
less_than
args
:
(Tensor x, Tensor y, int axis = -1)
output
:
Tensor
infer_meta
:
func
:
CompareInferMeta
kernel
:
func
:
less_than
-
api
:
lgamma
args
:
(Tensor x)
output
:
Tensor(out)
infer_meta
:
func
:
UnchangedInferMeta
kernel
:
func
:
lgamma
backward
:
lgamma_grad
-
api
:
linspace
args
:
(Tensor start, Tensor stop, Tensor number, DataType dtype)
output
:
Tensor
infer_meta
:
func
:
LinspaceInferMeta
kernel
:
func
:
linspace
data_type
:
dtype
-
api
:
log
args
:
(Tensor x)
output
:
Tensor
infer_meta
:
func
:
UnchangedInferMeta
kernel
:
func
:
log
backward
:
log_grad
-
api
:
log10
args
:
(Tensor x)
output
:
Tensor
infer_meta
:
func
:
UnchangedInferMeta
kernel
:
func
:
log10
backward
:
log10_grad
-
api
:
log1p
args
:
(Tensor x)
output
:
Tensor
infer_meta
:
func
:
UnchangedInferMeta
kernel
:
func
:
log1p
backward
:
log1p_grad
-
api
:
log2
args
:
(Tensor x)
output
:
Tensor
infer_meta
:
func
:
UnchangedInferMeta
kernel
:
func
:
log2
backward
:
log2_grad
# log_loss
-
api
:
log_loss
args
:
(Tensor input, Tensor label, float epsilon)
output
:
Tensor
infer_meta
:
func
:
LogLossInferMeta
kernel
:
func
:
log_loss
backward
:
log_loss_grad
-
api
:
log_softmax
args
:
(Tensor x, int axis)
output
:
Tensor(out)
infer_meta
:
func
:
UnchangedInferMetaCheckAxis
kernel
:
func
:
log_softmax
backward
:
log_softmax_grad
-
api
:
logcumsumexp
args
:
(Tensor x, int axis, bool flatten, bool exclusive, bool reverse)
output
:
Tensor(out)
infer_meta
:
func
:
CumInferMeta
kernel
:
func
:
logcumsumexp
backward
:
logcumsumexp_grad
# logical_and
-
api
:
logical_and
args
:
(Tensor x, Tensor y)
output
:
Tensor
infer_meta
:
func
:
ElementwiseInferMeta
kernel
:
func
:
logical_and
# logical_not
-
api
:
logical_not
args
:
(Tensor x)
output
:
Tensor
infer_meta
:
func
:
UnchangedInferMeta
kernel
:
func
:
logical_not
# logical_or
-
api
:
logical_or
args
:
(Tensor x, Tensor y)
output
:
Tensor
infer_meta
:
func
:
ElementwiseInferMeta
kernel
:
func
:
logical_or
# logical_xor
-
api
:
logical_xor
args
:
(Tensor x, Tensor y)
output
:
Tensor
infer_meta
:
func
:
ElementwiseInferMeta
kernel
:
func
:
logical_xor
# logit
-
api
:
logit
args
:
(Tensor x, float eps = 1e-6f)
output
:
Tensor
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
x
]
kernel
:
func
:
logit
backward
:
logit_grad
# logsigmoid
-
api
:
logsigmoid
args
:
(Tensor x)
output
:
Tensor
infer_meta
:
func
:
UnchangedInferMeta
kernel
:
func
:
logsigmoid
backward
:
logsigmoid_grad
-
api
:
logsumexp
args
:
(Tensor x, int64_t[] axis, bool keepdim, bool reduce_all)
output
:
Tensor(out)
infer_meta
:
func
:
LogsumexpInferMeta
kernel
:
func
:
logsumexp
backward
:
logsumexp_grad
# masked_select
-
api
:
masked_select
args
:
(Tensor x, Tensor mask)
output
:
Tensor
infer_meta
:
func
:
MaskedSelectInferMeta
kernel
:
func
:
masked_select
data_type
:
x
backward
:
masked_select_grad
-
api
:
matmul
args
:
(Tensor x, Tensor y, bool transpose_x =
false
, bool transpose_y =
false
)
output
:
Tensor
infer_meta
:
func
:
MatmulInferMeta
kernel
:
func
:
matmul
backward
:
matmul_grad
# matrix_power
-
api
:
matrix_power
args
:
(Tensor x, int n)
output
:
Tensor
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
x
]
kernel
:
func
:
matrix_power
backward
:
matrix_power_grad
-
api
:
matrix_rank
args
:
(Tensor x, float tol, bool use_default_tol=true, bool hermitian=false)
output
:
Tensor(out)
infer_meta
:
func
:
MatrixRankInferMeta
param
:
[
x
,
use_default_tol
,
hermitian
]
kernel
:
func
:
matrix_rank
-
api
:
matrix_rank_tol
args
:
(Tensor x, Tensor atol_tensor, bool use_default_tol=true, bool hermitian=false)
output
:
Tensor(out)
infer_meta
:
func
:
MatrixRankTolInferMeta
kernel
:
func
:
matrix_rank_tol
-
api
:
max
args
:
(Tensor x, int64_t[] dims={}, bool keep_dim=false)
output
:
Tensor(out)
infer_meta
:
func
:
ReduceInferMeta
kernel
:
func
:
max
backward
:
max_grad
-
api
:
max_pool2d_with_index
args
:
(Tensor x, int[] kernel_size, int[] strides, int[] paddings, bool global_pooling, bool adaptive)
output
:
Tensor(out), Tensor(mask)
infer_meta
:
func
:
MaxPoolWithIndexInferMeta
kernel
:
func
:
max_pool2d_with_index
backward
:
max_pool2d_with_index_grad
-
api
:
max_pool3d_with_index
args
:
(Tensor x, int[] kernel_size, int[] strides, int[] paddings, bool global_pooling, bool adaptive)
output
:
Tensor(out), Tensor(mask)
infer_meta
:
func
:
MaxPoolWithIndexInferMeta
kernel
:
func
:
max_pool3d_with_index
backward
:
max_pool3d_with_index_grad
-
api
:
maximum
args
:
(Tensor x, Tensor y)
output
:
Tensor(out)
infer_meta
:
func
:
ElementwiseInferMeta
kernel
:
func
:
maximum
backward
:
maximum_grad
-
api
:
maxout
args
:
(Tensor x, int groups, int axis)
output
:
Tensor(out)
infer_meta
:
func
:
MaxOutInferMeta
kernel
:
func
:
maxout
backward
:
maxout_grad
-
api
:
mean
args
:
(Tensor x, int64_t[] dims={}, bool keep_dim=false)
output
:
Tensor(out)
infer_meta
:
func
:
ReduceInferMeta
kernel
:
func
:
mean
backward
:
mean_grad
-
api
:
mean_all
args
:
(Tensor x)
output
:
Tensor
infer_meta
:
func
:
MeanAllInferMeta
kernel
:
func
:
mean_all
backward
:
mean_all_grad
-
api
:
meshgrid
args
:
(Tensor[] inputs)
output
:
Tensor[]{inputs.size()}
infer_meta
:
func
:
MeshgridInferMeta
kernel
:
func
:
meshgrid
backward
:
meshgrid_grad
-
api
:
min
args
:
(Tensor x, int64_t[] dims={}, bool keep_dim=false)
output
:
Tensor(out)
infer_meta
:
func
:
ReduceInferMeta
kernel
:
func
:
min
backward
:
min_grad
-
api
:
minimum
args
:
(Tensor x, Tensor y)
output
:
Tensor(out)
infer_meta
:
func
:
ElementwiseInferMeta
kernel
:
func
:
minimum
backward
:
minimum_grad
-
api
:
mish
args
:
(Tensor x, float lambda)
output
:
Tensor
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
x
]
kernel
:
func
:
mish
backward
:
mish_grad
-
api
:
mode
args
:
(Tensor x, int axis, bool keepdim)
output
:
Tensor(out), Tensor(indices)
infer_meta
:
func
:
ModeInferMeta
kernel
:
func
:
mode
backward
:
mode_grad
-
api
:
modulo
args
:
(Tensor x, Tensor y)
output
:
Tensor
infer_meta
:
func
:
ElementwiseInferMeta
kernel
:
func
:
modulo
backward
:
modulo_grad
-
api
:
momentum
args
:
(Tensor param, Tensor grad, Tensor velocity, Tensor learning_rate, Tensor master_param, float mu, bool use_nesterov =
false
, str regularization_method = "", float regularization_coeff = 0.0, bool multi_precision =
false
, float rescale_grad = 1.0f)
output
:
Tensor(param_out), Tensor(velocity_out), Tensor(master_param_out)
invoke
:
momentum_impl(param, grad, velocity, learning_rate, master_param, mu, use_nesterov, regularization_method, regularization_coeff, multi_precision, rescale_grad)
optional
:
master_param
-
api
:
multi_dot
args
:
(Tensor[] x)
output
:
Tensor
infer_meta
:
func
:
MultiDotInferMeta
kernel
:
func
:
multi_dot
backward
:
multi_dot_grad
# multinomial
-
api
:
multinomial
args
:
(Tensor x, int num_samples, bool replacement)
output
:
Tensor
infer_meta
:
func
:
MultinomialInferMeta
kernel
:
func
:
multinomial
-
api
:
multiplex
args
:
(Tensor[] ins, Tensor ids)
output
:
Tensor
infer_meta
:
func
:
MultiplexInferMeta
kernel
:
func
:
multiplex
data_type
:
ins
backward
:
multiplex_grad
-
api
:
multiply
args
:
(Tensor x, Tensor y)
output
:
Tensor
infer_meta
:
func
:
ElementwiseInferMeta
kernel
:
func
:
multiply
backward
:
multiply_grad
-
api
:
mv
args
:
(Tensor x, Tensor vec)
output
:
Tensor
infer_meta
:
func
:
MvInferMeta
kernel
:
func
:
mv
backward
:
mv_grad
-
api
:
nll_loss
args
:
(Tensor input, Tensor label, Tensor weight, int64_t ignore_index, str reduction)
output
:
Tensor(out), Tensor(total_weight)
infer_meta
:
func
:
NllLossRawInferMeta
kernel
:
func
:
nll_loss
data_type
:
input
optional
:
weight
backward
:
nll_loss_grad
-
api
:
norm
args
:
(Tensor x, int axis, float epsilon, bool is_test)
output
:
Tensor(out), Tensor(norm)
infer_meta
:
func
:
NormInferMeta
kernel
:
func
:
norm
intermediate
:
norm
backward
:
norm_grad
-
api
:
not_equal
args
:
(Tensor x, Tensor y, int axis = -1)
output
:
Tensor
infer_meta
:
func
:
CompareInferMeta
kernel
:
func
:
not_equal
-
api
:
one_hot
args
:
(Tensor x, Scalar(int) num_classes)
output
:
Tensor
infer_meta
:
func
:
OneHotInferMeta
kernel
:
func
:
one_hot
-
api
:
ones_like
args
:
(Tensor x, DataType dtype=DataType::UNDEFINED, Place place={})
output
:
Tensor
invoke
:
full_like(x, 1, dtype, place)
-
api
:
p_norm
args
:
(Tensor x, float porder, int axis, float epsilon, bool keepdim, bool asvector=false)
output
:
Tensor(out)
infer_meta
:
func
:
PNormInferMeta
kernel
:
func
:
p_norm
backward
:
p_norm_grad
# pad
-
api
:
pad
args
:
(Tensor x, int[] paddings, float pad_value)
output
:
Tensor
infer_meta
:
func
:
PadInferMeta
kernel
:
func
:
pad
backward
:
pad_grad
-
api
:
pad3d
args
:
(Tensor x, IntArray paddings, str mode, float pad_value, str data_format)
output
:
Tensor(out)
infer_meta
:
func
:
Pad3dInferMeta
kernel
:
func
:
pad3d
backward
:
pad3d_grad
# pixel_shuffle
-
api
:
pixel_shuffle
args
:
(Tensor x, int upscale_factor, str data_format)
output
:
Tensor
infer_meta
:
func
:
PixelShuffleInferMeta
kernel
:
func
:
pixel_shuffle
backward
:
pixel_shuffle_grad
# poisson
-
api
:
poisson
args
:
(Tensor x)
output
:
Tensor
infer_meta
:
func
:
UnchangedInferMeta
kernel
:
func
:
poisson
backward
:
poisson_grad
-
api
:
pool2d
args
:
(Tensor x, int[] kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm)
output
:
Tensor(out)
infer_meta
:
func
:
PoolInferMeta
kernel
:
func
:
pool2d
use_gpudnn
:
true
backward
:
pool2d_grad
# Used in adaptive_avg_pool2d API
-
api
:
pool2d_gpudnn_unused
args
:
(Tensor x, int[] kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm)
output
:
Tensor(out)
infer_meta
:
func
:
PoolInferMeta
kernel
:
func
:
pool2d
use_gpudnn
:
false
backward
:
pool2d_grad_gpudnn_unused
-
api
:
pool3d
args
:
(Tensor x, int[] kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm)
output
:
Tensor(out)
infer_meta
:
func
:
PoolInferMeta
kernel
:
func
:
pool3d
use_gpudnn
:
true
backward
:
pool3d_grad
-
api
:
pow
args
:
(Tensor x, Scalar s)
output
:
Tensor(out)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
x
]
kernel
:
func
:
pow
backward
:
pow_grad
-
api
:
prelu
args
:
(Tensor x, Tensor alpha, str data_format, str mode)
output
:
Tensor(out)
infer_meta
:
func
:
PReluInferMeta
kernel
:
func
:
prelu
backward
:
prelu_grad
-
api
:
psroi_pool
args
:
(Tensor x, Tensor boxes, Tensor boxes_num, int pooled_height, int pooled_width, int output_channels, float spatial_scale)
output
:
Tensor
infer_meta
:
func
:
PsroiPoolInferMeta
kernel
:
func
:
psroi_pool
data_type
:
x
optional
:
boxes_num
backward
:
psroi_pool_grad
# put_along_axis
-
api
:
put_along_axis
args
:
(Tensor x, Tensor index, Tensor value, int axis, str reduce)
output
:
Tensor
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
index
]
kernel
:
func
:
put_along_axis
data_type
:
x
backward
:
put_along_axis_grad
-
api
:
qr
args
:
(Tensor x, str mode)
output
:
Tensor(q), Tensor(r)
infer_meta
:
func
:
QrInferMeta
kernel
:
func
:
qr
# backward : qr_grad
-
api
:
randint
args
:
(int low, int high, IntArray shape, DataType dtype=DataType::INT64, Place place={})
output
:
Tensor(out)
infer_meta
:
func
:
RandintInferMeta
param
:
[
low
,
high
,
shape
,
dtype
]
kernel
:
func
:
randint
param
:
[
low
,
high
,
shape
,
dtype
]
data_type
:
dtype
backend
:
place
-
api
:
randperm
args
:
(int n, DataType dtype, Place place={})
output
:
Tensor
infer_meta
:
func
:
RandpermInferMeta
param
:
[
n
,
dtype
]
kernel
:
func
:
randperm
param
:
[
n
,
dtype
]
data_type
:
dtype
backend
:
place
-
api
:
real
args
:
(Tensor x)
output
:
Tensor
infer_meta
:
func
:
RealAndImagInferMeta
kernel
:
func
:
real
backward
:
real_grad
-
api
:
reciprocal
args
:
(Tensor x)
output
:
Tensor
infer_meta
:
func
:
UnchangedInferMeta
kernel
:
func
:
reciprocal
backward
:
reciprocal_grad
# reduce_prod
-
api
:
reduce_prod
args
:
(Tensor x, int64_t[] dims, bool keep_dim, bool reduce_all)
output
:
Tensor
infer_meta
:
func
:
ReduceInferMetaBase
kernel
:
func
:
prod_raw
backward
:
reduce_prod_grad
-
api
:
relu
args
:
(Tensor x)
output
:
Tensor
infer_meta
:
func
:
UnchangedInferMeta
kernel
:
func
:
relu
inplace
:
(x -> out)
backward
:
relu_grad
-
api
:
reshape
args
:
(Tensor x, IntArray shape)
output
:
Tensor(out), Tensor(xshape)
infer_meta
:
func
:
ReshapeWithXShapeInferMeta
kernel
:
func
:
reshape_with_xshape
inplace
:
(x -> out)
view
:
(x -> out)
intermediate
:
xshape
backward
:
reshape_grad
-
api
:
roi_align
args
:
(Tensor x, Tensor boxes, Tensor boxes_num, int pooled_height, int pooled_width, float spatial_scale, int sampling_ratio, bool aligned)
output
:
Tensor
infer_meta
:
func
:
RoiAlignInferMeta
kernel
:
func
:
roi_align
data_type
:
x
optional
:
boxes_num
backward
:
roi_align_grad
-
api
:
roi_pool
args
:
(Tensor x, Tensor boxes, Tensor boxes_num, int pooled_height, int pooled_width, float spatial_scale)
output
:
Tensor(out), Tensor(arg_max)
infer_meta
:
func
:
RoiPoolInferMeta
kernel
:
func
:
roi_pool
data_type
:
x
optional
:
boxes_num
intermediate
:
arg_max
backward
:
roi_pool_grad
-
api
:
roll
args
:
(Tensor x, IntArray shifts, int64_t[] axis)
output
:
Tensor(out)
infer_meta
:
func
:
RollInferMeta
kernel
:
func
:
roll
backward
:
roll_grad
-
api
:
round
args
:
(Tensor x)
output
:
Tensor(out)
infer_meta
:
func
:
UnchangedInferMeta
kernel
:
func
:
round
backward
:
round_grad
-
api
:
rsqrt
args
:
(Tensor x)
output
:
Tensor(out)
infer_meta
:
func
:
UnchangedInferMeta
kernel
:
func
:
rsqrt
inplace
:
(x -> out)
backward
:
rsqrt_grad
-
api
:
scale
args
:
(Tensor x, Scalar scale, float bias, bool bias_after_scale)
output
:
Tensor
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
x
]
kernel
:
func
:
scale, scale_sr
inplace
:
(x -> out)
backward
:
scale_grad
-
api
:
scatter
args
:
(Tensor x, Tensor index, Tensor updates, bool overwrite)
output
:
Tensor
infer_meta
:
func
:
ScatterInferMeta
dtype
:
x
kernel
:
func
:
scatter
backward
:
scatter_grad
-
api
:
scatter_nd_add
args
:
(Tensor x, Tensor index, Tensor updates)
output
:
Tensor
infer_meta
:
func
:
ScatterNdAddInferMeta
dtype
:
x
kernel
:
func
:
scatter_nd_add
backward
:
scatter_nd_add_grad
-
api
:
searchsorted
args
:
(Tensor sorted_sequence, Tensor value, bool out_int32, bool right)
output
:
Tensor(out)
infer_meta
:
func
:
SearchsortedInferMeta
kernel
:
func
:
searchsorted
data_type
:
sorted_sequence
# segment_pool
-
api
:
segment_pool
args
:
(Tensor x, Tensor segment_ids, str pooltype)
output
:
Tensor(out), Tensor(summed_ids)
infer_meta
:
func
:
SegmentPoolInferMeta
kernel
:
func
:
segment_pool
data_type
:
x
backward
:
segment_pool_grad
# selu
-
api
:
selu
args
:
(Tensor x, float scale, float alpha)
output
:
Tensor
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
x
]
kernel
:
func
:
selu
backward
:
selu_grad
-
api
:
sgd
args
:
(Tensor param, Tensor learning_rate, Tensor grad, Tensor master_param, bool multi_precision)
output
:
Tensor(param_out), Tensor(master_param_out)
invoke
:
sgd_impl(param, learning_rate, grad, master_param, multi_precision)
optional
:
master_param
- api : shape
  args : (Tensor input)
  output : Tensor
  infer_meta :
    func : ShapeInferMeta
  kernel :
    func : shape, shape_sr
  data_transform :
    skip_transform : input

# shard_index
- api : shard_index
  args : (Tensor in, int index_num, int nshards, int shard_id, int ignore_value)
  output : Tensor
  infer_meta :
    func : ShardIndexInferMeta
  kernel :
    func : shard_index

# sigmoid
- api : sigmoid
  args : (Tensor x)
  output : Tensor
  infer_meta :
    func : UnchangedInferMeta
  kernel :
    func : sigmoid
  backward : sigmoid_grad

# sigmoid_cross_entropy_with_logits
- api : sigmoid_cross_entropy_with_logits
  args : (Tensor x, Tensor label, bool normalize, int ignore_index)
  output : Tensor
  infer_meta :
    func : SigmoidCrossEntropyWithLogitsInferMeta
  kernel :
    func : sigmoid_cross_entropy_with_logits
  backward : sigmoid_cross_entropy_with_logits_grad

- api : sign
  args : (Tensor x)
  output : Tensor
  infer_meta :
    func : UnchangedInferMeta
  kernel :
    func : sign

# silu
- api : silu
  args : (Tensor x)
  output : Tensor
  infer_meta :
    func : UnchangedInferMeta
  kernel :
    func : silu
  backward : silu_grad

# sin
- api : sin
  args : (Tensor x)
  output : Tensor
  infer_meta :
    func : UnchangedInferMeta
  kernel :
    func : sin
  backward : sin_grad

# sinh
- api : sinh
  args : (Tensor x)
  output : Tensor
  infer_meta :
    func : UnchangedInferMeta
  kernel :
    func : sinh
  backward : sinh_grad

# size
- api : size
  args : (Tensor x)
  output : Tensor
  infer_meta :
    func : SizeInferMeta
  kernel :
    func : size
  data_transform :
    skip_transform : x

- api : slice
  args : (Tensor input, int64_t[] axes, IntArray starts, IntArray ends, int64_t[] infer_flags, int64_t[] decrease_axis)
  output : Tensor
  infer_meta :
    func : SliceRawInferMeta
  kernel :
    func : slice
  backward : slice_grad

# soft_shrink
- api : soft_shrink
  args : (Tensor x, float lambda)
  output : Tensor
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : soft_shrink
  backward : soft_shrink_grad

- api : softmax
  args : (Tensor x, int axis)
  output : Tensor
  infer_meta :
    func : SoftmaxInferMeta
  kernel :
    func : softmax
    use_gpudnn : true
  backward : softmax_grad

- api : split
  args : (Tensor x, IntArray num_or_sections, Scalar(int) axis)
  output : Tensor[]
  invoke : split_impl(x, num_or_sections, axis)
  backward : split_grad

- api : sqrt
  args : (Tensor x)
  output : Tensor
  infer_meta :
    func : UnchangedInferMeta
  kernel :
    func : sqrt
  backward : sqrt_grad

- api : square
  args : (Tensor x)
  output : Tensor
  infer_meta :
    func : UnchangedInferMeta
  kernel :
    func : square
  backward : square_grad

- api : squeeze
  args : (Tensor x, int[] axes)
  output : Tensor(out), Tensor(xshape)
  infer_meta :
    func : SqueezeInferMeta
  kernel :
    func : squeeze
  view : (x -> out)
  intermediate : xshape
  backward : squeeze_grad

- api : stack
  args : (Tensor[] x, int axis)
  output : Tensor
  infer_meta :
    func : StackInferMeta
  kernel :
    func : stack
  backward : stack_grad

- api : strided_slice
  args : (Tensor x, int[] axes, IntArray starts, IntArray ends, IntArray strides)
  output : Tensor
  infer_meta :
    func : StridedSliceInferMeta
  kernel :
    func : strided_slice
  backward : strided_slice_grad

- api : subtract
  args : (Tensor x, Tensor y)
  output : Tensor
  infer_meta :
    func : ElementwiseInferMeta
  kernel :
    func : subtract
  backward : subtract_grad

- api : sum
  args : (Tensor x, int64_t[] dims={}, DataType out_dtype=DataType::UNDEFINED, bool keep_dim=false)
  output : Tensor(out)
  infer_meta :
    func : SumInferMeta
  kernel :
    func : sum
    data_type : x
  backward : sum_grad
# The python API paddle.nn.functional.swish has no `beta` argument, it may be removed later
- api : swish
  args : (Tensor x, float beta=1.0)
  output : Tensor(out)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : swish
  backward : swish_grad
# take_along_axis
- api : take_along_axis
  args : (Tensor x, Tensor index, int axis)
  output : Tensor
  infer_meta :
    func : UnchangedInferMeta
    param : [index]
  kernel :
    func : take_along_axis
    data_type : x
  backward : take_along_axis_grad

# tan
- api : tan
  args : (Tensor x)
  output : Tensor
  infer_meta :
    func : UnchangedInferMeta
  kernel :
    func : tan
  backward : tan_grad

# tanh
- api : tanh
  args : (Tensor x)
  output : Tensor
  infer_meta :
    func : UnchangedInferMeta
  kernel :
    func : tanh
  backward : tanh_grad

# tanh_shrink
- api : tanh_shrink
  args : (Tensor x)
  output : Tensor
  infer_meta :
    func : UnchangedInferMeta
  kernel :
    func : tanh_shrink
  backward : tanh_shrink_grad

# thresholded_relu
- api : thresholded_relu
  args : (Tensor x, float threshold)
  output : Tensor
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : thresholded_relu
  backward : thresholded_relu_grad

# tile
- api : tile
  args : (Tensor x, IntArray repeat_times)
  output : Tensor
  infer_meta :
    func : TileInferMeta
  kernel :
    func : tile
  backward : tile_grad

- api : top_k
  args : (Tensor x, Scalar k, int axis = -1, bool largest = true, bool sorted = true)
  output : Tensor(out), Tensor(indices)
  infer_meta :
    func : TopKInferMeta
  kernel :
    func : top_k
  backward : top_k_grad

- api : trace
  args : (Tensor x, int offset, int axis1, int axis2)
  output : Tensor
  infer_meta :
    func : TraceInferMeta
  kernel :
    func : trace
  backward : trace_grad

- api : transpose
  args : (Tensor x, int[] axis)
  output : Tensor
  infer_meta :
    func : TransposeInferMeta
  kernel :
    func : transpose
  backward : transpose_grad

- api : triangular_solve
  args : (Tensor x, Tensor y, bool upper, bool transpose, bool unitriangular)
  output : Tensor
  infer_meta :
    func : TriangularSolveInferMeta
  kernel :
    func : triangular_solve
  backward : triangular_solve_grad

- api : tril_indices
  args : (int rows, int cols, int offset, DataType dtype, Place place={})
  output : Tensor(out)
  infer_meta :
    func : TrilIndicesInferMeta
    param : [rows, cols, offset, dtype]
  kernel :
    func : tril_indices
    param : [rows, cols, offset, dtype]
    data_type : dtype
    backend : place

- api : tril_triu
  args : (Tensor x, int diagonal, bool lower)
  output : Tensor(out)
  infer_meta :
    func : TrilTriuInferMeta
  kernel :
    func : tril_triu
  backward : tril_triu_grad

- api : trunc
  args : (Tensor x)
  output : Tensor
  infer_meta :
    func : UnchangedInferMeta
  kernel :
    func : trunc
  backward : trunc_grad

# python API: paddle.nn.initializer.TruncatedNormal
- api : truncated_gaussian_random
  args : (int[] shape, float mean, float std, int seed, DataType dtype=DataType::FLOAT32, Place place={})
  output : Tensor
  infer_meta :
    func : TruncatedGaussianRandomInferMeta
    param : [shape, mean, std, seed, dtype]
  kernel :
    func : truncated_gaussian_random
    param : [shape, mean, std, seed, dtype]
    backend : place
    data_type : dtype

- api : unbind
  args : (Tensor input, int axis)
  output : Tensor[] {axis<0 ? input.dims()[input.dims().size()+axis]:input.dims()[axis]}
  infer_meta :
    func : UnbindInferMeta
  kernel :
    func : unbind
  backward : unbind_grad

# unfold
- api : unfold
  args : (Tensor x, int[] kernel_sizes, int[] strides, int[] paddings, int[] dilations)
  output : Tensor
  infer_meta :
    func : UnfoldInferMeta
  kernel :
    func : unfold
  backward : unfold_grad

- api : uniform_random
  args : (IntArray shape, DataType dtype, float min, float max, int seed, Place place={})
  output : Tensor(out)
  infer_meta :
    func : UniformRandomInferMeta
    param : [shape, dtype, min, max, seed]
  kernel :
    func : uniform_random
    param : [shape, dtype, min, max, seed]
    data_type : dtype
    backend : place

# The `axis` argument of Python API paddle.unique is not vector
- api : unique
  args : (Tensor x, bool return_index, bool return_inverse, bool return_counts, int[] axis, DataType dtype=DataType::INT64)
  output : Tensor(out), Tensor(indices), Tensor(inverse), Tensor(counts)
  infer_meta :
    func : UniqueInferMeta
  kernel :
    func : unique
    data_type : x

- api : unsqueeze
  args : (Tensor x, IntArray axis)
  output : Tensor(out), Tensor(xshape)
  infer_meta :
    func : UnsqueezeInferMeta
  kernel :
    func : unsqueeze
  view : (x -> out)
  intermediate : xshape
  backward : unsqueeze_grad

# viterbi_decode
- api : viterbi_decode
  args : (Tensor input, Tensor transition, Tensor length, bool include_bos_eos_tag)
  output : Tensor(scores), Tensor(path)
  infer_meta :
    func : ViterbiDecodeInferMeta
  kernel :
    func : viterbi_decode
    data_type : input

- api : where
  args : (Tensor condition, Tensor x, Tensor y)
  output : Tensor
  infer_meta :
    func : WhereInferMeta
  kernel :
    func : where
  backward : where_grad

# where_index
- api : where_index
  args : (Tensor condition)
  output : Tensor
  infer_meta :
    func : WhereIndexInferMeta
  kernel :
    func : where_index

# yolo_box
- api : yolo_box
  args : (Tensor x, Tensor img_size, int[] anchors, int class_num, float conf_thresh, int downsample_ratio, bool clip_bbox, float scale_x_y=1.0, bool iou_aware=false, float iou_aware_factor=0.5)
  output : Tensor(boxes), Tensor(scores)
  infer_meta :
    func : YoloBoxInferMeta
  kernel :
    func : yolo_box
    data_type : x

- api : zeros_like
  args : (Tensor x, DataType dtype=DataType::UNDEFINED, Place place = {})
  output : Tensor
  invoke : full_like(x, 0, dtype, place)
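Each entry in this file describes one forward API: `args` gives the C++ signature, `infer_meta` and `kernel` name the shape-inference and PHI kernel functions, and `invoke`/`backward` wire in composite implementations and gradient ops. As a rough illustration (an aside, not part of the diff), the `where` entry above is what backs the dygraph call below; the tensor values are made up for the example.

import paddle

# paddle.where dispatches to the generated C++ API described by the `where` entry:
# output shape/dtype come from WhereInferMeta, computation runs the `where` PHI kernel.
cond = paddle.to_tensor([True, False, True])
x = paddle.to_tensor([1.0, 2.0, 3.0])
y = paddle.to_tensor([10.0, 20.0, 30.0])
out = paddle.where(cond, x, y)  # -> [1.0, 20.0, 3.0]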
python/paddle/utils/code_gen/backward.yaml
Browse file @
fcd32950
#- backward_api : einsum_grad
#forward : einsum (Tensor[] x, str equation) -> Tensor(out), Tensor[](inner_cache)
#args : (Tensor[] x, Tensor[] inner_cache, Tensor out_grad, str equation)
#output : Tensor[](x_grad){x.size()}
#infer_meta :
#func : UnchangedMultiInferMeta
#param : [x]
#kernel :
#func : einsum_grad
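Each `backward_api` entry that follows pairs a gradient kernel with its forward definition; `no_need_buffer`, `optional`, and `inplace` control which forward tensors must be kept alive and which buffers may be reused. As a hedged aside (not part of the file), the `abs_grad` entry below is roughly what dygraph autograd dispatches to in a snippet like this:

import paddle

x = paddle.to_tensor([-2.0, 3.0], stop_gradient=False)
out = paddle.abs(x)    # forward : abs (Tensor x) -> Tensor(out)
out.sum().backward()   # backward : abs_grad(x, out_grad) -> x_grad
print(x.grad)          # [-1., 1.], i.e. sign(x)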
- backward_api : abs_double_grad
  forward : abs_grad (Tensor x, Tensor grad_out) -> Tensor(grad_x)
  args : (Tensor x, Tensor grad_x_grad)
  output : Tensor(grad_out_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : abs_double_grad
  data_transform :
    skip_transform : grad_x_grad

- backward_api : abs_grad
  forward : abs (Tensor x) -> Tensor(out)
  args : (Tensor x, Tensor out_grad)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : abs_grad
  data_transform :
    skip_transform : out_grad
  backward : abs_double_grad

- backward_api : acos_grad
  forward : acos (Tensor x) -> Tensor(out)
  args : (Tensor x, Tensor out_grad)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : acos_grad
  inplace : (out_grad -> x_grad)

- backward_api : acosh_grad
  forward : acosh (Tensor x) -> Tensor(out)
  args : (Tensor x, Tensor out_grad)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : acosh_grad
  inplace : (out_grad -> x_grad)

- backward_api : add_double_grad
  forward : add_grad (Tensor x, Tensor y, Tensor grad_out, int axis = -1) -> Tensor(grad_x), Tensor(grad_y)
  args : (Tensor y, Tensor grad_out, Tensor grad_x_grad, Tensor grad_y_grad, int axis = -1)
  output : Tensor(grad_out_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [grad_out]
  kernel :
    func : add_double_grad
  optional : grad_x_grad, grad_y_grad
  backward : add_triple_grad
  inplace : (grad_x_grad -> grad_out_grad)

- backward_api : add_grad
  forward : add (Tensor x, Tensor y) -> Tensor(out)
  args : (Tensor x, Tensor y, Tensor out_grad, int axis = -1)
  output : Tensor(x_grad), Tensor(y_grad)
  infer_meta :
    func : GeneralBinaryGradInferMeta
    param : [x, y]
  kernel :
    func : add_grad
  no_need_buffer : x, y
  backward : add_double_grad
  inplace : (out_grad -> x_grad)

- backward_api : add_n_grad
  forward : add_n (Tensor[] x) -> Tensor(out)
  args : (Tensor[] x, Tensor out_grad)
  output : Tensor[](x_grad){x.size()}
  invoke : add_n_grad_impl(x, out_grad, x_grad)
  no_need_buffer : x

- backward_api : add_triple_grad
  forward : add_double_grad (Tensor y, Tensor grad_out, Tensor grad_grad_x, Tensor grad_grad_y, int axis = -1) -> Tensor(grad_grad_out)
  args : (Tensor grad_grad_x, Tensor grad_grad_y, Tensor grad_grad_out_grad, int axis = -1)
  output : Tensor(grad_grad_x_grad), Tensor(grad_grad_y_grad)
  infer_meta :
    func : GeneralBinaryGradInferMeta
    param : [grad_grad_x, grad_grad_y]
  kernel :
    func : add_triple_grad
  inplace : (grad_grad_out_grad -> grad_grad_x_grad)

- backward_api : addmm_grad
  forward : addmm (Tensor input, Tensor x, Tensor y, float alpha, float beta) -> Tensor(out)
  args : (Tensor input, Tensor x, Tensor y, Tensor out_grad, float alpha, float beta)
  output : Tensor(input_grad), Tensor(x_grad), Tensor(y_grad)
  infer_meta :
    func : GeneralTernaryGradInferMeta
    param : [input, x, y]
  kernel :
    func : addmm_grad

- backward_api : argsort_grad
  forward : argsort (Tensor x, int axis, bool descending) -> Tensor(out), Tensor(indices)
  args : (Tensor indices, Tensor x, Tensor out_grad, int axis, bool descending)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : argsort_grad
  no_need_buffer : x

- backward_api : asin_grad
  forward : asin (Tensor x) -> Tensor(out)
  args : (Tensor x, Tensor out_grad)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : asin_grad
  inplace : (out_grad -> x_grad)

- backward_api : asinh_grad
  forward : asinh (Tensor x) -> Tensor(out)
  args : (Tensor x, Tensor out_grad)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : asinh_grad
  inplace : (out_grad -> x_grad)

- backward_api : assign_grad
  forward : assign (Tensor x) -> Tensor(out)
  args : (Tensor out_grad)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
  kernel :
    func : assign
  inplace : (out_grad -> x_grad)

- backward_api : assign_out__grad
  forward : assign_out_ (Tensor x, Tensor output) -> Tensor(out)
  args : (Tensor out_grad)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
  kernel :
    func : assign
  inplace : (out_grad -> x_grad)

- backward_api : atan2_grad
  forward : atan2 (Tensor x, Tensor y) -> Tensor(out)
  args : (Tensor x, Tensor y, Tensor out_grad)
  output : Tensor(x_grad), Tensor(y_grad)
  infer_meta :
    func : GeneralBinaryGradInferMeta
    param : [x, y]
  kernel :
    func : atan2_grad

- backward_api : atan_grad
  forward : atan (Tensor x) -> Tensor(out)
  args : (Tensor x, Tensor out_grad)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : atan_grad
  inplace : (out_grad -> x_grad)

- backward_api : atanh_grad
  forward : atanh (Tensor x) -> Tensor(out)
  args : (Tensor x, Tensor out_grad)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : atanh_grad
  inplace : (out_grad -> x_grad)

- backward_api : batch_norm_double_grad
  forward : batch_norm_grad (Tensor x, Tensor scale, Tensor bias, Tensor out_mean, Tensor out_variance, Tensor saved_mean, Tensor saved_variance, Tensor reserve_space, Tensor grad_out, float momentum, float epsilon, str data_layout, bool is_test, bool use_global_stats, bool trainable_statistics, bool fuse_with_relu) -> Tensor(grad_x), Tensor(grad_scale), Tensor(grad_bias)
  args : (Tensor x, Tensor scale, Tensor out_mean, Tensor out_variance, Tensor saved_mean, Tensor saved_variance, Tensor grad_out, Tensor grad_x_grad, Tensor grad_scale_grad, Tensor grad_bias_grad, float momentum, float epsilon, str data_layout, bool is_test, bool use_global_stats, bool trainable_statistics, bool fuse_with_relu)
  output : Tensor(x_grad), Tensor(scale_grad), Tensor(grad_out_grad)
  infer_meta :
    func : GeneralTernaryGradInferMeta
    param : [x, scale, x]
  kernel :
    func : batch_norm_grad_grad
    data_type : x
  optional : out_mean, out_variance
  inplace : (grad_out -> grad_out_grad)

- backward_api : batch_norm_grad
  forward : batch_norm (Tensor x, Tensor scale, Tensor bias, Tensor mean, Tensor variance, float momentum, float epsilon, str data_layout, bool is_test, bool use_global_stats, bool trainable_statistics, bool fuse_with_relu) -> Tensor(out), Tensor(mean_out), Tensor(variance_out), Tensor(saved_mean), Tensor(saved_variance), Tensor(reserve_space)
  args : (Tensor x, Tensor scale, Tensor bias, Tensor mean_out, Tensor variance_out, Tensor saved_mean, Tensor saved_variance, Tensor reserve_space, Tensor out_grad, float momentum, float epsilon, str data_layout, bool is_test, bool use_global_stats, bool trainable_statistics, bool fuse_with_relu)
  output : Tensor(x_grad), Tensor(scale_grad), Tensor(bias_grad)
  infer_meta :
    func : GeneralTernaryGradInferMeta
    param : [x, scale, bias]
  kernel :
    func : batch_norm_grad
    data_type : out_grad
  optional : mean_out, variance_out, reserve_space
  backward : batch_norm_double_grad
- backward_api : bce_loss_grad
  forward : bce_loss (Tensor input, Tensor label) -> Tensor(out)
  args : (Tensor input, Tensor label, Tensor out_grad)
  output : Tensor(input_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [input]
  kernel :
    func : bce_loss_grad
  inplace : (out_grad -> input_grad)

- backward_api : brelu_grad
  forward : brelu (Tensor x, float t_min, float t_max) -> Tensor(out)
  args : (Tensor x, Tensor out_grad, float t_min, float t_max)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : brelu_grad
  inplace : (out_grad -> x_grad)

- backward_api : cast_grad
  forward : cast (Tensor x, DataType out_dtype) -> Tensor(out)
  args : (Tensor x, Tensor out_grad)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : cast_grad
    data_type : out_grad
  no_need_buffer : x

- backward_api : ceil_grad
  forward : ceil(Tensor x) -> Tensor(out)
  args : (Tensor out_grad)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [out_grad]
  kernel :
    func : ceil_grad
  inplace : (out_grad -> x_grad)

- backward_api : celu_double_grad
  forward : celu_grad(Tensor x, Tensor grad_out, float alpha) -> Tensor(grad_x)
  args : (Tensor x, Tensor grad_out, Tensor grad_x_grad, float alpha)
  output : Tensor(x_grad), Tensor(grad_out_grad)
  infer_meta :
    func : GeneralBinaryGradInferMeta
    param : [x, x]
  kernel :
    func : celu_double_grad
  inplace : (grad_x_grad -> grad_out_grad)

- backward_api : celu_grad
  forward : celu(Tensor x, float alpha) -> Tensor(out)
  args : (Tensor x, Tensor out_grad, float alpha)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : celu_grad
  backward : celu_double_grad
  inplace : (out_grad -> x_grad)

- backward_api : cholesky_grad
  forward : cholesky (Tensor x, bool upper) -> Tensor(out)
  args : (Tensor out, Tensor out_grad, bool upper)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [out]
  kernel :
    func : cholesky_grad

- backward_api : cholesky_solve_grad
  forward : cholesky_solve (Tensor x, Tensor y, bool upper) -> Tensor(out)
  args : (Tensor x, Tensor y, Tensor out, Tensor out_grad, bool upper)
  output : Tensor(x_grad), Tensor(y_grad)
  infer_meta :
    func : GeneralBinaryGradInferMeta
    param : [x, y]
  kernel :
    func : cholesky_solve_grad

- backward_api : clip_double_grad
  forward : clip_grad (Tensor x, Tensor grad_out, Scalar min = 0., Scalar max = 0.) -> Tensor(grad_x)
  args : (Tensor x, Tensor grad_x_grad, Scalar min = 0., Scalar max = 0.)
  output : Tensor(grad_out_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : clip_grad

- backward_api : clip_grad
  forward : clip (Tensor x, Scalar min, Scalar max) -> Tensor(out)
  args : (Tensor x, Tensor out_grad, Scalar min = 0., Scalar max = 0.)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : clip_grad
  backward : clip_double_grad
  inplace : (out_grad -> x_grad)

- backward_api : concat_double_grad
  forward : concat_grad (Tensor[] x, Tensor grad_out, Scalar axis) -> Tensor[](grad_x)
  args : (Tensor[] grad_x_grad, Scalar axis = 0)
  output : Tensor(grad_out_grad)
  infer_meta :
    func : ConcatInferMeta
    param : [grad_x_grad, axis]
  kernel :
    func : concat

- backward_api : concat_grad
  forward : concat (Tensor[] x, Scalar axis) -> Tensor(out)
  args : (Tensor[] x, Tensor out_grad, Scalar axis = 0)
  output : Tensor[](x_grad){x.size()}
  infer_meta :
    func : UnchangedMultiInferMeta
    param : [x]
  kernel :
    func : concat_grad
  no_need_buffer : x
  backward : concat_double_grad

- backward_api : conj_grad
  forward : conj (Tensor x) -> Tensor(out)
  args : (Tensor out_grad)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [out_grad]
  kernel :
    func : conj

- backward_api : conv2d_grad
  forward : conv2d (Tensor input, Tensor filter, int[] strides, int[] paddings, str paddding_algorithm, int groups, int[] dilations, str data_format, bool use_addto, int workspace_size_MB, bool exhaustive_search) -> Tensor(out)
  args : (Tensor input, Tensor filter, Tensor out_grad, int[] strides, int[] paddings, str paddding_algorithm, int groups, int[] dilations, str data_format, bool use_addto, int workspace_size_MB, bool exhaustive_search)
  output : Tensor(input_grad), Tensor(filter_grad)
  invoke : conv2d_grad_impl(input, filter, out_grad, strides, paddings, paddding_algorithm, groups, dilations, data_format, use_addto, workspace_size_MB, exhaustive_search, input_grad, filter_grad)
  backward : conv2d_grad_grad

- backward_api : conv2d_grad_grad
  forward : conv2d_grad (Tensor input, Tensor filter, Tensor grad_out, int[] strides, int[] paddings, str paddding_algorithm, int groups, int[] dilations, str data_format, bool use_addto, int workspace_size_MB, bool exhaustive_search) -> Tensor(grad_input), Tensor(grad_filter)
  args : (Tensor input, Tensor filter, Tensor grad_out, Tensor grad_input_grad, Tensor grad_filter_grad, int[] strides, int[] paddings, str paddding_algorithm, int groups, int[] dilations, str data_format, bool use_addto, int workspace_size_MB, bool exhaustive_search)
  output : Tensor(input_grad), Tensor(filter_grad), Tensor(grad_out_grad)
  infer_meta :
    func : GeneralTernaryGradInferMeta
    param : [input, filter, grad_out]
  kernel :
    func : conv2d_grad_grad
    use_gpudnn : true
  optional : grad_input_grad, grad_filter_grad

- backward_api : conv2d_transpose_double_grad
  forward : conv2d_transpose_grad(Tensor x, Tensor filter, Tensor grad_out, int[] strides, int[] paddings, int[] output_padding, int[] output_size, str padding_algorithm, int groups, int[] dilations, str data_format) -> Tensor(grad_x), Tensor(grad_filter)
  args : (Tensor x, Tensor filter, Tensor grad_out, Tensor grad_x_grad, Tensor grad_filter_grad, int[] strides, int[] paddings, int[] output_padding, int[] output_size, str padding_algorithm, int groups, int[] dilations, str data_format)
  output : Tensor(x_grad), Tensor(filter_grad), Tensor(grad_out_grad)
  infer_meta :
    func : Conv2dTransposeDoubleGradInferMeta
  kernel :
    func : conv2d_transpose_grad_grad
    use_gpudnn : true

- backward_api : conv2d_transpose_grad
  forward : conv2d_transpose(Tensor x, Tensor filter, int[] strides, int[] paddings, int[] output_padding, int[] output_size, str padding_algorithm, int groups, int[] dilations, str data_format) -> Tensor(out)
  args : (Tensor x, Tensor filter, Tensor out_grad, int[] strides, int[] paddings, int[] output_padding, int[] output_size, str padding_algorithm, int groups, int[] dilations, str data_format)
  output : Tensor(x_grad), Tensor(filter_grad)
  infer_meta :
    func : ConvTransposeGradInferMeta
  kernel :
    func : conv2d_transpose_grad
    use_gpudnn : true
  backward : conv2d_transpose_double_grad

- backward_api : conv3d_grad
  forward : conv3d (Tensor input, Tensor filter, int[] strides, int[] paddings, str paddding_algorithm, int groups, int[] dilations, str data_format, bool use_addto, int workspace_size_MB, bool exhaustive_search) -> Tensor(out)
  args : (Tensor input, Tensor filter, Tensor out_grad, int[] strides, int[] paddings, str paddding_algorithm, int groups, int[] dilations, str data_format, bool use_addto, int workspace_size_MB, bool exhaustive_search)
  output : Tensor(input_grad), Tensor(filter_grad)
  invoke : conv3d_grad_impl(input, filter, out_grad, strides, paddings, paddding_algorithm, groups, dilations, data_format, use_addto, workspace_size_MB, exhaustive_search, input_grad, filter_grad)
  backward : conv3d_grad_grad

- backward_api : conv3d_grad_grad
  forward : conv3d_grad (Tensor input, Tensor filter, Tensor grad_out, int[] strides, int[] paddings, str paddding_algorithm, int groups, int[] dilations, str data_format, bool use_addto, int workspace_size_MB, bool exhaustive_search) -> Tensor(grad_input), Tensor(grad_filter)
  args : (Tensor input, Tensor filter, Tensor grad_out, Tensor grad_input_grad, Tensor grad_filter_grad, int[] strides, int[] paddings, str paddding_algorithm, int groups, int[] dilations, str data_format, bool use_addto, int workspace_size_MB, bool exhaustive_search)
  output : Tensor(input_grad), Tensor(filter_grad), Tensor(grad_out_grad)
  infer_meta :
    func : GeneralTernaryGradInferMeta
    param : [input, filter, grad_out]
  kernel :
    func : conv3d_grad_grad
    use_gpudnn : true
  optional : grad_input_grad, grad_filter_grad

- backward_api : conv3d_transpose_grad
  forward : conv3d_transpose(Tensor x, Tensor filter, int[] strides, int[] paddings, int[] output_padding, int[] output_size, str padding_algorithm, int groups, int[] dilations, str data_format) -> Tensor(out)
  args : (Tensor x, Tensor filter, Tensor out_grad, int[] strides, int[] paddings, int[] output_padding, int[] output_size, str padding_algorithm, int groups, int[] dilations, str data_format)
  output : Tensor(x_grad), Tensor(filter_grad)
  infer_meta :
    func : ConvTransposeGradInferMeta
  kernel :
    func : conv3d_transpose_grad
    use_gpudnn : true
- backward_api : cos_grad
  forward : cos (Tensor x) -> Tensor(out)
  args : (Tensor x, Tensor out_grad)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : cos_grad
  inplace : (out_grad -> x_grad)

- backward_api : cosh_grad
  forward : cosh (Tensor x) -> Tensor(out)
  args : (Tensor x, Tensor out_grad)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : cosh_grad
  inplace : (out_grad -> x_grad)

- backward_api : cross_entropy_with_softmax_grad
  forward : cross_entropy_with_softmax (Tensor input, Tensor label, bool soft_label, bool use_softmax, bool numeric_stable_mode, int ignore_index, int axis) -> Tensor(softmax), Tensor(loss)
  args : (Tensor label, Tensor softmax, Tensor loss_grad, bool soft_label, bool use_softmax, bool numeric_stable_mode, int ignore_index, int axis)
  output : Tensor(input_grad)
  infer_meta :
    func : CrossEntropyWithSoftmaxGradInferMeta
  kernel :
    func : cross_entropy_with_softmax_grad
    data_type : softmax
  inplace : (softmax -> input_grad)

- backward_api : cross_grad
  forward : cross (Tensor x, Tensor y, int axis = 9) -> Tensor(out)
  args : (Tensor x, Tensor y, Tensor out_grad, int axis)
  output : Tensor(x_grad), Tensor(y_grad)
  infer_meta :
    func : GeneralBinaryGradInferMeta
    param : [x, y]
  kernel :
    func : cross_grad

- backward_api : cumprod_grad
  forward : cumprod (Tensor x, int dim) -> Tensor(out)
  args : (Tensor x, Tensor out, Tensor out_grad, int dim)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : cumprod_grad

- backward_api : cumsum_grad
  forward : cumsum(Tensor x, int axis, bool flatten, bool exclusive, bool reverse) -> Tensor(out)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  args : (Tensor out_grad, int axis, bool flatten, bool exclusive, bool reverse)
  output : Tensor(x_grad)
  invoke : cumsum(out_grad, axis, flatten, exclusive, !reverse)

- backward_api : deformable_conv_grad
  forward : deformable_conv(Tensor x, Tensor offset, Tensor filter, Tensor mask, int[] strides, int[] paddings, int[] dilations, int deformable_groups, int groups, int im2col_step) -> Tensor(out)
  args : (Tensor x, Tensor offset, Tensor filter, Tensor mask, Tensor out_grad, int[] strides, int[] paddings, int[] dilations, int deformable_groups, int groups, int im2col_step)
  output : Tensor(x_grad), Tensor(offset_grad), Tensor(filter_grad), Tensor(mask_grad)
  infer_meta :
    func : DeformableConvGradInferMeta
  kernel :
    func : deformable_conv_grad
    data_type : x
  optional : mask

- backward_api : depthwise_conv2d_grad
  forward : depthwise_conv2d (Tensor input, Tensor filter, int[] strides, int[] paddings, str paddding_algorithm, int groups, int[] dilations, str data_format, bool use_addto, int workspace_size_MB, bool exhaustive_search, bool fuse_relu, bool use_gpudnn) -> Tensor(out)
  args : (Tensor input, Tensor filter, Tensor out_grad, int[] strides, int[] paddings, str paddding_algorithm, int groups, int[] dilations, str data_format, bool use_addto, int workspace_size_MB, bool exhaustive_search, bool fuse_relu, bool use_gpudnn)
  output : Tensor(input_grad), Tensor(filter_grad)
  infer_meta :
    func : GeneralBinaryGradInferMeta
    param : [input, filter]
  kernel :
    func : depthwise_conv2d_grad
    param : [input, filter, out_grad, strides, paddings, paddding_algorithm, groups, dilations, data_format, use_addto, workspace_size_MB, exhaustive_search, fuse_relu]
    use_gpudnn : use_gpudnn
  backward : depthwise_conv2d_grad_grad

- backward_api : depthwise_conv2d_grad_grad
  forward : depthwise_conv2d_grad (Tensor input, Tensor filter, Tensor grad_out, int[] strides, int[] paddings, str paddding_algorithm, int groups, int[] dilations, str data_format, bool use_addto, int workspace_size_MB, bool exhaustive_search, bool fuse_relu, bool use_gpudnn) -> Tensor(grad_input), Tensor(grad_filter)
  args : (Tensor input, Tensor filter, Tensor grad_out, Tensor grad_input_grad, Tensor grad_filter_grad, int[] strides, int[] paddings, str paddding_algorithm, int groups, int[] dilations, str data_format, bool use_addto, int workspace_size_MB, bool exhaustive_search, bool fuse_relu)
  output : Tensor(input_grad), Tensor(filter_grad), Tensor(grad_out_grad)
  infer_meta :
    func : GeneralTernaryGradInferMeta
    param : [input, filter, grad_out]
  kernel :
    func : depthwise_conv2d_grad_grad
  optional : grad_input_grad, grad_filter_grad

- backward_api : depthwise_conv2d_transpose_grad
  forward : depthwise_conv2d_transpose(Tensor x, Tensor filter, int[] strides, int[] paddings, int[] output_padding, int[] output_size, str padding_algorithm, int groups, int[] dilations, str data_format) -> Tensor(out)
  args : (Tensor x, Tensor filter, Tensor out_grad, int[] strides, int[] paddings, int[] output_padding, int[] output_size, str padding_algorithm, int groups, int[] dilations, str data_format)
  output : Tensor(x_grad), Tensor(filter_grad)
  infer_meta :
    func : ConvTransposeGradInferMeta
  kernel :
    func : depthwise_conv2d_transpose_grad

- backward_api : det_grad
  forward : det (Tensor x) -> Tensor(out)
  args : (Tensor x, Tensor out, Tensor out_grad)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : determinant_grad

- backward_api : diagonal_grad
  forward : diagonal (Tensor x, int offset, int axis1, int axis2) -> Tensor(out)
  args : (Tensor x, Tensor out_grad, int offset = 0, int axis1 = 0, int axis2 = 1)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : diagonal_grad
  no_need_buffer : x

- backward_api : digamma_grad
  forward : digamma (Tensor x) -> Tensor(out)
  args : (Tensor x, Tensor out_grad)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : digamma_grad

- backward_api : dist_grad
  forward : dist (Tensor x, Tensor y, float p) -> Tensor(out)
  args : (Tensor x, Tensor y, Tensor out, Tensor out_grad, float p)
  output : Tensor(x_grad), Tensor(y_grad)
  infer_meta :
    func : GeneralBinaryGradInferMeta
    param : [x, y]
  kernel :
    func : dist_grad

- backward_api : divide_double_grad
  forward : divide_grad (Tensor x, Tensor y, Tensor out, Tensor grad_out, int axis = -1) -> Tensor(grad_x), Tensor(grad_y)
  args : (Tensor y, Tensor out, Tensor grad_x, Tensor grad_x_grad, Tensor grad_y_grad, int axis = -1)
  output : Tensor(y_grad), Tensor(out_grad), Tensor(grad_out_grad)
  infer_meta :
    func : GeneralTernaryGradInferMeta
    param : [y, grad_x, grad_x]
  kernel :
    func : divide_double_grad
    data_type : out
  optional : grad_x_grad, grad_y_grad
  inplace : (grad_x_grad -> grad_out_grad)

- backward_api : divide_grad
  forward : divide (Tensor x, Tensor y) -> Tensor(out)
  args : (Tensor x, Tensor y, Tensor out, Tensor out_grad, int axis = -1)
  output : Tensor(x_grad), Tensor(y_grad)
  infer_meta :
    func : GeneralBinaryGradInferMeta
    param : [x, y]
  kernel :
    func : divide_grad
  backward : divide_double_grad

- backward_api : dropout_grad
  forward : dropout (Tensor x, Tensor seed_tensor, float p, bool is_test, str mode, int seed, bool fix_seed) -> Tensor(out), Tensor(mask)
  args : (Tensor mask, Tensor out_grad, float p, bool is_test, str mode)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [out_grad]
  kernel :
    func : dropout_grad

- backward_api : eigh_grad
  forward : eigh (Tensor x, str uplo) -> Tensor(out_w), Tensor(out_v)
  args : (Tensor out_w, Tensor out_v, Tensor out_w_grad, Tensor out_v_grad)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [out_v]
  kernel :
    func : eigh_grad
    data_type : out_v
  data_transform :
    skip_transform : out_w, out_w_grad

- backward_api : einsum_grad
  forward : einsum (Tensor[] x, str equation) -> Tensor(out), Tensor[](inner_cache), Tensor[](x_shape)
  args : (Tensor[] x_shape, Tensor[] inner_cache, Tensor out_grad, str equation)
  output : Tensor[](x_grad){x.size()}
  infer_meta :
    func : UnchangedMultiInferMeta
    param : [x_shape]
  kernel :
    func : einsum_grad

- backward_api : elementwise_pow_grad
  forward : elementwise_pow(Tensor x, Tensor y) -> Tensor(out)
  args : (Tensor x, Tensor y, Tensor out_grad, int axis=-1)
  output : Tensor(x_grad), Tensor(y_grad)
  infer_meta :
    func : GeneralBinaryGradInferMeta
    param : [x, y]
  kernel :
    func : elementwise_pow_grad

- backward_api : elu_double_grad
  forward : elu_grad (Tensor x, Tensor out, Tensor grad_out, float alpha)-> Tensor(grad_x)
  args : (Tensor x, Tensor grad_out, Tensor grad_x_grad, float alpha)
  output : Tensor(x_grad), Tensor(grad_out_grad)
  infer_meta :
    func : GeneralBinaryGradInferMeta
    param : [x, x]
  kernel :
    func : elu_double_grad
  inplace : (grad_x_grad -> grad_out_grad)

- backward_api : elu_grad
  forward : elu (Tensor x, float alpha) -> Tensor(out)
  args : (Tensor x, Tensor out, Tensor out_grad, float alpha)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : elu_grad
  backward : elu_double_grad
  inplace : (out_grad -> x_grad)

- backward_api : embedding_grad
  forward : embedding (Tensor x, Tensor weight, int64_t padding_idx=-1, bool sparse=false) -> Tensor(out)
  args : (Tensor x, Tensor weight, Tensor out_grad, int64_t padding_idx=-1, bool sparse=false)
  output : Tensor(weight_grad)
  invoke : embedding_grad_impl(x, weight, out_grad, padding_idx, sparse, weight_grad)
- backward_api : erf_grad
  forward : erf (Tensor x) -> Tensor(out)
  args : (Tensor x, Tensor out_grad)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : erf_grad
    data_type : out_grad

- backward_api : erfinv_grad
  forward : erfinv (Tensor x) -> Tensor(out)
  args : (Tensor out, Tensor out_grad)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [out]
  kernel :
    func : erfinv_grad

- backward_api : exp_grad
  forward : exp (Tensor x) -> Tensor(out)
  args : (Tensor out, Tensor out_grad)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [out]
  kernel :
    func : exp_grad
  inplace : (out_grad -> x_grad)

- backward_api : expand_as_grad
  forward : expand_as (Tensor x, Tensor y, int[] target_shape) -> Tensor(out)
  args : (Tensor x, Tensor out_grad, int[] target_shape)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : expand_as_grad
  no_need_buffer : x

- backward_api : expand_double_grad
  forward : expand_grad (Tensor x, Tensor grad_out, IntArray shape) -> Tensor(grad_x)
  args : (Tensor grad_x_grad, IntArray shape)
  output : Tensor(grad_out_grad)
  infer_meta :
    func : ExpandInferMeta
  kernel :
    func : expand

- backward_api : expand_grad
  forward : expand (Tensor x, IntArray shape) -> Tensor(out)
  args : (Tensor x, Tensor out_grad, IntArray shape)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : expand_grad
  no_need_buffer : x
  backward : expand_double_grad

- backward_api : expm1_grad
  forward : expm1 (Tensor x) -> Tensor(out)
  args : (Tensor out, Tensor out_grad)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [out]
  kernel :
    func : expm1_grad
  inplace : (out_grad -> x_grad)

- backward_api : flatten_grad
  forward : flatten(Tensor x, int start_axis, int stop_axis) -> Tensor(out), Tensor(xshape)
  args : (Tensor xshape, Tensor out_grad)
  output : Tensor(x_grad)
  infer_meta :
    func : KernelWithXShapeInferMeta
    param : [xshape]
  kernel :
    func : flatten_grad
    data_type : out_grad
    backend : out_grad
    layout : out_grad
  inplace : (out_grad -> x_grad)

- backward_api : flip_grad
  forward : flip (Tensor x, int[] axis) -> Tensor(out)
  args : (Tensor out_grad, int[] axis)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [out_grad]
  kernel :
    func : flip

- backward_api : floor_grad
  forward : floor(Tensor x) -> Tensor(out)
  args : (Tensor out_grad)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [out_grad]
  kernel :
    func : floor_grad
  inplace : (out_grad -> x_grad)

- backward_api : fmax_grad
  forward : fmax(Tensor x, Tensor y, int axis) -> Tensor(out)
  args : (Tensor x, Tensor y, Tensor out_grad, int axis)
  output : Tensor(x_grad), Tensor(y_grad)
  infer_meta :
    func : GeneralBinaryGradInferMeta
    param : [x, y]
  kernel :
    func : fmax_grad

- backward_api : fmin_grad
  forward : fmin(Tensor x, Tensor y, int axis) -> Tensor(out)
  args : (Tensor x, Tensor y, Tensor out_grad, int axis)
  output : Tensor(x_grad), Tensor(y_grad)
  infer_meta :
    func : GeneralBinaryGradInferMeta
    param : [x, y]
  kernel :
    func : fmin_grad

- backward_api : frobenius_norm_grad
  forward : frobenius_norm(Tensor x, int64_t[] axis, bool keep_dim, bool reduce_all) -> Tensor(out)
  args : (Tensor x, Tensor out, Tensor out_grad, int64_t[] axis, bool keep_dim, bool reduce_all)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : frobenius_norm_grad

- backward_api : gather_grad
  forward : gather(Tensor x, Tensor index, Scalar axis=0) -> Tensor(out)
  args : (Tensor x, Tensor index, Tensor out_grad, Scalar axis=0, bool overwrite=false)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    data_type : x
    func : gather_grad
  no_need_buffer : x

- backward_api : gather_nd_grad
  forward : gather_nd (Tensor x, Tensor index) -> Tensor(out)
  args : (Tensor x, Tensor index, Tensor out_grad)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : gather_nd_grad
  no_need_buffer : x

- backward_api : gelu_grad
  forward : gelu(Tensor x, bool approximate) -> Tensor(out)
  args : (Tensor x, Tensor out_grad, bool approximate)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : gelu_grad

- backward_api : graph_send_recv_grad
  forward : graph_send_recv (Tensor x, Tensor src_index, Tensor dst_index, str pool_type = "SUM", int64_t out_size = 0) -> Tensor(out), Tensor(dst_count)
  args : (Tensor x, Tensor src_index, Tensor dst_index, Tensor out, Tensor dst_count, Tensor out_grad, str pool_type = "SUM")
  output : Tensor(x_grad)
  infer_meta :
    func : GeneralUnaryGradInferMeta
    param : [x]
  kernel :
    func : graph_send_recv_grad
    data_type : out_grad
  optional : out, dst_count

- backward_api : group_norm_grad
  forward : group_norm (Tensor x, Tensor scale, Tensor bias, float epsilon, int groups, str data_layout) -> Tensor(y), Tensor(mean), Tensor(variance)
  args : (Tensor x, Tensor scale, Tensor bias, Tensor y, Tensor mean, Tensor variance, Tensor y_grad, float epsilon, int groups, str data_layout)
  output : Tensor(x_grad), Tensor(scale_grad), Tensor(bias_grad)
  infer_meta :
    func : GeneralTernaryGradInferMeta
    param : [y, scale, bias]
  kernel :
    func : group_norm_grad
    data_type : y_grad
  optional : scale, bias
  inplace : (y_grad -> x_grad)

- backward_api : gumbel_softmax_grad
  forward : gumbel_softmax (Tensor x, float temperature, bool hard, int axis) -> Tensor(out)
  args : (Tensor out, Tensor out_grad, int axis)
  output : Tensor(x_grad)
  infer_meta :
    func : GumbelSoftmaxGradInferMeta
    param : [out, out_grad, axis]
  kernel :
    func : gumbel_softmax_grad
- backward_api : hard_shrink_grad
  forward : hard_shrink (Tensor x, float threshold) -> Tensor(out)
  args : (Tensor x, Tensor out_grad, float threshold)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : hard_shrink_grad
  inplace : (out_grad -> x_grad)

- backward_api : hard_sigmoid_grad
  forward : hard_sigmoid (Tensor x, float slope, float offset) -> Tensor(out)
  args : (Tensor out, Tensor out_grad, float slope, float offset)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [out]
  kernel :
    func : hard_sigmoid_grad
  inplace : (out_grad -> x_grad)

- backward_api : hard_swish_grad
  forward : hard_swish (Tensor x, float threshold = 6.0, float scale = 6.0, float offset = 3.0) -> Tensor(out)
  args : (Tensor x, Tensor out_grad, float threshold, float scale, float offset)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : hard_swish_grad
  inplace : (out_grad -> x_grad)

- backward_api : huber_loss_grad
  forward : huber_loss (Tensor input, Tensor label, float delta) -> Tensor(out), Tensor(residual)
  args : (Tensor residual, Tensor out_grad, float delta)
  output : Tensor(input_grad), Tensor(label_grad)
  infer_meta :
    func : GeneralBinaryGradInferMeta
    param : [residual, residual]
  kernel :
    func : huber_loss_grad

- backward_api : imag_grad
  forward : imag (Tensor x) -> Tensor(out)
  args : (Tensor out_grad)
  output : Tensor(x_grad)
  invoke : imag_grad_impl(out_grad, x_grad)

- backward_api : index_sample_grad
  forward : index_sample (Tensor x, Tensor index) -> Tensor(out)
  args : (Tensor x, Tensor index, Tensor out_grad)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : index_sample_grad
    data_type : out_grad
  no_need_buffer : x

- backward_api : index_select_grad
  forward : index_select(Tensor x, Tensor index, int dim) -> Tensor(out)
  args : (Tensor x, Tensor index, Tensor out_grad, int dim)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : index_select_grad
    data_type : x
  no_need_buffer : x

- backward_api : instance_norm_double_grad
  forward : instance_norm_grad(Tensor x, Tensor fwd_scale, Tensor saved_mean, Tensor saved_variance, Tensor grad_y, float epsilon) -> Tensor(grad_x), Tensor(grad_scale), Tensor(grad_bias)
  args : (Tensor x, Tensor fwd_scale, Tensor saved_mean, Tensor saved_variance, Tensor grad_y, Tensor grad_x_grad, Tensor grad_scale_grad, Tensor grad_bias_grad, float epsilon)
  output : Tensor(x_grad), Tensor(fwd_scale_grad), Tensor(grad_y_grad)
  infer_meta :
    func : InstanceNormDoubleGradInferMeta
  kernel :
    func : instance_norm_double_grad
    data_type : x
  optional : fwd_scale, grad_x_grad, grad_scale_grad, grad_bias_grad

- backward_api : instance_norm_grad
  forward : instance_norm(Tensor x, Tensor scale, Tensor bias, float epsilon) -> Tensor(y), Tensor(saved_mean), Tensor(saved_variance)
  args : (Tensor x, Tensor scale, Tensor saved_mean, Tensor saved_variance, Tensor y_grad, float epsilon)
  output : Tensor(x_grad), Tensor(scale_grad), Tensor(bias_grad)
  infer_meta :
    func : InstanceNormGradInferMeta
  kernel :
    func : instance_norm_grad
    data_type : x
  optional : scale
  backward : instance_norm_double_grad

- backward_api : kldiv_loss_grad
  forward : kldiv_loss(Tensor x, Tensor label, str reduction) -> Tensor(out)
  args : (Tensor x, Tensor label, Tensor out_grad, str reduction)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : kldiv_loss_grad
  no_need_buffer : x

- backward_api : kron_grad
  forward : kron (Tensor x, Tensor y) -> Tensor(out)
  args : (Tensor x, Tensor y, Tensor out_grad)
  output : Tensor(x_grad), Tensor(y_grad)
  infer_meta :
    func : GeneralBinaryGradInferMeta
    param : [x, y]
  kernel :
    func : kron_grad
    data_type : out_grad

- backward_api : kthvalue_grad
  forward : kthvalue(Tensor x, int k, int axis, bool keepdim) -> Tensor(out), Tensor(indices)
  args : (Tensor x, Tensor indices, Tensor out_grad, int k, int axis, bool keepdim)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : kthvalue_grad

- backward_api : label_smooth_grad
  forward : label_smooth (Tensor label, Tensor prior_dist, float epsilon) -> Tensor(out)
  args : (Tensor out_grad, float epsilon)
  output : Tensor(label_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [out_grad]
  kernel :
    func : label_smooth_grad

- backward_api : layer_norm_grad
  forward : layer_norm (Tensor x, Tensor scale, Tensor bias, float epsilon, int begin_norm_axis, bool is_test) -> Tensor(out), Tensor(mean), Tensor(variance)
  args : (Tensor x, Tensor scale, Tensor bias, Tensor mean, Tensor variance, Tensor out_grad, float epsilon, int begin_norm_axis, bool is_test)
  output : Tensor(x_grad), Tensor(scale_grad), Tensor(bias_grad)
  infer_meta :
    func : LayerNormGradInferMeta
    param : [x, scale, bias]
  kernel :
    func : layer_norm_grad
    data_type : out_grad
  no_need_buffer : bias
  optional : scale, bias

- backward_api : leaky_relu_double_grad
  forward : leaky_relu_grad (Tensor x, Tensor grad_out, float alpha) -> Tensor(grad_x)
  args : (Tensor x, Tensor grad_x_grad, float alpha)
  output : Tensor(grad_out_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [grad_x_grad]
  kernel :
    func : leaky_relu_double_grad
  inplace : (grad_x_grad -> grad_out_grad)

- backward_api : leaky_relu_grad
  forward : leaky_relu (Tensor x, float alpha) -> Tensor(out)
  args : (Tensor x, Tensor out_grad, float alpha)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : leaky_relu_grad
  backward : leaky_relu_double_grad
  inplace : (out_grad -> x_grad)

- backward_api : lerp_grad
  forward : lerp (Tensor x, Tensor y, Tensor weight) -> Tensor(out)
  args : (Tensor x, Tensor y, Tensor weight, Tensor out, Tensor out_grad)
  output : Tensor(x_grad), Tensor(y_grad)
  infer_meta :
    func : GeneralBinaryGradInferMeta
    param : [x, y]
  kernel :
    func : lerp_grad

- backward_api : lgamma_grad
  forward : lgamma(Tensor x) -> Tensor(out)
  args : (Tensor x, Tensor out_grad)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : lgamma_grad

- backward_api : log10_grad
  forward : log10 (Tensor x) -> Tensor(out)
  args : (Tensor x, Tensor out_grad)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : log10_grad
  inplace : (out_grad -> x_grad)

- backward_api : log1p_grad
  forward : log1p (Tensor x) -> Tensor(out)
  args : (Tensor x, Tensor out_grad)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : log1p_grad
  inplace : (out_grad -> x_grad)

- backward_api : log2_grad
  forward : log2 (Tensor x) -> Tensor(out)
  args : (Tensor x, Tensor out_grad)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : log2_grad
  inplace : (out_grad -> x_grad)

- backward_api : log_double_grad
  forward : log_grad (Tensor x, Tensor grad_out) -> Tensor(grad_x)
  args : (Tensor x, Tensor grad_out, Tensor grad_x_grad)
  output : Tensor(x_grad), Tensor(grad_out_grad)
  infer_meta :
    func : GeneralBinaryGradInferMeta
    param : [x, x]
  kernel :
    func : log_double_grad
  inplace : (grad_x_grad -> grad_out_grad)

- backward_api : log_grad
  forward : log (Tensor x) -> Tensor(out)
  args : (Tensor x, Tensor out_grad)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : log_grad
  backward : log_double_grad
  inplace : (out_grad -> x_grad)
- backward_api : log_loss_grad
  forward : log_loss (Tensor input, Tensor label, float epsilon) -> Tensor(out)
  args : (Tensor input, Tensor label, Tensor out_grad, float epsilon)
  output : Tensor(input_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [input]
  kernel :
    func : log_loss_grad

- backward_api : log_softmax_grad
  forward : log_softmax(Tensor x, int axis) -> Tensor(out)
  args : (Tensor out, Tensor out_grad, int axis)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [out]
  kernel :
    func : log_softmax_grad

- backward_api : logcumsumexp_grad
  forward : logcumsumexp(Tensor x, int axis, bool flatten, bool exclusive, bool reverse) -> Tensor(out)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  args : (Tensor x, Tensor out, Tensor out_grad, int axis, bool flatten, bool exclusive, bool reverse)
  output : Tensor(x_grad)
  kernel :
    func : logcumsumexp_grad

- backward_api : logit_grad
  forward : logit (Tensor x, float eps = 1e-6f) -> Tensor(out)
  args : (Tensor x, Tensor out_grad, float eps)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : logit_grad

- backward_api : logsigmoid_grad
  forward : logsigmoid (Tensor x) -> Tensor(out)
  args : (Tensor x, Tensor out_grad)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : logsigmoid_grad
  inplace : (out_grad -> x_grad)

- backward_api : logsumexp_grad
  forward : logsumexp(Tensor x, int64_t[] axis, bool keepdim, bool reduce_all) -> Tensor(out)
  args : (Tensor x, Tensor out, Tensor out_grad, int64_t[] axis, bool keepdim, bool reduce_all)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : logsumexp_grad

- backward_api : masked_select_grad
  forward : masked_select (Tensor x, Tensor mask) -> Tensor(out)
  args : (Tensor x, Tensor mask, Tensor out_grad)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : masked_select_grad
    data_type : x
  no_need_buffer : x

- backward_api : matmul_double_grad
  forward : matmul_grad (Tensor x, Tensor y, Tensor grad_out, bool transpose_x=false, bool transpose_y=false) -> Tensor(grad_x), Tensor(grad_y)
  args : (Tensor x, Tensor y, Tensor grad_out, Tensor grad_x_grad, Tensor grad_y_grad, bool transpose_x=false, bool transpose_y=false)
  output : Tensor(x_grad), Tensor(y_grad), Tensor(grad_out_grad)
  infer_meta :
    func : GeneralTernaryGradInferMeta
    param : [x, y, grad_out]
  kernel :
    func : matmul_double_grad
  backward : matmul_triple_grad
  optional : grad_x_grad, grad_y_grad

- backward_api : matmul_grad
  forward : matmul (Tensor x, Tensor y, bool transpose_x=false, bool transpose_y=false) -> Tensor(out)
  args : (Tensor x, Tensor y, Tensor out_grad, bool transpose_x=false, bool transpose_y=false)
  output : Tensor(x_grad), Tensor(y_grad)
  infer_meta :
    func : GeneralBinaryGradInferMeta
    param : [x, y]
  kernel :
    func : matmul_grad
  backward : matmul_double_grad

- backward_api : matmul_triple_grad
  forward : matmul_double_grad (Tensor x, Tensor y, Tensor fwd_grad_out, Tensor fwd_grad_grad_x, Tensor fwd_grad_grad_y, bool transpose_x=false, bool transpose_y=false) -> Tensor(grad_x), Tensor(grad_y), Tensor(grad_grad_out)
  args : (Tensor x, Tensor y, Tensor fwd_grad_out, Tensor fwd_grad_grad_x, Tensor fwd_grad_grad_y, Tensor grad_x_grad, Tensor grad_y_grad, Tensor grad_grad_out_grad, bool transpose_x=false, bool transpose_y=false)
  output : Tensor(x_grad), Tensor(y_grad), Tensor(fwd_grad_out_grad), Tensor(fwd_grad_grad_x_grad), Tensor(fwd_grad_grad_y_grad)
  infer_meta :
    func : GeneralQuinaryGradInferMeta
    param : [x, y, fwd_grad_out, fwd_grad_grad_x, fwd_grad_grad_y]
  kernel :
    func : matmul_triple_grad
  optional : grad_x_grad, grad_y_grad, grad_grad_out_grad

- backward_api : matrix_power_grad
  forward : matrix_power (Tensor x, int n) -> Tensor(out)
  args : (Tensor x, Tensor out, Tensor out_grad, int n)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : matrix_power_grad

- backward_api : max_grad
  forward : max (Tensor x, int64_t[] dims={}, bool keep_dim=false) -> Tensor(out)
  args : (Tensor x, Tensor out, Tensor out_grad, int64_t[] dims={}, bool keep_dim=false, bool reduce_all=false)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : max_grad

- backward_api : max_pool2d_with_index_grad
  forward : max_pool2d_with_index(Tensor x, int[] kernel_size, int[] strides, int[] paddings, bool global_pooling, bool adaptive) -> Tensor(out), Tensor(mask)
  args : (Tensor x, Tensor mask, Tensor out_grad, int[] kernel_size, int[] strides, int[] paddings, bool global_pooling, bool adaptive)
  output : Tensor(x_grad)
  infer_meta :
    func : MaxPoolWithIndexGradInferMeta
  kernel :
    func : max_pool2d_with_index_grad

- backward_api : max_pool3d_with_index_grad
  forward : max_pool3d_with_index(Tensor x, int[] kernel_size, int[] strides, int[] paddings, bool global_pooling, bool adaptive) -> Tensor(out), Tensor(mask)
  args : (Tensor x, Tensor mask, Tensor out_grad, int[] kernel_size, int[] strides, int[] paddings, bool global_pooling, bool adaptive)
  output : Tensor(x_grad)
  infer_meta :
    func : MaxPoolWithIndexGradInferMeta
  kernel :
    func : max_pool3d_with_index_grad

- backward_api : maximum_grad
  forward : maximum(Tensor x, Tensor y) -> Tensor(out)
  args : (Tensor x, Tensor y, Tensor out_grad, int axis=-1)
  output : Tensor(x_grad), Tensor(y_grad)
  infer_meta :
    func : GeneralBinaryGradInferMeta
    param : [x, y]
  kernel :
    func : maximum_grad

- backward_api : maxout_grad
  forward : maxout(Tensor x, int groups, int axis) -> Tensor(out)
  args : (Tensor x, Tensor out, Tensor out_grad, int groups, int axis)
  output : Tensor(x_grad)
  infer_meta :
    func : GeneralUnaryGradInferMeta
    param : [x]
  kernel :
    func : maxout_grad

- backward_api : mean_all_grad
  forward : mean_all(Tensor x) -> Tensor(out)
  args : (Tensor x, Tensor out_grad)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : mean_all_grad

- backward_api : mean_double_grad
  forward : mean_grad (Tensor x, Tensor grad_out, int64_t[] dims={}, bool keep_dim=false, bool reduce_all = false) -> Tensor(grad_x)
  args : (Tensor grad_x_grad, int64_t[] dims={}, bool keep_dim=false, bool reduce_all=false)
  output : Tensor(grad_out_grad)
  invoke : mean(grad_x_grad, dims, keep_dim)

- backward_api : mean_grad
  forward : mean (Tensor x, int64_t[] dims={}, bool keep_dim=false) -> Tensor(out)
  args : (Tensor x, Tensor out_grad, int64_t[] dims={}, bool keep_dim=false, bool reduce_all=false)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : mean_grad
  backward : mean_double_grad
  no_need_buffer : x

- backward_api : meshgrid_grad
  forward : meshgrid (Tensor[] inputs) -> Tensor[](outputs)
  args : (Tensor[] inputs, Tensor[] outputs_grad)
  output : Tensor[](inputs_grad){inputs.size()}
  infer_meta :
    func : MeshgridGradInferMeta
  kernel :
    func : meshgrid_grad

- backward_api : min_grad
  forward : min (Tensor x, int64_t[] dims={}, bool keep_dim=false) -> Tensor(out)
  args : (Tensor x, Tensor out, Tensor out_grad, int64_t[] dims={}, bool keep_dim=false, bool reduce_all=false)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : min_grad

- backward_api : minimum_grad
  forward : minimum(Tensor x, Tensor y) -> Tensor(out)
  args : (Tensor x, Tensor y, Tensor out_grad, int axis=-1)
  output : Tensor(x_grad), Tensor(y_grad)
  infer_meta :
    func : GeneralBinaryGradInferMeta
    param : [x, y]
  kernel :
    func : minimum_grad

- backward_api : mish_grad
  forward : mish (Tensor x, float threshold) -> Tensor(out)
  args : (Tensor x, Tensor out_grad, float threshold)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : mish_grad
  inplace : (out_grad -> x_grad)

- backward_api : mode_grad
  forward : mode(Tensor x, int axis, bool keepdim) -> Tensor(out), Tensor(indices)
  args : (Tensor x, Tensor indices, Tensor out_grad, int axis, bool keepdim)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : mode_grad

- backward_api : modulo_grad
  forward : modulo (Tensor x, Tensor y) -> Tensor(out)
  args : (Tensor x, Tensor y, Tensor out_grad, int axis = -1)
  output : Tensor(x_grad), Tensor(y_grad)
  infer_meta :
    func : GeneralBinaryGradInferMeta
    param : [x, y]
  kernel :
    func : modulo_grad
  no_need_buffer : x, y
- backward_api : multi_dot_grad
  forward : multi_dot (Tensor[] x) -> Tensor(out)
  args : (Tensor[] x, Tensor out_grad)
  output : Tensor[](x_grad) {x.size()}
  infer_meta :
    func : MultiDotGradInferMeta
  kernel :
    func : multi_dot_grad

- backward_api : multiplex_grad
  forward : multiplex (Tensor[] ins, Tensor ids) -> Tensor(out)
  args : (Tensor[] ins, Tensor ids, Tensor out_grad)
  output : Tensor[](ins_grad){ins.size()}
  infer_meta :
    func : MultiplexGradInferMeta
    param : [ids, out_grad]
  kernel :
    func : multiplex_grad
    param : [ids, out_grad]

- backward_api : multiply_double_grad
  forward : multiply_grad (Tensor x, Tensor y, Tensor grad_out, int axis = -1) -> Tensor(grad_x), Tensor(grad_y)
  args : (Tensor x, Tensor y, Tensor grad_out, Tensor grad_x_grad, Tensor grad_y_grad, int axis = -1)
  output : Tensor(x_grad), Tensor(y_grad), Tensor(grad_out_grad)
  infer_meta :
    func : GeneralTernaryGradInferMeta
    param : [x, y, grad_out]
  kernel :
    func : multiply_double_grad
  optional : grad_x_grad, grad_y_grad
  backward : multiply_triple_grad
  inplace : (grad_x_grad -> grad_out_grad)

- backward_api : multiply_grad
  forward : multiply (Tensor x, Tensor y) -> Tensor(out)
  args : (Tensor x, Tensor y, Tensor out_grad, int axis = -1)
  output : Tensor(x_grad), Tensor(y_grad)
  infer_meta :
    func : GeneralBinaryGradInferMeta
    param : [x, y]
  kernel :
    func : multiply_grad
  backward : multiply_double_grad

- backward_api : multiply_triple_grad
  forward : multiply_double_grad (Tensor x, Tensor y, Tensor fwd_grad_out, Tensor fwd_grad_grad_x, Tensor fwd_grad_grad_y, int aixs = -1) -> Tensor(grad_x), Tensor(grad_y), Tensor(grad_grad_out)
  args : (Tensor x, Tensor y, Tensor fwd_grad_out, Tensor fwd_grad_grad_x, Tensor fwd_grad_grad_y, Tensor grad_x_grad, Tensor grad_y_grad, Tensor grad_grad_out_grad, int axis = -1)
  output : Tensor(x_grad), Tensor(y_grad), Tensor(fwd_grad_out_grad), Tensor(fwd_grad_grad_x_grad), Tensor(fwd_grad_grad_y_grad)
  infer_meta :
    func : GeneralQuinaryGradInferMeta
    param : [x, y, fwd_grad_out, x, y]
  kernel :
    func : multiply_triple_grad
  optional : fwd_grad_grad_x, fwd_grad_grad_y, grad_grad_out_grad

- backward_api : mv_grad
  forward : mv (Tensor x, Tensor vec) -> Tensor(out)
  args : (Tensor x, Tensor vec, Tensor out_grad)
  output : Tensor(x_grad), Tensor(vec_grad)
  infer_meta :
    func : GeneralBinaryGradInferMeta
    param : [x, vec]
  kernel :
    func : mv_grad

- backward_api : nll_loss_grad
  forward : nll_loss (Tensor input, Tensor label, Tensor weight, int64_t ignore_index, str reduction) -> Tensor(out), Tensor(total_weight)
  args : (Tensor input, Tensor label, Tensor weight, Tensor total_weight, Tensor out_grad, int64_t ignore_index, str reduction)
  output : Tensor(input_grad)
  infer_meta :
    func : NllLossGradInferMeta
  kernel :
    func : nll_loss_grad
    data_type : input
  optional : weight

- backward_api : norm_grad
  forward : norm (Tensor x, int axis, float epsilon, bool is_test) -> Tensor(out), Tensor(norm)
  args : (Tensor x, Tensor norm, Tensor out_grad, int axis, float epsilon, bool is_test)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : norm_grad

- backward_api : p_norm_grad
  forward : p_norm(Tensor x, float porder, int axis, float epsilon, bool keepdim, bool asvector=false) -> Tensor(out)
  args : (Tensor x, Tensor out, Tensor out_grad, float porder, int axis, float epsilon, bool keepdim, bool asvector)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : p_norm_grad

- backward_api : pad3d_double_grad
  forward : pad3d_grad(Tensor x, Tensor grad_out, IntArray paddings, str mode, float pad_value, str data_format) -> Tensor(grad_x)
  args : (Tensor grad_x_grad, IntArray paddings, str mode, float pad_value, str data_format)
  output : Tensor(grad_out_grad)
  infer_meta :
    func : Pad3dInferMeta
  kernel :
    func : pad3d

- backward_api : pad3d_grad
  forward : pad3d(Tensor x, IntArray paddings, str mode, float pad_value, str data_format) -> Tensor(out)
  args : (Tensor x, Tensor out_grad, IntArray paddings, str mode, float pad_value, str data_format)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : pad3d_grad
  no_need_buffer : x
  backward : pad3d_double_grad

- backward_api : pad_double_grad
  forward : pad_grad(Tensor x, Tensor grad_out, int[] paddings, float pad_value) -> Tensor(grad_x)
  args : (Tensor grad_x_grad, int[] paddings, float pad_value)
  output : Tensor(grad_out_grad)
  infer_meta :
    func : PadInferMeta
  kernel :
    func : pad

- backward_api : pad_grad
  forward : pad(Tensor x, int[] paddings, float pad_value) -> Tensor(out)
  args : (Tensor x, Tensor out_grad, int[] paddings, float pad_value)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : pad_grad
    param : [out_grad, paddings, pad_value]
  no_need_buffer : x
  backward : pad_double_grad

- backward_api : pixel_shuffle_grad
  forward : pixel_shuffle (Tensor x, int upscale_factor, str data_format) -> Tensor(out)
  args : (Tensor out_grad, int upscale_factor, str data_format)
  output : Tensor(x_grad)
  infer_meta :
    func : PixelShuffleGradInferMeta
  kernel :
    func : pixel_shuffle_grad

- backward_api : poisson_grad
  forward : poisson (Tensor x) -> Tensor(out)
  args : (Tensor out_grad)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [out_grad]
  kernel :
    func : poisson_grad

- backward_api : pool2d_double_grad
  forward : pool2d_grad(Tensor x, Tensor out, Tensor grad_out, int[] kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm) -> Tensor(grad_x)
  args : (Tensor grad_x_grad, int[] kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm)
  output : Tensor(grad_out_grad)
  infer_meta :
    func : PoolInferMeta
  kernel :
    func : pool2d_double_grad
    use_gpudnn : true

- backward_api : pool2d_grad
  forward : pool2d(Tensor x, int[] kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm) -> Tensor(out)
  args : (Tensor x, Tensor out, Tensor out_grad, int[] kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm)
  output : Tensor(x_grad)
  infer_meta :
    func : PoolGradInferMeta
  kernel :
    func : pool2d_grad
    use_gpudnn : true
  backward : pool2d_double_grad

- backward_api : pool2d_grad_gpudnn_unused
  forward : pool2d_gpudnn_unused(Tensor x, int[] kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm) -> Tensor(out)
  args : (Tensor x, Tensor out, Tensor out_grad, int[] kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm)
  output : Tensor(x_grad)
  infer_meta :
    func : PoolGradInferMeta
  kernel :
    func : pool2d_grad
    use_gpudnn : false

- backward_api : pool3d_grad
  forward : pool3d(Tensor x, int[] kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm) -> Tensor(out)
  args : (Tensor x, Tensor out, Tensor out_grad, int[] kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm)
  output : Tensor(x_grad)
  infer_meta :
    func : PoolGradInferMeta
  kernel :
    func : pool3d_grad
    use_gpudnn : true

- backward_api : pow_grad
  forward : pow(Tensor x, Scalar s) -> Tensor(out)
  args : (Tensor x, Tensor out_grad, Scalar s=-1)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : pow_grad
  inplace : (out_grad -> x_grad)

- backward_api : prelu_grad
  forward : prelu(Tensor x, Tensor alpha, str data_format, str mode) -> Tensor(out)
  args : (Tensor x, Tensor alpha, Tensor out_grad, str data_format, str mode)
  output : Tensor(x_grad), Tensor(alpha_grad)
  infer_meta :
    func : GeneralBinaryGradInferMeta
param
:
[
x
,
alpha
]
kernel
:
func
:
prelu_grad
-
backward_api
:
psroi_pool_grad
forward
:
psroi_pool (Tensor x, Tensor boxes, Tensor boxes_num, int pooled_height, int pooled_width, int output_channels, float spatial_scale) -> Tensor(out)
args
:
(Tensor x, Tensor boxes, Tensor boxes_num, Tensor out_grad, int pooled_height, int pooled_width, int output_channels, float spatial_scale)
output
:
Tensor(x_grad)
infer_meta
:
func
:
GeneralUnaryGradInferMeta
param
:
[
x
]
kernel
:
func
:
psroi_pool_grad
data_type
:
x
optional
:
boxes_num
# output is optional
-
backward_api
:
put_along_axis_grad
forward
:
put_along_axis (Tensor x, Tensor index, Tensor value, int axis, str reduce) -> Tensor(out)
args
:
(Tensor x, Tensor index, Tensor out_grad, int axis, str reduce)
output
:
Tensor(x_grad), Tensor(value_grad)
infer_meta
:
func
:
GeneralBinaryGradInferMeta
param
:
[
x
,
index
]
kernel
:
func
:
put_along_axis_grad
-
backward_api
:
real_grad
forward
:
real (Tensor x) -> Tensor(out)
args
:
(Tensor out_grad)
output
:
Tensor(x_grad)
invoke
:
real_grad_impl(out_grad, x_grad)
-
backward_api
:
reciprocal_grad
forward
:
reciprocal (Tensor x) -> Tensor(out)
args
:
(Tensor out, Tensor out_grad)
output
:
Tensor(x_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
out
]
kernel
:
func
:
reciprocal_grad
inplace
:
(out_grad -> x_grad)
-
backward_api
:
reduce_prod_grad
forward
:
reduce_prod (Tensor x, int64_t[] dims, bool keep_dim, bool reduce_all) -> Tensor(out)
args
:
(Tensor x, Tensor out, Tensor out_grad, int64_t[] dims, bool keep_dim, bool reduce_all)
output
:
Tensor(x_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
x
]
kernel
:
func
:
prod_grad
-
backward_api
:
relu_double_grad
forward
:
relu_grad (Tensor out, Tensor grad_out) -> Tensor(grad_x)
args
:
(Tensor out, Tensor grad_x_grad)
output
:
Tensor(grad_out_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
out
]
kernel
:
func
:
relu_double_grad
inplace
:
(grad_x_grad -> grad_out_grad)
-
backward_api
:
relu_grad
forward
:
relu (Tensor x) -> Tensor(out)
args
:
(Tensor out, Tensor out_grad)
output
:
Tensor(x_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
out
]
kernel
:
func
:
relu_grad
backward
:
relu_double_grad
inplace
:
(out_grad -> x_grad)
-
backward_api
:
reshape_double_grad
forward
:
reshape_grad (Tensor xshape, Tensor grad_out) -> Tensor(grad_x)
args
:
(Tensor grad_out, Tensor grad_x_grad)
output
:
Tensor(grad_out_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
grad_out
]
kernel
:
func
:
reshape_double_grad
no_need_buffer
:
grad_out
inplace
:
(grad_x_grad -> grad_out_grad)
-
backward_api
:
reshape_grad
forward
:
reshape (Tensor x, IntArray shape) -> Tensor(out), Tensor(xshape)
args
:
(Tensor xshape, Tensor out_grad)
output
:
Tensor(x_grad)
infer_meta
:
func
:
KernelWithXShapeInferMeta
param
:
[
xshape
]
kernel
:
func
:
reshape_grad
param
:
[
out_grad
]
data_type
:
out_grad
backend
:
out_grad
layout
:
out_grad
backward
:
reshape_double_grad
inplace
:
(out_grad -> x_grad)
-
backward_api
:
roi_align_grad
forward
:
roi_align (Tensor x, Tensor boxes, Tensor boxes_num, int pooled_height, int pooled_width, float spatial_scale, int sampling_ratio, bool aligned) -> Tensor(out)
args
:
(Tensor x, Tensor boxes, Tensor boxes_num, Tensor out_grad, int pooled_height, int pooled_width, float spatial_scale, int sampling_ratio, bool aligned)
output
:
Tensor(x_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
x
]
kernel
:
func
:
roi_align_grad
data_type
:
boxes
no_need_buffer
:
x
optional
:
boxes_num
-
backward_api
:
roi_pool_grad
forward
:
roi_pool (Tensor x, Tensor boxes, Tensor boxes_num, int pooled_height, int pooled_width, float spatial_scale) -> Tensor(out), Tensor(arg_max)
args
:
(Tensor x, Tensor boxes, Tensor boxes_num, Tensor arg_max, Tensor out_grad, int pooled_height, int pooled_width, float spatial_scale)
output
:
Tensor(x_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
x
]
kernel
:
func
:
roi_pool_grad
data_type
:
x
optional
:
boxes_num
-
backward_api
:
roll_grad
forward
:
roll(Tensor x, IntArray shifts, int64_t[] axis) -> Tensor(out)
args
:
(Tensor x, Tensor out_grad, IntArray shifts, int64_t[] axis)
output
:
Tensor(x_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
x
]
kernel
:
func
:
roll_grad
data_type
:
x
no_need_buffer
:
x
-
backward_api
:
round_grad
forward
:
round(Tensor x) -> Tensor(out)
args
:
(Tensor out_grad)
output
:
Tensor(x_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
out_grad
]
kernel
:
func
:
round_grad
inplace
:
(out_grad -> x_grad)
-
backward_api
:
rsqrt_double_grad
forward
:
rsqrt_grad (Tensor out, Tensor grad_out) -> Tensor(grad_x)
args
:
(Tensor out, Tensor grad_x, Tensor grad_x_grad)
output
:
Tensor(out_grad), Tensor(grad_out_grad)
infer_meta
:
func
:
GeneralBinaryGradInferMeta
param
:
[
out
,
out
]
kernel
:
func
:
rsqrt_double_grad
inplace
:
(grad_x_grad -> grad_out_grad)
-
backward_api
:
rsqrt_grad
forward
:
rsqrt (Tensor x) -> Tensor(out)
args
:
(Tensor out, Tensor out_grad)
output
:
Tensor(x_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
out
]
kernel
:
func
:
rsqrt_grad
backward
:
rsqrt_double_grad
inplace
:
(out_grad -> x_grad)
-
backward_api
:
scale_double_grad
forward
:
scale_grad (Tensor grad_out, Scalar scale, float bias, bool bias_after_scale) -> Tensor(grad_x)
args
:
(Tensor grad_x_grad, Scalar scale=1.0, float bias=0.0, bool bias_after_scale=true)
output
:
Tensor(grad_out_grad)
invoke
:
scale(grad_x_grad, scale, 0.0, bias_after_scale)
backward
:
scale_triple_grad
-
backward_api
:
scale_grad
forward
:
scale (Tensor x, Scalar scale, float bias, bool bias_after_scale) -> Tensor(out)
args
:
(Tensor out_grad, Scalar scale=1.0, float bias=0.0, bool bias_after_scale=true)
output
:
Tensor(x_grad)
invoke
:
scale(out_grad, scale, 0.0, bias_after_scale)
backward
:
scale_double_grad
inplace
:
(out_grad -> x_grad)
-
backward_api
:
scale_triple_grad
forward
:
scale_double_grad (Tensor grad_grad_x, Scalar scale, float bias, bool bias_after_scale) -> Tensor(grad_grad_out)
args
:
(Tensor grad_grad_out_grad, Scalar scale=1.0, float bias=0.0, bool bias_after_scale=true)
output
:
Tensor(grad_grad_x_grad)
invoke
:
scale(grad_grad_out_grad, scale, 0.0, bias_after_scale)
-
backward_api
:
scatter_grad
forward
:
scatter (Tensor x, Tensor index, Tensor updates, bool overwrite) -> Tensor(out)
args
:
(Tensor index, Tensor updates, Tensor out_grad, bool overwrite)
output
:
Tensor(x_grad), Tensor(updates_grad)
infer_meta
:
func
:
ScatterGradInferMeta
param
:
[
index
,
updates
,
out_grad
,
overwrite
]
kernel
:
func
:
scatter_grad
no_need_buffer
:
updates
-
backward_api
:
scatter_nd_add_grad
forward
:
scatter_nd_add (Tensor x, Tensor index, Tensor updates) -> Tensor(out)
args
:
(Tensor index, Tensor updates, Tensor out_grad)
output
:
Tensor(x_grad), Tensor(updates_grad)
infer_meta
:
func
:
ScatterNdAddGradInferMeta
param
:
[
index
,
updates
,
out_grad
]
kernel
:
func
:
scatter_nd_add_grad
no_need_buffer
:
updates
-
backward_api
:
segment_pool_grad
forward
:
segment_pool (Tensor x, Tensor segment_ids, str pooltype) -> Tensor(out), Tensor(summed_ids)
args
:
(Tensor x, Tensor segment_ids, Tensor out, Tensor summed_ids, Tensor out_grad, str pooltype)
output
:
Tensor(x_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
x
]
kernel
:
func
:
segment_pool_grad
data_type
:
x
optional
:
summed_ids
-
backward_api
:
selu_grad
forward
:
selu (Tensor x, float scale, float alpha) -> Tensor(out)
args
:
(Tensor out, Tensor out_grad, float scale, float alpha)
output
:
Tensor(x_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
out
]
kernel
:
func
:
selu_grad
-
backward_api
:
sigmoid_cross_entropy_with_logits_grad
forward
:
sigmoid_cross_entropy_with_logits (Tensor x, Tensor label, bool normalize, int ignore_index) -> Tensor(out)
args
:
(Tensor x, Tensor label, Tensor out_grad, bool normalize, int ignore_index)
output
:
Tensor(x_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
x
]
kernel
:
func
:
sigmoid_cross_entropy_with_logits_grad
inplace
:
(out_grad -> x_grad)
-
backward_api
:
sigmoid_double_grad
forward
:
sigmoid_grad (Tensor out, Tensor fwd_grad_out) -> Tensor(grad_x)
args
:
(Tensor out, Tensor fwd_grad_out, Tensor grad_x_grad)
output
:
Tensor(out_grad), Tensor(fwd_grad_out_grad)
infer_meta
:
func
:
GeneralBinaryGradInferMeta
param
:
[
out
,
fwd_grad_out
]
kernel
:
func
:
sigmoid_double_grad
backward
:
sigmoid_triple_grad
inplace
:
(grad_x_grad -> fwd_grad_out_grad)
-
backward_api
:
sigmoid_grad
forward
:
sigmoid (Tensor x) -> Tensor(out)
args
:
(Tensor out, Tensor out_grad)
output
:
Tensor(x_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
out
]
kernel
:
func
:
sigmoid_grad
backward
:
sigmoid_double_grad
inplace
:
(out_grad -> x_grad)
-
backward_api
:
sigmoid_triple_grad
forward
:
sigmoid_double_grad (Tensor out, Tensor fwd_grad_out, Tensor grad_grad_x) -> Tensor(grad_out), Tensor(grad_grad_out)
args
:
(Tensor out, Tensor fwd_grad_out, Tensor grad_grad_x, Tensor grad_out_grad, Tensor grad_grad_out_grad)
output
:
Tensor(out_grad), Tensor(fwd_grad_out_grad), Tensor(grad_grad_x_grad)
infer_meta
:
func
:
GeneralTernaryGradInferMeta
param
:
[
out
,
fwd_grad_out
,
grad_grad_x
]
kernel
:
func
:
sigmoid_triple_grad
optional
:
grad_grad_out_grad
inplace
:
(grad_grad_x -> fwd_grad_out_grad)
-
backward_api
:
silu_grad
forward
:
silu (Tensor x) -> Tensor(out)
args
:
(Tensor x, Tensor out_grad)
output
:
Tensor(x_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
x
]
kernel
:
func
:
silu_grad
inplace
:
(out_grad -> x_grad)
-
backward_api
:
sin_grad
forward
:
sin (Tensor x) -> Tensor(out)
args
:
(Tensor x, Tensor out_grad)
output
:
Tensor(x_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
x
]
kernel
:
func
:
sin_grad
inplace
:
(out_grad -> x_grad)
-
backward_api
:
sinh_grad
forward
:
sinh (Tensor x) -> Tensor(out)
args
:
(Tensor x, Tensor out_grad)
output
:
Tensor(x_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
x
]
kernel
:
func
:
sinh_grad
inplace
:
(out_grad -> x_grad)
-
backward_api
:
slice_grad
forward
:
slice (Tensor input, int64_t[] axes, IntArray starts, IntArray ends, int64_t[] infer_flags, int64_t[] decrease_axis) -> Tensor(out)
args
:
(Tensor input, Tensor out_grad, int64_t[] axes, IntArray starts, IntArray ends, int64_t[] infer_flags, int64_t[] decrease_axis)
output
:
Tensor(input_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
input
]
kernel
:
func
:
slice_grad
no_need_buffer
:
input
-
backward_api
:
soft_shrink_grad
forward
:
soft_shrink (Tensor x, float lambda) -> Tensor(out)
args
:
(Tensor x, Tensor out_grad, float lambda)
output
:
Tensor(x_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
x
]
kernel
:
func
:
soft_shrink_grad
inplace
:
(out_grad -> x_grad)
-
backward_api
:
softmax_grad
forward
:
softmax (Tensor x, int axis) -> Tensor(out)
args
:
(Tensor out, Tensor out_grad, int axis)
output
:
Tensor(x_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
out
]
kernel
:
func
:
softmax_grad
use_gpudnn
:
true
-
backward_api
:
split_grad
forward
:
split (Tensor x, IntArray num_or_sections, Scalar axis) -> Tensor[](out)
args
:
(Tensor[] out_grad, Scalar axis = -1)
output
:
Tensor(x_grad)
invoke
:
concat( out_grad, axis)
# TODO(zhangyunfei) The config of double grad and triple grad will be supported in the future.
-
backward_api
:
sqrt_double_grad
forward
:
sqrt_grad (Tensor out, Tensor grad_out) -> Tensor(grad_x)
args
:
(Tensor out, Tensor grad_x, Tensor grad_x_grad)
output
:
Tensor(out_grad), Tensor(grad_out_grad)
infer_meta
:
func
:
GeneralBinaryGradInferMeta
param
:
[
out
,
out
]
kernel
:
func
:
sqrt_double_grad
inplace
:
(grad_x_grad -> grad_out_grad)
-
backward_api
:
sqrt_grad
forward
:
sqrt (Tensor x) -> Tensor(out)
args
:
(Tensor out, Tensor out_grad)
output
:
Tensor(x_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
out
]
kernel
:
func
:
sqrt_grad
backward
:
sqrt_double_grad
inplace
:
(out_grad -> x_grad)
-
backward_api
:
square_double_grad
forward
:
square_grad (Tensor x, Tensor grad_out) -> Tensor(grad_x)
args
:
(Tensor x, Tensor grad_out, Tensor grad_x_grad)
output
:
Tensor(x_grad), Tensor(grad_out_grad)
infer_meta
:
func
:
GeneralBinaryGradInferMeta
param
:
[
x
,
x
]
kernel
:
func
:
square_double_grad
inplace
:
(grad_x_grad -> grad_out_grad)
-
backward_api
:
square_grad
forward
:
square (Tensor x) -> Tensor(out)
args
:
(Tensor x, Tensor out_grad)
output
:
Tensor(x_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
x
]
kernel
:
func
:
square_grad
backward
:
square_double_grad
inplace
:
(out_grad -> x_grad)
-
backward_api
:
squeeze_double_grad
forward
:
squeeze_grad(Tensor xshape, Tensor grad_out, int[] axes) -> Tensor(grad_x)
args
:
(Tensor grad_x_grad, int[] axes)
output
:
Tensor(grad_out_grad)
invoke
:
squeeze(grad_x_grad, axes)
-
backward_api
:
squeeze_grad
forward
:
squeeze(Tensor x, int[] axes) -> Tensor(out), Tensor(xshape)
args
:
(Tensor xshape, Tensor out_grad, int[] axes)
output
:
Tensor(x_grad)
infer_meta
:
func
:
KernelWithXShapeInferMeta
param
:
[
xshape
]
kernel
:
func
:
squeeze_grad
inplace
:
(out_grad -> x_grad)
backward
:
squeeze_double_grad
-
backward_api
:
stack_grad
forward
:
stack (Tensor[] x, int axis) -> Tensor(out)
args
:
(Tensor[] x, Tensor out_grad, int axis)
output
:
Tensor[](x_grad){x.size()}
infer_meta
:
func
:
StackGradInferMeta
param
:
[
out_grad
,
axis
]
kernel
:
func
:
stack_grad
param
:
[
out_grad
,
axis
]
no_need_buffer
:
x
-
backward_api
:
strided_slice_grad
forward
:
strided_slice (Tensor x, int[] axes, IntArray starts, IntArray ends, IntArray strides) -> Tensor(out)
args
:
(Tensor x, Tensor out_grad, int[] axes, IntArray starts, IntArray ends, IntArray strides)
output
:
Tensor(x_grad)
infer_meta
:
func
:
GeneralUnaryGradInferMeta
param
:
[
x
]
kernel
:
func
:
strided_slice_grad
no_need_buffer
:
x
-
backward_api
:
subtract_double_grad
forward
:
subtract_grad (Tensor x, Tensor y, Tensor grad_out, int axis = -1) -> Tensor(grad_x), Tensor(grad_y)
args
:
(Tensor y, Tensor grad_out, Tensor grad_x_grad, Tensor grad_y_grad, int axis = -1)
output
:
Tensor(grad_out_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
grad_out
]
kernel
:
func
:
subtract_double_grad
optional
:
grad_x_grad, grad_y_grad
no_need_buffer
:
y, grad_out
inplace
:
(grad_x_grad -> grad_out_grad)
-
backward_api
:
subtract_grad
forward
:
subtract (Tensor x, Tensor y) -> Tensor(out)
args
:
(Tensor x, Tensor y, Tensor out_grad, int axis = -1)
output
:
Tensor(x_grad), Tensor(y_grad)
infer_meta
:
func
:
GeneralBinaryGradInferMeta
param
:
[
x
,
y
]
kernel
:
func
:
subtract_grad
no_need_buffer
:
x, y
backward
:
subtract_double_grad
inplace
:
(out_grad -> x_grad)
-
backward_api
:
sum_double_grad
forward
:
sum_grad (Tensor x, Tensor grad_out, int64_t[] dims, bool keep_dim, bool reduce_all=false) -> Tensor(grad_x)
args
:
(Tensor grad_x_grad, int64_t[] dims={}, bool keep_dim=false)
output
:
Tensor(grad_out_grad)
invoke
:
sum(grad_x_grad, dims, grad_x_grad.dtype(), keep_dim)
backward
:
sum_triple_grad
-
backward_api
:
sum_grad
forward
:
sum (Tensor x, int64_t[] dims={}, DataType out_dtype=DataType::UNDEFINED, bool keep_dim=false) -> Tensor(out)
args
:
(Tensor x, Tensor out_grad, int64_t[] dims, bool keep_dim, bool reduce_all=false)
output
:
Tensor(x_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
x
]
kernel
:
func
:
sum_grad
no_need_buffer
:
x
backward
:
sum_double_grad
-
backward_api
:
sum_triple_grad
forward
:
sum_double_grad (Tensor grad_grad_x, int64_t[] dims={}, bool keep_dim=false) -> Tensor(grad_grad_out)
args
:
(Tensor grad_grad_x, Tensor grad_grad_out_grad, int64_t[] dims={}, bool keep_dim=false, bool reduce_all=false)
output
:
Tensor(grad_grad_x_grad)
invoke
:
sum_grad(grad_grad_x, grad_grad_out_grad, dims, keep_dim, reduce_all, grad_grad_x_grad)
-
backward_api
:
swish_grad
forward
:
swish (Tensor x, float beta=1.0) -> Tensor(out)
args
:
(Tensor x, Tensor out_grad, float bete=1.0)
output
:
Tensor(x_grad)
infer_meta
:
func
:
GeneralUnaryGradInferMeta
param
:
[
x
]
kernel
:
func
:
swish_grad
inplace
:
(out_grad -> x_grad)
-
backward_api
:
take_along_axis_grad
forward
:
take_along_axis (Tensor x, Tensor index, int axis) -> Tensor(out)
args
:
(Tensor x, Tensor index, Tensor out_grad, int axis)
output
:
Tensor(x_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
x
]
kernel
:
func
:
take_along_axis_grad
-
backward_api
:
tan_grad
forward
:
tan (Tensor x) -> Tensor(out)
args
:
(Tensor x, Tensor out_grad)
output
:
Tensor(x_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
x
]
kernel
:
func
:
tan_grad
inplace
:
(out_grad -> x_grad)
-
backward_api
:
tanh_double_grad
forward
:
tanh_grad (Tensor out, Tensor grad_out) -> Tensor(grad_x)
args
:
(Tensor out, Tensor grad_out, Tensor grad_x_grad)
output
:
Tensor(out_grad), Tensor(grad_out_grad)
infer_meta
:
func
:
GeneralBinaryGradInferMeta
param
:
[
out
,
out
]
kernel
:
func
:
tanh_double_grad
backward
:
tanh_triple_grad
inplace
:
(grad_x_grad -> grad_out_grad)
-
backward_api
:
tanh_grad
forward
:
tanh (Tensor x) -> Tensor(out)
args
:
(Tensor out, Tensor out_grad)
output
:
Tensor(x_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
out
]
kernel
:
func
:
tanh_grad
backward
:
tanh_double_grad
inplace
:
(out_grad -> x_grad)
-
backward_api
:
tanh_shrink_grad
forward
:
tanh_shrink (Tensor x) -> Tensor(out)
args
:
(Tensor x, Tensor out_grad)
output
:
Tensor(x_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
x
]
kernel
:
func
:
tanh_shrink_grad
inplace
:
(out_grad -> x_grad)
-
backward_api
:
tanh_triple_grad
forward
:
tanh_double_grad (Tensor out, Tensor grad_out_forward, Tensor grad_x_grad_forward) -> Tensor(grad_out_new), Tensor(grad_out_grad)
args
:
(Tensor out, Tensor grad_out_forward, Tensor grad_x_grad_forward, Tensor grad_out_new_grad, Tensor grad_out_grad_grad)
output
:
Tensor(out_grad), Tensor(grad_out_forward_grad), Tensor(grad_x_grad_forward_grad)
infer_meta
:
func
:
GeneralTernaryGradInferMeta
param
:
[
out
,
out
,
grad_x_grad_forward
]
kernel
:
func
:
tanh_triple_grad
inplace
:
(grad_x_grad_forward -> grad_out_forward_grad)
-
backward_api
:
thresholded_relu_grad
forward
:
thresholded_relu (Tensor x, float threshold) -> Tensor(out)
args
:
(Tensor x, Tensor out_grad, float threshold)
output
:
Tensor(x_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
x
]
kernel
:
func
:
thresholded_relu_grad
inplace
:
(out_grad -> x_grad)
-
backward_api
:
tile_double_grad
forward
:
tile_grad (Tensor x, Tensor grad_out, IntArray repeat_times) -> Tensor(grad_x)
args
:
(Tensor grad_x_grad, IntArray repeat_times)
output
:
Tensor(grad_out_grad)
infer_meta
:
func
:
TileInferMeta
kernel
:
func
:
tile
-
backward_api
:
tile_grad
forward
:
tile (Tensor x, IntArray repeat_times) -> Tensor(out)
args
:
(Tensor x, Tensor out_grad, IntArray repeat_times)
output
:
Tensor(x_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
x
]
kernel
:
func
:
tile_grad
no_need_buffer
:
x
backward
:
tile_double_grad
-
backward_api
:
top_k_grad
forward
:
top_k (Tensor x, Scalar k, int axis = -1, bool largest =
true
, bool sorted =
true
) -> Tensor(out), Tensor(indices)
args
:
(Tensor x, Tensor indices, Tensor out_grad, Scalar k = -1, int axis = -1, bool largest =
true
, bool sorted =
true
)
output
:
Tensor(x_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
x
]
kernel
:
func
:
top_k_grad
-
backward_api
:
trace_grad
forward
:
trace (Tensor x, int offset, int axis1, int axis2) -> Tensor(out)
args
:
(Tensor x, Tensor out_grad, int offset, int axis1, int axis2)
output
:
Tensor(x_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
x
]
kernel
:
func
:
trace_grad
no_need_buffer
:
x
-
backward_api
:
transpose_double_grad
forward
:
transpose_grad (Tensor grad_out, int[] axis) -> Tensor(grad_x)
args
:
(Tensor grad_x_grad, int[] axis)
output
:
Tensor(grad_out_grad)
invoke
:
transpose(grad_x_grad, axis)
-
backward_api
:
transpose_grad
forward
:
transpose (Tensor x, int[] axis) -> Tensor(out)
args
:
(Tensor out_grad, int[] axis)
output
:
Tensor(x_grad)
infer_meta
:
func
:
TransposeGradInferMeta
param
:
[
out_grad
,
axis
]
kernel
:
func
:
transpose_grad
backward
:
transpose_double_grad
-
backward_api
:
triangular_solve_grad
forward
:
triangular_solve (Tensor x, Tensor y, bool upper, bool tranpose, bool unitriangular) -> Tensor(out)
args
:
(Tensor x, Tensor y, Tensor out, Tensor out_grad, bool upper, bool tranpose, bool unitriangular)
output
:
Tensor(x_grad), Tensor(y_grad)
infer_meta
:
func
:
GeneralBinaryGradInferMeta
param
:
[
x
,
y
]
kernel
:
func
:
triangular_solve_grad
-
backward_api
:
tril_triu_grad
forward
:
tril_triu(Tensor x, int diagonal, bool lower) -> Tensor(out)
args
:
(Tensor out_grad, int diagonal, bool lower)
output
:
Tensor(x_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
out_grad
]
kernel
:
func
:
tril_triu_grad
-
backward_api
:
trunc_grad
forward
:
trunc (Tensor x) -> Tensor(out)
args
:
(Tensor out_grad)
output
:
Tensor(x_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
out_grad
]
kernel
:
func
:
trunc_grad
-
backward_api
:
unbind_grad
forward
:
unbind (Tensor input, int axis) -> Tensor[](out)
args
:
(Tensor[] out_grad, int axis)
output
:
Tensor(input_grad)
invoke
:
stack(out_grad, axis)
-
backward_api
:
unfold_grad
forward
:
unfold (Tensor x, int[] kernel_sizes, int[] strides, int[] paddings, int[] dilations) -> Tensor(out)
args
:
(Tensor x, Tensor out_grad, int[] kernel_sizes, int[] strides, int[] paddings, int[] dilations)
output
:
Tensor(x_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
x
]
kernel
:
func
:
unfold_grad
no_need_buffer
:
x
-
backward_api
:
unsqueeze_double_grad
forward
:
unsqueeze_grad(Tensor xshape, Tensor grad_out, IntArray axes) -> Tensor(grad_x)
args
:
(Tensor grad_x_grad, IntArray axes)
output
:
Tensor(grad_out_grad)
invoke
:
unsqueeze(grad_x_grad, axes)
-
backward_api
:
unsqueeze_grad
forward
:
unsqueeze(Tensor x, IntArray axes) -> Tensor(out), Tensor(xshape)
args
:
(Tensor xshape, Tensor out_grad, IntArray axes)
output
:
Tensor(x_grad)
infer_meta
:
func
:
KernelWithXShapeInferMeta
param
:
[
xshape
]
kernel
:
func
:
unsqueeze_grad
param
:
[
xshape
,
out_grad
]
inplace
:
(out_grad -> x_grad)
backward
:
unsqueeze_double_grad
-
backward_api
:
where_grad
forward
:
where (Tensor condition, Tensor x, Tensor y) -> Tensor(out)
args
:
(Tensor condition, Tensor x, Tensor y, Tensor out_grad)
output
:
Tensor(x_grad), Tensor(y_grad)
infer_meta
:
func
:
GeneralBinaryGradInferMeta
param
:
[
x
,
y
]
kernel
:
func
:
where_grad
no_need_buffer
:
x, y
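Every entry above follows the same schema: `backward_api`, `forward`, `args`, `output`, `infer_meta`, `kernel`, plus optional keys such as `optional`, `no_need_buffer`, `inplace`, `invoke`, and `backward` for higher-order grads. The snippet below is a minimal sketch of reading such a file with PyYAML and listing which backward APIs dispatch to a kernel and which reuse another API via `invoke`; the path and printed fields are illustrative only, and this is not the project's own parsing code.

```python
# Minimal sketch: inspect backward API definitions from one of the yaml files above.
# Assumes PyYAML is installed; the path is an example, adjust to your checkout.
import yaml

with open("python/paddle/utils/code_gen/legacy_backward.yaml") as f:
    backward_apis = yaml.safe_load(f)  # a list of dicts, one per "- backward_api" entry

for api in backward_apis:
    name = api["backward_api"]
    kernel = (api.get("kernel") or {}).get("func")  # present when a kernel is dispatched
    invoke = api.get("invoke")                      # present when another API is reused
    print(f"{name}: kernel={kernel}, invoke={invoke}")
```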
python/paddle/utils/code_gen/cross_validate.py
浏览文件 @ fcd32950
...
@@ -43,7 +43,7 @@ if __name__ == "__main__":
    parser.add_argument('--backward_yaml_paths',
                        type=str,
                        nargs='+',
                        default=str(current_dir / "backward.yaml.yaml"),
                        default=str(current_dir / "backward_api.parsed.yaml"),
                        help="backward api yaml file.")
    args = parser.parse_args()
...
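One detail worth noting in the hunk above: `--backward_yaml_paths` is declared with `nargs='+'` but its default is a single string, so the attribute is a plain `str` when the flag is omitted and a `list` when it is passed. The snippet below is a standalone illustration of that argparse behavior, not code from the repository; the flag name is reused only for clarity.

```python
# Standalone illustration of nargs='+' with a string default (not repo code).
import argparse
from pathlib import Path

current_dir = Path(".")
parser = argparse.ArgumentParser()
parser.add_argument('--backward_yaml_paths',
                    type=str,
                    nargs='+',
                    default=str(current_dir / "backward_api.parsed.yaml"))

print(parser.parse_args([]).backward_yaml_paths)                      # plain str (the default)
print(parser.parse_args(['--backward_yaml_paths', 'a.yaml',
                         'b.yaml']).backward_yaml_paths)              # list of str
```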
python/paddle/utils/code_gen/legacy_api.yaml
0 → 100644
浏览文件 @ fcd32950
- api : abs
  args : (Tensor x)
  output : Tensor
  infer_meta :
    func : RealAndImagInferMeta
  kernel :
    func : abs
  backward : abs_grad
# accuracy
- api : accuracy
  args : (Tensor x, Tensor indices, Tensor label)
  output : Tensor(accuracy), Tensor(correct), Tensor(total)
  infer_meta :
    func : AccuracyInferMeta
  kernel :
    func : accuracy
    dtype : x
# acos
- api : acos
  args : (Tensor x)
  output : Tensor
  infer_meta :
    func : UnchangedInferMeta
  kernel :
    func : acos
  backward : acos_grad
# acosh
- api : acosh
  args : (Tensor x)
  output : Tensor
  infer_meta :
    func : UnchangedInferMeta
  kernel :
    func : acosh
  backward : acosh_grad
- api : adadelta
  args : (Tensor param, Tensor grad, Tensor avg_squared_grad, Tensor avg_squared_update, float rho, float epsilon)
  output : Tensor(param_out), Tensor(moment_out), Tensor(inf_norm_out)
  infer_meta :
    func : AdadeltaInferMeta
  kernel :
    func : adadelta
- api : adam
  args : (Tensor param, Tensor grad, Tensor learning_rate, Tensor moment1, Tensor moment2, Tensor beta1_pow, Tensor beta2_pow, Tensor master_param, Tensor skip_update, Scalar beta1, Scalar beta2, Scalar epsilon, bool lazy_mode, int64_t min_row_size_to_use_multithread, bool multi_precision, bool use_global_beta_pow)
  output : Tensor(param_out), Tensor(moment1_out), Tensor(moment2_out), Tensor(beta1_pow_out), Tensor(beta2_pow_out), Tensor(master_param_outs)
  optional : master_param, skip_update
  invoke : adam_impl(param, grad, learning_rate, moment1, moment2, beta1_pow, beta2_pow, master_param, skip_update, beta1, beta2, epsilon, lazy_mode, min_row_size_to_use_multithread, multi_precision, use_global_beta_pow)
- api : adamax
  args : (Tensor param, Tensor grad, Tensor learning_rate, Tensor moment, Tensor inf_norm, Tensor beta1_pow, float beta1, float beta2, float epsilon)
  output : Tensor(param_out), Tensor(avg_squared_grad_out), Tensor(avg_squared_update_out)
  infer_meta :
    func : AdamaxInferMeta
  kernel :
    func : adamax
- api : adamw
  args : (Tensor param, Tensor grad, Tensor learning_rate, Tensor moment1, Tensor moment2, Tensor beta1_pow, Tensor beta2_pow, Tensor master_param, Tensor skip_update, Scalar beta1, Scalar beta2, Scalar epsilon, float lr_ratio, float coeff, bool with_decay, bool lazy_mode, int64_t min_row_size_to_use_multithread, bool multi_precision, bool use_global_beta_pow)
  output : Tensor(param_out), Tensor(moment1_out), Tensor(moment2_out), Tensor(beta1_pow_out), Tensor(beta2_pow_out), Tensor(master_param_outs)
  optional : master_param, skip_update
  invoke : adamw_impl(param, grad, learning_rate, moment1, moment2, beta1_pow, beta2_pow, master_param, skip_update, beta1, beta2, epsilon, lr_ratio, coeff, with_decay, lazy_mode, min_row_size_to_use_multithread, multi_precision, use_global_beta_pow)
- api : add
  args : (Tensor x, Tensor y)
  output : Tensor
  infer_meta :
    func : ElementwiseInferMeta
  kernel :
    func : add
  backward : add_grad
- api : add_n
  args : (Tensor[] x)
  output : Tensor
  infer_meta :
    func : AddNInferMeta
  kernel :
    func : add_n
  backward : add_n_grad
- api : addmm
  args : (Tensor input, Tensor x, Tensor y, float alpha, float beta)
  output : Tensor
  infer_meta :
    func : AddmmInferMeta
  kernel :
    func : addmm
  backward : addmm_grad
- api : all
  args : (Tensor x, int64_t[] dims={}, bool keep_dim=false)
  output : Tensor(out)
  infer_meta :
    func : ReduceInferMeta
  kernel :
    func : all
- api : allclose
  args : (Tensor x, Tensor y, Scalar rtol, Scalar atol, bool equal_nan)
  output : Tensor(out)
  infer_meta :
    func : AllValueCompareInferMeta
    param : [x, y]
  kernel :
    func : allclose
- api : any
  args : (Tensor x, int64_t[] dims={}, bool keep_dim=false)
  output : Tensor(out)
  infer_meta :
    func : ReduceInferMeta
  kernel :
    func : any
- api : arange
  args : (Tensor start, Tensor end, Tensor step, DataType dtype, Place place={})
  output : Tensor
  infer_meta :
    func : ArangeInferMeta
    param : [start, end, step]
  kernel :
    func : arange
    param : [start, end, step]
    data_type : dtype
    backend : place
  data_transform :
    support_trans_dtype : start, end, step
# arg_max
- api : argmax
  args : (Tensor x, int64_t axis, bool keepdims, bool flatten, int dtype)
  output : Tensor
  infer_meta :
    func : ArgMinMaxInferMeta
  kernel :
    func : arg_max
# arg_min
- api : argmin
  args : (Tensor x, int64_t axis, bool keepdims, bool flatten, int dtype)
  output : Tensor
  infer_meta :
    func : ArgMinMaxInferMeta
  kernel :
    func : arg_min
# argsort
- api : argsort
  args : (Tensor x, int axis, bool descending)
  output : Tensor(out), Tensor(indices)
  infer_meta :
    func : ArgsortInferMeta
  kernel :
    func : argsort
  backward : argsort_grad
# asin
- api : asin
  args : (Tensor x)
  output : Tensor
  infer_meta :
    func : UnchangedInferMeta
  kernel :
    func : asin
  backward : asin_grad
# asinh
- api : asinh
  args : (Tensor x)
  output : Tensor
  infer_meta :
    func : UnchangedInferMeta
  kernel :
    func : asinh
  backward : asinh_grad
# assign
- api : assign
  args : (Tensor x)
  output : Tensor
  infer_meta :
    func : UnchangedInferMeta
  kernel :
    func : assign
  backward : assign_grad
- api : assign_out_
  args : (Tensor x, Tensor output)
  output : Tensor(out)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : assign
    param : [x]
  inplace : (output -> out)
  backward : assign_out__grad
# atan
- api : atan
  args : (Tensor x)
  output : Tensor
  infer_meta :
    func : UnchangedInferMeta
  kernel :
    func : atan
  backward : atan_grad
- api : atan2
  args : (Tensor x, Tensor y)
  output : Tensor
  infer_meta :
    func : Atan2InferMeta
  kernel :
    func : atan2
  backward : atan2_grad
# atanh
- api : atanh
  args : (Tensor x)
  output : Tensor
  infer_meta :
    func : UnchangedInferMeta
  kernel :
    func : atanh
  backward : atanh_grad
# auc
- api : auc
  args : (Tensor x, Tensor label, Tensor stat_pos, Tensor stat_neg, str curve, int num_thresholds, int slide_steps)
  output : Tensor(auc), Tensor(stat_pos_out), Tensor(stat_neg_out)
  infer_meta :
    func : AucInferMeta
  kernel :
    func : auc
# batch_norm
- api : batch_norm
  args : (Tensor x, Tensor scale, Tensor bias, Tensor mean, Tensor variance, float momentum, float epsilon, str data_layout, bool is_test, bool use_global_stats, bool trainable_statistics, bool fuse_with_relu)
  output : Tensor(out), Tensor(mean_out), Tensor(variance_out), Tensor(saved_mean), Tensor(saved_variance), Tensor(reserve_space)
  invoke : batch_norm_impl(x, scale, bias, mean, variance, momentum, epsilon, data_layout, is_test, use_global_stats, trainable_statistics, fuse_with_relu)
  backward : batch_norm_grad
- api : bce_loss
  args : (Tensor input, Tensor label)
  output : Tensor
  infer_meta :
    func : BCELossInferMeta
  kernel :
    func : bce_loss
  backward : bce_loss_grad
# bernoulli
- api : bernoulli
  args : (Tensor x)
  output : Tensor
  infer_meta :
    func : UnchangedInferMeta
  kernel :
    func : bernoulli
# bitwise_and
- api : bitwise_and
  args : (Tensor x, Tensor y)
  output : Tensor
  infer_meta :
    func : ElementwiseInferMeta
  kernel :
    func : bitwise_and
# bitwise_not
- api : bitwise_not
  args : (Tensor x)
  output : Tensor
  infer_meta :
    func : UnchangedInferMeta
  kernel :
    func : bitwise_not
# bitwise_or
- api : bitwise_or
  args : (Tensor x, Tensor y)
  output : Tensor
  infer_meta :
    func : ElementwiseInferMeta
  kernel :
    func : bitwise_or
# bitwise_xor
- api : bitwise_xor
  args : (Tensor x, Tensor y)
  output : Tensor
  infer_meta :
    func : ElementwiseInferMeta
  kernel :
    func : bitwise_xor
# brelu
- api : brelu
  args : (Tensor x, float t_min, float t_max)
  output : Tensor
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : brelu
  backward : brelu_grad
- api : cast
  args : (Tensor x, DataType out_dtype)
  output : Tensor
  infer_meta :
    func : CastInferMeta
  kernel :
    func : cast
    param : [x, out_dtype]
    data_type : x
  backward : cast_grad
- api : ceil
  args : (Tensor x)
  output : Tensor(out)
  infer_meta :
    func : UnchangedInferMeta
  kernel :
    func : ceil
  backward : ceil_grad
- api : celu
  args : (Tensor x, float alpha)
  output : Tensor(out)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : celu
  backward : celu_grad
# cholesky
- api : cholesky
  args : (Tensor x, bool upper)
  output : Tensor
  infer_meta :
    func : CholeskyInferMeta
  kernel :
    func : cholesky
  backward : cholesky_grad
# cholesky_solve
- api : cholesky_solve
  args : (Tensor x, Tensor y, bool upper)
  output : Tensor
  infer_meta :
    func : CholeskySolveInferMeta
  kernel :
    func : cholesky_solve
  backward : cholesky_solve_grad
- api : clip
  args : (Tensor x, Scalar(float) min, Scalar(float) max)
  output : Tensor(out)
  inplace : (x -> out)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : clip
  backward : clip_grad
- api : concat
  args : (Tensor[] x, Scalar(int64_t) axis)
  output : Tensor
  infer_meta :
    func : ConcatInferMeta
    param : [x, axis]
  kernel :
    func : concat
  backward : concat_grad
- api : conj
  args : (Tensor x)
  output : Tensor
  infer_meta :
    func : UnchangedInferMeta
  kernel :
    func : conj
  backward : conj_grad
- api : conv2d
  args : (Tensor input, Tensor filter, int[] strides, int[] paddings, str paddding_algorithm, int groups, int[] dilations, str data_format, bool use_addto, int workspace_size_MB, bool exhaustive_search)
  output : Tensor
  invoke : conv2d_impl(input, filter, strides, paddings, paddding_algorithm, groups, dilations, data_format, use_addto, workspace_size_MB, exhaustive_search)
  backward : conv2d_grad
- api : conv2d_transpose
  args : (Tensor x, Tensor filter, int[] strides, int[] paddings, int[] output_padding, int[] output_size, str padding_algorithm, int groups, int[] dilations, str data_format)
  output : Tensor(out)
  infer_meta :
    func : ConvTransposeInferMeta
  kernel :
    func : conv2d_transpose
    use_gpudnn : true
  backward : conv2d_transpose_grad
- api : conv3d
  args : (Tensor input, Tensor filter, int[] strides, int[] paddings, str paddding_algorithm, int groups, int[] dilations, str data_format, bool use_addto, int workspace_size_MB, bool exhaustive_search)
  output : Tensor
  invoke : conv3d_impl(input, filter, strides, paddings, paddding_algorithm, groups, dilations, data_format, use_addto, workspace_size_MB, exhaustive_search)
  backward : conv3d_grad
- api : conv3d_transpose
  args : (Tensor x, Tensor filter, int[] strides, int[] paddings, int[] output_padding, int[] output_size, str padding_algorithm, int groups, int[] dilations, str data_format)
  output : Tensor(out)
  infer_meta :
    func : ConvTransposeInferMeta
  kernel :
    func : conv3d_transpose
    use_gpudnn : true
  backward : conv3d_transpose_grad
- api : copy_to
  args : (Tensor x, Place place, bool blocking)
  output : Tensor
  invoke : copy_to_impl(x, place, blocking)
# cos
- api : cos
  args : (Tensor x)
  output : Tensor
  infer_meta :
    func : UnchangedInferMeta
  kernel :
    func : cos
  backward : cos_grad
# cosh
- api : cosh
  args : (Tensor x)
  output : Tensor
  infer_meta :
    func : UnchangedInferMeta
  kernel :
    func : cosh
  backward : cosh_grad
- api : cross
  args : (Tensor x, Tensor y, int axis = 9)
  output : Tensor
  infer_meta :
    func : CrossInferMeta
  kernel :
    func : cross
  backward : cross_grad
# Part of python API paddle.nn.functional.cross_entropy
- api : cross_entropy_with_softmax
  args : (Tensor input, Tensor label, bool soft_label, bool use_softmax, bool numeric_stable_mode, int ignore_index, int axis)
  output : Tensor(softmax), Tensor(loss)
  infer_meta :
    func : CrossEntropyWithSoftmaxInferMeta
  kernel :
    func : cross_entropy_with_softmax
    data_type : input
  backward : cross_entropy_with_softmax_grad
- api : cumprod
  args : (Tensor x, int dim)
  output : Tensor(out)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : cumprod
  backward : cumprod_grad
- api : cumsum
  args : (Tensor x, int axis, bool flatten, bool exclusive, bool reverse)
  output : Tensor(out)
  infer_meta :
    func : CumInferMeta
  kernel :
    func : cumsum
  backward : cumsum_grad
- api : deformable_conv
  args : (Tensor x, Tensor offset, Tensor filter, Tensor mask, int[] strides, int[] paddings, int[] dilations, int deformable_groups, int groups, int im2col_step)
  output : Tensor(out)
  infer_meta :
    func : DeformableConvInferMeta
  kernel :
    func : deformable_conv
    data_type : x
  optional : mask
  backward : deformable_conv_grad
- api : depthwise_conv2d
  args : (Tensor x, Tensor filter, int[] strides, int[] paddings, str padding_algorithm, int groups, int[] dilations, str data_format, bool use_addto, int workspace_size_MB, bool exhaustive_search, bool fuse_relu, bool use_gpudnn)
  output : Tensor(out)
  infer_meta :
    func : ConvInferMeta
    param : [x, filter, strides, paddings, padding_algorithm, groups, dilations, data_format, use_addto, workspace_size_MB, exhaustive_search]
  kernel :
    func : depthwise_conv2d
    param : [x, filter, strides, paddings, padding_algorithm, groups, dilations, data_format, use_addto, workspace_size_MB, exhaustive_search, fuse_relu]
    use_gpudnn : use_gpudnn
  backward : depthwise_conv2d_grad
- api : depthwise_conv2d_transpose
  args : (Tensor x, Tensor filter, int[] strides, int[] paddings, int[] output_padding, int[] output_size, str padding_algorithm, int groups, int[] dilations, str data_format)
  output : Tensor(out)
  infer_meta :
    func : ConvTransposeInferMeta
  kernel :
    func : depthwise_conv2d_transpose
  backward : depthwise_conv2d_transpose_grad
- api : det
  args : (Tensor x)
  output : Tensor
  infer_meta :
    func : UnchangedInferMeta
  kernel :
    func : determinant
  backward : det_grad
- api : diag
  args : (Tensor x, int offset, float padding_value)
  output : Tensor
  infer_meta :
    func : DiagInferMeta
  kernel :
    func : diag
- api : diagonal
  args : (Tensor x, int offset, int axis1, int axis2)
  output : Tensor
  infer_meta :
    func : DiagonalInferMeta
  kernel :
    func : diagonal
  backward : diagonal_grad
- api : digamma
  args : (Tensor x)
  output : Tensor
  infer_meta :
    func : UnchangedInferMeta
  kernel :
    func : digamma
  backward : digamma_grad
- api : dist
  args : (Tensor x, Tensor y, float p)
  output : Tensor
  infer_meta :
    func : DistInferMeta
  kernel :
    func : dist
  backward : dist_grad
- api : divide
  args : (Tensor x, Tensor y)
  output : Tensor
  infer_meta :
    func : ElementwiseInferMeta
  kernel :
    func : divide
  backward : divide_grad
- api : dot
  args : (Tensor x, Tensor y)
  output : Tensor
  infer_meta :
    func : DotInferMeta
  kernel :
    func : dot
- api : dropout
  args : (Tensor x, Tensor seed_tensor, float p, bool is_test, str mode, int seed, bool fix_seed)
  output : Tensor(out), Tensor(mask)
  infer_meta :
    func : DropoutInferMeta
  kernel :
    func : dropout
    data_type : x
  optional : seed_tensor
  backward : dropout_grad
# eigh
- api : eigh
  args : (Tensor x, str uplo)
  output : Tensor(out_w), Tensor(out_v)
  infer_meta :
    func : EighInferMeta
  kernel :
    func : eigh
  backward : eigh_grad
- api : einsum
  args : (Tensor[] x, str equation)
  output : Tensor, Tensor[]{x.size()}, Tensor[]{x.size()}
  infer_meta :
    func : EinsumInferMeta
    param : [x, equation]
  kernel :
    func : einsum
  backward : einsum_grad
- api : elementwise_pow
  args : (Tensor x, Tensor y)
  output : Tensor(out)
  infer_meta :
    func : ElementwiseInferMeta
  kernel :
    func : elementwise_pow
  backward : elementwise_pow_grad
# elu
- api : elu
  args : (Tensor x, float alpha)
  output : Tensor
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : elu
  backward : elu_grad
- api : embedding
  args : (Tensor x, Tensor weight, int64_t padding_idx=-1, bool sparse=false)
  output : Tensor
  invoke : embedding_impl(x, weight, padding_idx, sparse)
  backward : embedding_grad
- api : empty
  args : (IntArray shape, DataType dtype=DataType::FLOAT32, Place place=CPUPlace())
  output : Tensor
  infer_meta :
    func : CreateInferMeta
    param : [shape, dtype]
  kernel :
    func : empty
    param : [shape, dtype]
    data_type : dtype
    backend : place
- api : empty_like
  args : (Tensor x, DataType dtype = DataType::UNDEFINED, Place place = {})
  output : Tensor
  infer_meta :
    func : CreateLikeInferMeta
    param : [x, dtype]
  kernel :
    func : empty_like
    param : [x, dtype]
    data_type : dtype > x
    backend : place > x
- api : equal
  args : (Tensor x, Tensor y, int axis = -1)
  output : Tensor
  infer_meta :
    func : CompareInferMeta
  kernel :
    func : equal
- api : equal_all
  args : (Tensor x, Tensor y)
  output : Tensor
  infer_meta :
    func : CompareAllInferMeta
  kernel :
    func : equal_all
# erf
- api : erf
  args : (Tensor x)
  output : Tensor
  infer_meta :
    func : UnchangedInferMeta
  kernel :
    func : erf
  backward : erf_grad
# erfinv
- api : erfinv
  args : (Tensor x)
  output : Tensor
  infer_meta :
    func : UnchangedInferMeta
  kernel :
    func : erfinv
  backward : erfinv_grad
# exp
- api : exp
  args : (Tensor x)
  output : Tensor
  infer_meta :
    func : UnchangedInferMeta
  kernel :
    func : exp
  backward : exp_grad
# expand
- api : expand
  args : (Tensor x, IntArray shape)
  output : Tensor
  infer_meta :
    func : ExpandInferMeta
  kernel :
    func : expand
  backward : expand_grad
# expand_as
- api : expand_as
  args : (Tensor x, Tensor y, int[] target_shape)
  output : Tensor
  infer_meta :
    func : ExpandAsInferMeta
  kernel :
    func : expand_as
  optional : y
  backward : expand_as_grad
- api : expm1
  args : (Tensor x)
  output : Tensor
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : expm1
  backward : expm1_grad
- api : eye
  args : (int64_t num_rows, int64_t num_columns, DataType dtype=DataType::FLOAT32, Place place={})
  output : Tensor(out)
  infer_meta :
    func : EyeInferMeta
    param : [num_rows, num_columns, dtype]
  kernel :
    func : eye
    param : [num_rows, num_columns, dtype]
    data_type : dtype
    backend : place
- api : flatten
  args : (Tensor x, int start_axis, int stop_axis)
  output : Tensor(out), Tensor(xshape)
  infer_meta :
    func : FlattenWithXShapeInferMeta
  kernel :
    func : flatten_with_xshape
    backend : x
  inplace : (x -> out)
  view : (x -> out)
  intermediate : xshape
  backward : flatten_grad
# flip
- api : flip
  args : (Tensor x, int[] axis)
  output : Tensor
  infer_meta :
    func : FlipInferMeta
  kernel :
    func : flip
  backward : flip_grad
- api : floor
  args : (Tensor x)
  output : Tensor(out)
  infer_meta :
    func : UnchangedInferMeta
  kernel :
    func : floor
  backward : floor_grad
- api : floor_divide
  args : (Tensor x, Tensor y)
  output : Tensor(out)
  infer_meta :
    func : ElementwiseInferMeta
  kernel :
    func : floor_divide
- api : fmax
  args : (Tensor x, Tensor y, int axis)
  output : Tensor(out)
  infer_meta :
    param : [x, y]
    func : ElementwiseInferMeta
  kernel :
    func : fmax
  backward : fmax_grad
- api : fmin
  args : (Tensor x, Tensor y, int axis)
  output : Tensor(out)
  infer_meta :
    param : [x, y]
    func : ElementwiseInferMeta
  kernel :
    func : fmin
  backward : fmin_grad
- api : frobenius_norm
  args : (Tensor x, int64_t[] axis, bool keep_dim, bool reduce_all)
  output : Tensor(out)
  infer_meta :
    func : ReduceInferMetaBase
  kernel :
    func : frobenius_norm
  backward : frobenius_norm_grad
- api : full
  args : (IntArray shape, Scalar value, DataType dtype=DataType::FLOAT32, Place place=CPUPlace())
  output : Tensor
  infer_meta :
    func : CreateInferMeta
    param : [shape, dtype]
  kernel :
    func : full
    param : [shape, value, dtype]
    data_type : dtype
    backend : place
- api : full_batch_size_like
  args : (Tensor input, int[] shape, DataType dtype, Scalar value, int input_dim_idx, int output_dim_idx, Place place=CPUPlace())
  output : Tensor
  infer_meta :
    func : FullBatchSizeLikeInferMeta
    param : [input, shape, value, dtype, input_dim_idx, output_dim_idx]
  kernel :
    func : full_batch_size_like
    param : [input, shape, value, dtype, input_dim_idx, output_dim_idx]
    data_type : dtype
    backend : place
- api : full_like
  args : (Tensor x, Scalar value, DataType dtype = DataType::UNDEFINED, Place place = {})
  output : Tensor
  infer_meta :
    func : CreateLikeInferMeta
    param : [x, dtype]
  kernel :
    func : full_like
    param : [x, value, dtype]
    data_type : dtype > x
    backend : place > x
  data_transform :
    skip_transform : x
- api : gather
  args : (Tensor x, Tensor index, Scalar(int) axis=0)
  output : Tensor(out)
  infer_meta :
    func : GatherInferMeta
  kernel :
    func : gather
    data_type : x
  backward : gather_grad
- api : gather_nd
  args : (Tensor x, Tensor index)
  output : Tensor
  infer_meta :
    func : GatherNdInferMeta
  kernel :
    func : gather_nd
    data_type : x
  backward : gather_nd_grad
- api : gather_tree
  args : (Tensor ids, Tensor parents)
  output : Tensor
  infer_meta :
    func : GatherTreeMeta
  kernel :
    func : gather_tree
- api : gaussian_random
  args : (IntArray shape, float mean, float std, int seed, DataType dtype, Place place={})
  output : Tensor
  infer_meta :
    func : GaussianRandomInferMeta
    param : [shape, mean, std, seed, dtype]
  kernel :
    func : gaussian_random
    param : [shape, mean, std, seed, dtype]
    data_type : dtype
    backend : place
- api : gelu
  args : (Tensor x, bool approximate)
  output : Tensor(out)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : gelu
  backward : gelu_grad
- api : graph_send_recv
  args : (Tensor x, Tensor src_index, Tensor dst_index, str pool_type = "SUM", int64_t out_size = 0)
  output : Tensor(out), Tensor(dst_count)
  infer_meta :
    func : GraphSendRecvInferMeta
  kernel :
    func : graph_send_recv
    data_type : x
  intermediate : dst_count
  backward : graph_send_recv_grad
- api : greater_equal
  args : (Tensor x, Tensor y, int axis = -1)
  output : Tensor
  infer_meta :
    func : CompareInferMeta
  kernel :
    func : greater_equal
- api : greater_than
  args : (Tensor x, Tensor y, int axis = -1)
  output : Tensor
  infer_meta :
    func : CompareInferMeta
  kernel :
    func : greater_than
- api : group_norm
  args : (Tensor x, Tensor scale, Tensor bias, float epsilon, int groups, str data_layout)
  output : Tensor(y), Tensor(mean), Tensor(variance)
  infer_meta :
    func : GroupNormInferMeta
  kernel :
    func : group_norm
  optional : scale, bias
  intermediate : mean, variance
  backward : group_norm_grad
- api : gumbel_softmax
  args : (Tensor x, float temperature, bool hard, int axis)
  output : Tensor
  infer_meta :
    func : GumbelSoftmaxInferMeta
  kernel :
    func : gumbel_softmax
  backward : gumbel_softmax_grad
# hard_shrink
- api : hard_shrink
  args : (Tensor x, float threshold)
  output : Tensor
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : hard_shrink
  backward : hard_shrink_grad
# hard_sigmoid
- api : hard_sigmoid
  args : (Tensor x, float slope, float offset)
  output : Tensor
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : hard_sigmoid
  backward : hard_sigmoid_grad
- api : hard_swish
  args : (Tensor x, float threshold = 6.0, float scale = 6.0, float offset = 3.0)
  output : Tensor
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : hard_swish
  backward : hard_swish_grad
# histogram
- api : histogram
  args : (Tensor x, int64_t bins, int min, int max)
  output : Tensor
  infer_meta :
    func : HistogramInferMeta
  kernel :
    func : histogram
- api : huber_loss
  args : (Tensor input, Tensor label, float delta)
  output : Tensor(out), Tensor(residual)
  infer_meta :
    func : HuberLossInferMeta
  kernel :
    func : huber_loss
  backward : huber_loss_grad
- api : imag
  args : (Tensor x)
  output : Tensor
  infer_meta :
    func : RealAndImagInferMeta
  kernel :
    func : imag
  backward : imag_grad
# increment
- api : increment
  args : (Tensor x, float value)
  output : Tensor
  infer_meta :
    func : IncrementInferMeta
  kernel :
    func : increment
- api : index_sample
  args : (Tensor x, Tensor index)
  output : Tensor
  infer_meta :
    func : IndexSampleInferMeta
  kernel :
    func : index_sample
    data_type : x
  backward : index_sample_grad
- api : index_select
  args : (Tensor x, Tensor index, int dim)
  output : Tensor(out)
  infer_meta :
    func : IndexSelectInferMeta
  kernel :
    func : index_select
    data_type : x
  backward : index_select_grad
- api : instance_norm
  args : (Tensor x, Tensor scale, Tensor bias, float epsilon)
  output : Tensor(y), Tensor(saved_mean), Tensor(saved_variance)
  infer_meta :
    func : InstanceNormInferMeta
  kernel :
    func : instance_norm
    data_type : x
  optional : scale, bias
  intermediate : saved_mean, saved_variance
  backward : instance_norm_grad
# is_empty
- api : is_empty
  args : (Tensor x)
  output : Tensor
  infer_meta :
    func : IsEmptyInferMeta
  kernel :
    func : is_empty
- api : isclose
  args : (Tensor x, Tensor y, Scalar rtol, Scalar atol, bool equal_nan)
  output : Tensor(out)
  infer_meta :
    func : ValueCompareInferMeta
    param : [x, y]
  kernel :
    func : isclose
# isfinite
- api : isfinite
  args : (Tensor x)
  output : Tensor
  infer_meta :
    func : IsfiniteInferMeta
  kernel :
    func : isfinite, infinite_sr
# isinf
- api : isinf
  args : (Tensor x)
  output : Tensor
  infer_meta :
    func : IsfiniteInferMeta
  kernel :
    func : isinf, isinf_sr
# isnan
- api : isnan
  args : (Tensor x)
  output : Tensor
  infer_meta :
    func : IsfiniteInferMeta
  kernel :
    func : isnan, isnan_sr
- api : kldiv_loss
  args : (Tensor x, Tensor label, str reduction)
  output : Tensor(out)
  infer_meta :
    func : KLDivInferMeta
  kernel :
    func : kldiv_loss
    data_type : x
  backward : kldiv_loss_grad
- api : kron
  args : (Tensor x, Tensor y)
  output : Tensor
  infer_meta :
    func : KronInferMeta
  kernel :
    func : kron
  backward : kron_grad
- api : kthvalue
  args : (Tensor x, int k, int axis, bool keepdim)
  output : Tensor(out), Tensor(indices)
  infer_meta :
    func : KthvalueInferMeta
  kernel :
    func : kthvalue
  backward : kthvalue_grad
# label_smooth
- api : label_smooth
  args : (Tensor label, Tensor prior_dist, float epsilon)
  output : Tensor
  infer_meta :
    func : UnchangedInferMeta
    param : [label]
  kernel :
    func : label_smooth
    data_type : label
  optional : prior_dist
  backward : label_smooth_grad
- api : layer_norm
  args : (Tensor x, Tensor scale, Tensor bias, float epsilon, int begin_norm_axis, bool is_test)
  output : Tensor(out), Tensor(mean), Tensor(variance)
  infer_meta :
    func : LayerNormInferMeta
  kernel :
    func : layer_norm
    data_type : x
  backward : layer_norm_grad
  optional : scale, bias
# leaky_relu
- api : leaky_relu
  args : (Tensor x, float alpha)
  output : Tensor
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : leaky_relu
  backward : leaky_relu_grad
- api : lerp
  args : (Tensor x, Tensor y, Tensor weight)
  output : Tensor
  infer_meta :
    func : LerpInferMeta
  kernel :
    func : lerp
  backward : lerp_grad
- api : less_equal
  args : (Tensor x, Tensor y, int axis = -1)
  output : Tensor
  infer_meta :
    func : CompareInferMeta
  kernel :
    func : less_equal
- api : less_than
  args : (Tensor x, Tensor y, int axis = -1)
  output : Tensor
  infer_meta :
    func : CompareInferMeta
  kernel :
    func : less_than
- api : lgamma
  args : (Tensor x)
  output : Tensor(out)
  infer_meta :
    func : UnchangedInferMeta
  kernel :
    func : lgamma
  backward : lgamma_grad
- api : linspace
  args : (Tensor start, Tensor stop, Tensor number, DataType dtype)
  output : Tensor
  infer_meta :
    func : LinspaceInferMeta
  kernel :
    func : linspace
    data_type : dtype
- api : log
  args : (Tensor x)
  output : Tensor
  infer_meta :
    func : UnchangedInferMeta
  kernel :
    func : log
  backward : log_grad
- api : log10
  args : (Tensor x)
  output : Tensor
  infer_meta :
    func : UnchangedInferMeta
  kernel :
    func : log10
  backward : log10_grad
- api : log1p
  args : (Tensor x)
  output : Tensor
  infer_meta :
    func : UnchangedInferMeta
  kernel :
    func : log1p
  backward : log1p_grad
- api : log2
  args : (Tensor x)
  output : Tensor
  infer_meta :
    func : UnchangedInferMeta
  kernel :
    func : log2
  backward : log2_grad
# log_loss
- api : log_loss
  args : (Tensor input, Tensor label, float epsilon)
  output : Tensor
  infer_meta :
    func : LogLossInferMeta
  kernel :
    func : log_loss
  backward : log_loss_grad
- api : log_softmax
  args : (Tensor x, int axis)
  output : Tensor(out)
  infer_meta :
    func : UnchangedInferMetaCheckAxis
  kernel :
    func : log_softmax
  backward : log_softmax_grad
- api : logcumsumexp
  args : (Tensor x, int axis, bool flatten, bool exclusive, bool reverse)
  output : Tensor(out)
  infer_meta :
    func : CumInferMeta
  kernel :
    func : logcumsumexp
  backward : logcumsumexp_grad
# logical_and
- api : logical_and
  args : (Tensor x, Tensor y)
  output : Tensor
  infer_meta :
    func : ElementwiseInferMeta
  kernel :
    func : logical_and
# logical_not
- api : logical_not
  args : (Tensor x)
  output : Tensor
  infer_meta :
    func : UnchangedInferMeta
  kernel :
    func : logical_not
# logical_or
- api : logical_or
  args : (Tensor x, Tensor y)
  output : Tensor
  infer_meta :
    func : ElementwiseInferMeta
  kernel :
    func : logical_or
# logical_xor
- api : logical_xor
  args : (Tensor x, Tensor y)
  output : Tensor
  infer_meta :
    func : ElementwiseInferMeta
  kernel :
    func : logical_xor
# logit
- api : logit
  args : (Tensor x, float eps = 1e-6f)
  output : Tensor
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : logit
  backward : logit_grad
# logsigmoid
- api : logsigmoid
  args : (Tensor x)
  output : Tensor
  infer_meta :
    func : UnchangedInferMeta
  kernel :
    func : logsigmoid
  backward : logsigmoid_grad
- api : logsumexp
  args : (Tensor x, int64_t[] axis, bool keepdim, bool reduce_all)
  output : Tensor(out)
  infer_meta :
    func : LogsumexpInferMeta
  kernel :
    func : logsumexp
  backward : logsumexp_grad
# masked_select
- api : masked_select
  args : (Tensor x, Tensor mask)
  output : Tensor
  infer_meta :
    func : MaskedSelectInferMeta
  kernel :
    func : masked_select
    data_type : x
  backward : masked_select_grad
- api : matmul
  args : (Tensor x, Tensor y, bool transpose_x = false, bool transpose_y = false)
  output : Tensor
  infer_meta :
    func : MatmulInferMeta
  kernel :
    func : matmul
  backward : matmul_grad
# matrix_power
- api : matrix_power
  args : (Tensor x, int n)
  output : Tensor
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : matrix_power
  backward : matrix_power_grad
- api : matrix_rank
  args : (Tensor x, float tol, bool use_default_tol=true, bool hermitian=false)
  output : Tensor(out)
  infer_meta :
    func : MatrixRankInferMeta
    param : [x, use_default_tol, hermitian]
  kernel :
    func : matrix_rank
- api : matrix_rank_tol
  args : (Tensor x, Tensor atol_tensor, bool use_default_tol=true, bool hermitian=false)
  output : Tensor(out)
  infer_meta :
    func : MatrixRankTolInferMeta
  kernel :
    func : matrix_rank_tol
- api : max
  args : (Tensor x, int64_t[] dims={}, bool keep_dim=false)
  output : Tensor(out)
  infer_meta :
    func : ReduceInferMeta
  kernel :
    func : max
  backward : max_grad
- api : max_pool2d_with_index
  args : (Tensor x, int[] kernel_size, int[] strides, int[] paddings, bool global_pooling, bool adaptive)
  output : Tensor(out), Tensor(mask)
  infer_meta :
    func : MaxPoolWithIndexInferMeta
  kernel :
    func : max_pool2d_with_index
  backward : max_pool2d_with_index_grad
- api : max_pool3d_with_index
  args : (Tensor x, int[] kernel_size, int[] strides, int[] paddings, bool global_pooling, bool adaptive)
  output : Tensor(out), Tensor(mask)
  infer_meta :
    func : MaxPoolWithIndexInferMeta
  kernel :
    func : max_pool3d_with_index
  backward : max_pool3d_with_index_grad
- api : maximum
  args : (Tensor x, Tensor y)
  output : Tensor(out)
  infer_meta :
    func : ElementwiseInferMeta
  kernel :
    func : maximum
  backward : maximum_grad
- api : maxout
  args : (Tensor x, int groups, int axis)
  output : Tensor(out)
  infer_meta :
    func : MaxOutInferMeta
  kernel :
    func : maxout
  backward : maxout_grad
- api : mean
  args : (Tensor x, int64_t[] dims={}, bool keep_dim=false)
  output : Tensor(out)
  infer_meta :
    func : ReduceInferMeta
  kernel :
    func : mean
  backward : mean_grad
- api : mean_all
  args : (Tensor x)
  output : Tensor
  infer_meta :
    func : MeanAllInferMeta
  kernel :
    func : mean_all
  backward : mean_all_grad
- api : meshgrid
  args : (Tensor[] inputs)
  output : Tensor[]{inputs.size()}
  infer_meta :
    func : MeshgridInferMeta
  kernel :
    func : meshgrid
  backward : meshgrid_grad
- api : min
  args : (Tensor x, int64_t[] dims={}, bool keep_dim=false)
  output : Tensor(out)
  infer_meta :
    func : ReduceInferMeta
  kernel :
    func : min
  backward : min_grad
- api : minimum
  args : (Tensor x, Tensor y)
  output : Tensor(out)
  infer_meta :
    func : ElementwiseInferMeta
  kernel :
    func : minimum
  backward : minimum_grad
- api : mish
  args : (Tensor x, float lambda)
  output : Tensor
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : mish
  backward : mish_grad
- api : mode
  args : (Tensor x, int axis, bool keepdim)
  output : Tensor(out), Tensor(indices)
  infer_meta :
    func : ModeInferMeta
  kernel :
    func : mode
  backward : mode_grad
- api : modulo
  args : (Tensor x, Tensor y)
  output : Tensor
  infer_meta :
    func : ElementwiseInferMeta
  kernel :
    func : modulo
  backward : modulo_grad
- api : momentum
  args : (Tensor param, Tensor grad, Tensor velocity, Tensor learning_rate, Tensor master_param, float mu, bool use_nesterov = false, str regularization_method = "", float regularization_coeff = 0.0, bool multi_precision = false, float rescale_grad = 1.0f)
  output : Tensor(param_out), Tensor(velocity_out), Tensor(master_param_out)
  invoke : momentum_impl(param, grad, velocity, learning_rate, master_param, mu, use_nesterov, regularization_method, regularization_coeff, multi_precision, rescale_grad)
  optional : master_param
- api : multi_dot
  args : (Tensor[] x)
  output : Tensor
  infer_meta :
    func : MultiDotInferMeta
  kernel :
    func : multi_dot
  backward : multi_dot_grad
# multinomial
- api : multinomial
  args : (Tensor x, int num_samples, bool replacement)
  output : Tensor
  infer_meta :
    func : MultinomialInferMeta
  kernel :
    func : multinomial
- api : multiplex
  args : (Tensor[] ins, Tensor ids)
  output : Tensor
  infer_meta :
    func : MultiplexInferMeta
  kernel :
    func : multiplex
    data_type : ins
  backward : multiplex_grad
- api : multiply
  args : (Tensor x, Tensor y)
  output : Tensor
  infer_meta :
    func : ElementwiseInferMeta
  kernel :
    func : multiply
  backward : multiply_grad
- api : mv
  args : (Tensor x, Tensor vec)
  output : Tensor
  infer_meta :
    func : MvInferMeta
  kernel :
    func : mv
  backward : mv_grad
- api : nll_loss
  args : (Tensor input, Tensor label, Tensor weight, int64_t ignore_index, str reduction)
  output : Tensor(out), Tensor(total_weight)
  infer_meta :
    func : NllLossRawInferMeta
  kernel :
    func : nll_loss
    data_type : input
  optional : weight
  backward : nll_loss_grad
- api : norm
  args : (Tensor x, int axis, float epsilon, bool is_test)
  output : Tensor(out), Tensor(norm)
  infer_meta :
    func : NormInferMeta
  kernel :
    func : norm
  intermediate : norm
  backward : norm_grad
- api : not_equal
  args : (Tensor x, Tensor y, int axis = -1)
  output : Tensor
  infer_meta :
    func : CompareInferMeta
  kernel :
    func : not_equal
- api : one_hot
  args : (Tensor x, Scalar(int) num_classes)
  output : Tensor
  infer_meta :
    func : OneHotInferMeta
  kernel :
    func : one_hot
- api : ones_like
  args : (Tensor x, DataType dtype=DataType::UNDEFINED, Place place={})
  output
:
Tensor
invoke
:
full_like(x, 1, dtype, place)
-
api
:
p_norm
args
:
(Tensor x, float porder, int axis, float epsilon, bool keepdim, bool asvector=false)
output
:
Tensor(out)
infer_meta
:
func
:
PNormInferMeta
kernel
:
func
:
p_norm
backward
:
p_norm_grad
# pad
-
api
:
pad
args
:
(Tensor x, int[] paddings, float pad_value)
output
:
Tensor
infer_meta
:
func
:
PadInferMeta
kernel
:
func
:
pad
backward
:
pad_grad
-
api
:
pad3d
args
:
(Tensor x, IntArray paddings, str mode, float pad_value, str data_format)
output
:
Tensor(out)
infer_meta
:
func
:
Pad3dInferMeta
kernel
:
func
:
pad3d
backward
:
pad3d_grad
# pixel_shuffle
-
api
:
pixel_shuffle
args
:
(Tensor x, int upscale_factor, str data_format)
output
:
Tensor
infer_meta
:
func
:
PixelShuffleInferMeta
kernel
:
func
:
pixel_shuffle
backward
:
pixel_shuffle_grad
# poisson
-
api
:
poisson
args
:
(Tensor x)
output
:
Tensor
infer_meta
:
func
:
UnchangedInferMeta
kernel
:
func
:
poisson
backward
:
poisson_grad
-
api
:
pool2d
args
:
(Tensor x, int[] kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm)
output
:
Tensor(out)
infer_meta
:
func
:
PoolInferMeta
kernel
:
func
:
pool2d
use_gpudnn
:
true
backward
:
pool2d_grad
# Used in adaptive_avg_pool2d API
-
api
:
pool2d_gpudnn_unused
args
:
(Tensor x, int[] kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm)
output
:
Tensor(out)
infer_meta
:
func
:
PoolInferMeta
kernel
:
func
:
pool2d
use_gpudnn
:
false
backward
:
pool2d_grad_gpudnn_unused
-
api
:
pool3d
args
:
(Tensor x, int[] kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm)
output
:
Tensor(out)
infer_meta
:
func
:
PoolInferMeta
kernel
:
func
:
pool3d
use_gpudnn
:
true
backward
:
pool3d_grad
-
api
:
pow
args
:
(Tensor x, Scalar s)
output
:
Tensor(out)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
x
]
kernel
:
func
:
pow
backward
:
pow_grad
-
api
:
prelu
args
:
(Tensor x, Tensor alpha, str data_format, str mode)
output
:
Tensor(out)
infer_meta
:
func
:
PReluInferMeta
kernel
:
func
:
prelu
backward
:
prelu_grad
-
api
:
psroi_pool
args
:
(Tensor x, Tensor boxes, Tensor boxes_num, int pooled_height, int pooled_width, int output_channels, float spatial_scale)
output
:
Tensor
infer_meta
:
func
:
PsroiPoolInferMeta
kernel
:
func
:
psroi_pool
data_type
:
x
optional
:
boxes_num
backward
:
psroi_pool_grad
# put_along_axis
-
api
:
put_along_axis
args
:
(Tensor x, Tensor index, Tensor value, int axis, str reduce)
output
:
Tensor
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
index
]
kernel
:
func
:
put_along_axis
data_type
:
x
backward
:
put_along_axis_grad
-
api
:
qr
args
:
(Tensor x, str mode)
output
:
Tensor(q), Tensor(r)
infer_meta
:
func
:
QrInferMeta
kernel
:
func
:
qr
# backward : qr_grad
-
api
:
randint
args
:
(int low, int high, IntArray shape, DataType dtype=DataType::INT64, Place place={})
output
:
Tensor(out)
infer_meta
:
func
:
RandintInferMeta
param
:
[
low
,
high
,
shape
,
dtype
]
kernel
:
func
:
randint
param
:
[
low
,
high
,
shape
,
dtype
]
data_type
:
dtype
backend
:
place
-
api
:
randperm
args
:
(int n, DataType dtype, Place place={})
output
:
Tensor
infer_meta
:
func
:
RandpermInferMeta
param
:
[
n
,
dtype
]
kernel
:
func
:
randperm
param
:
[
n
,
dtype
]
data_type
:
dtype
backend
:
place
-
api
:
real
args
:
(Tensor x)
output
:
Tensor
infer_meta
:
func
:
RealAndImagInferMeta
kernel
:
func
:
real
backward
:
real_grad
-
api
:
reciprocal
args
:
(Tensor x)
output
:
Tensor
infer_meta
:
func
:
UnchangedInferMeta
kernel
:
func
:
reciprocal
backward
:
reciprocal_grad
# reduce_prod
-
api
:
reduce_prod
args
:
(Tensor x, int64_t[] dims, bool keep_dim, bool reduce_all)
output
:
Tensor
infer_meta
:
func
:
ReduceInferMetaBase
kernel
:
func
:
prod_raw
backward
:
reduce_prod_grad
-
api
:
relu
args
:
(Tensor x)
output
:
Tensor
infer_meta
:
func
:
UnchangedInferMeta
kernel
:
func
:
relu
inplace
:
(x -> out)
backward
:
relu_grad
-
api
:
reshape
args
:
(Tensor x, IntArray shape)
output
:
Tensor(out), Tensor(xshape)
infer_meta
:
func
:
ReshapeWithXShapeInferMeta
kernel
:
func
:
reshape_with_xshape
inplace
:
(x -> out)
view
:
(x -> out)
intermediate
:
xshape
backward
:
reshape_grad
-
api
:
roi_align
args
:
(Tensor x, Tensor boxes, Tensor boxes_num, int pooled_height, int pooled_width, float spatial_scale, int sampling_ratio, bool aligned)
output
:
Tensor
infer_meta
:
func
:
RoiAlignInferMeta
kernel
:
func
:
roi_align
data_type
:
x
optional
:
boxes_num
backward
:
roi_align_grad
-
api
:
roi_pool
args
:
(Tensor x, Tensor boxes, Tensor boxes_num, int pooled_height, int pooled_width, float spatial_scale)
output
:
Tensor(out), Tensor(arg_max)
infer_meta
:
func
:
RoiPoolInferMeta
kernel
:
func
:
roi_pool
data_type
:
x
optional
:
boxes_num
intermediate
:
arg_max
backward
:
roi_pool_grad
-
api
:
roll
args
:
(Tensor x, IntArray shifts, int64_t[] axis)
output
:
Tensor(out)
infer_meta
:
func
:
RollInferMeta
kernel
:
func
:
roll
backward
:
roll_grad
-
api
:
round
args
:
(Tensor x)
output
:
Tensor(out)
infer_meta
:
func
:
UnchangedInferMeta
kernel
:
func
:
round
backward
:
round_grad
-
api
:
rsqrt
args
:
(Tensor x)
output
:
Tensor(out)
infer_meta
:
func
:
UnchangedInferMeta
kernel
:
func
:
rsqrt
inplace
:
(x -> out)
backward
:
rsqrt_grad
-
api
:
scale
args
:
(Tensor x, Scalar scale, float bias, bool bias_after_scale)
output
:
Tensor
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
x
]
kernel
:
func
:
scale, scale_sr
inplace
:
(x -> out)
backward
:
scale_grad
-
api
:
scatter
args
:
(Tensor x, Tensor index, Tensor updates, bool overwrite)
output
:
Tensor
infer_meta
:
func
:
ScatterInferMeta
dtype
:
x
kernel
:
func
:
scatter
backward
:
scatter_grad
-
api
:
scatter_nd_add
args
:
(Tensor x, Tensor index, Tensor updates)
output
:
Tensor
infer_meta
:
func
:
ScatterNdAddInferMeta
dtype
:
x
kernel
:
func
:
scatter_nd_add
backward
:
scatter_nd_add_grad
-
api
:
searchsorted
args
:
(Tensor sorted_sequence, Tensor value, bool out_int32, bool right)
output
:
Tensor(out)
infer_meta
:
func
:
SearchsortedInferMeta
kernel
:
func
:
searchsorted
data_type
:
sorted_sequence
# segment_pool
-
api
:
segment_pool
args
:
(Tensor x, Tensor segment_ids, str pooltype)
output
:
Tensor(out), Tensor(summed_ids)
infer_meta
:
func
:
SegmentPoolInferMeta
kernel
:
func
:
segment_pool
data_type
:
x
backward
:
segment_pool_grad
# selu
-
api
:
selu
args
:
(Tensor x, float scale, float alpha)
output
:
Tensor
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
x
]
kernel
:
func
:
selu
backward
:
selu_grad
-
api
:
sgd
args
:
(Tensor param, Tensor learning_rate, Tensor grad, Tensor master_param, bool multi_precision)
output
:
Tensor(param_out), Tensor(master_param_out)
invoke
:
sgd_impl(param, learning_rate, grad, master_param, multi_precision)
optional
:
master_param
-
api
:
shape
args
:
(Tensor input)
output
:
Tensor
infer_meta
:
func
:
ShapeInferMeta
kernel
:
func
:
shape, shape_sr
data_transform
:
skip_transform
:
input
# shard_index
-
api
:
shard_index
args
:
(Tensor in, int index_num, int nshards, int shard_id, int ignore_value)
output
:
Tensor
infer_meta
:
func
:
ShardIndexInferMeta
kernel
:
func
:
shard_index
# sigmoid
-
api
:
sigmoid
args
:
(Tensor x)
output
:
Tensor
infer_meta
:
func
:
UnchangedInferMeta
kernel
:
func
:
sigmoid
backward
:
sigmoid_grad
# sigmoid_cross_entropy_with_logits
-
api
:
sigmoid_cross_entropy_with_logits
args
:
(Tensor x, Tensor label, bool normalize, int ignore_index)
output
:
Tensor
infer_meta
:
func
:
SigmoidCrossEntropyWithLogitsInferMeta
kernel
:
func
:
sigmoid_cross_entropy_with_logits
backward
:
sigmoid_cross_entropy_with_logits_grad
-
api
:
sign
args
:
(Tensor x)
output
:
Tensor
infer_meta
:
func
:
UnchangedInferMeta
kernel
:
func
:
sign
# silu
-
api
:
silu
args
:
(Tensor x)
output
:
Tensor
infer_meta
:
func
:
UnchangedInferMeta
kernel
:
func
:
silu
backward
:
silu_grad
# sin
-
api
:
sin
args
:
(Tensor x)
output
:
Tensor
infer_meta
:
func
:
UnchangedInferMeta
kernel
:
func
:
sin
backward
:
sin_grad
# sinh
-
api
:
sinh
args
:
(Tensor x)
output
:
Tensor
infer_meta
:
func
:
UnchangedInferMeta
kernel
:
func
:
sinh
backward
:
sinh_grad
# size
-
api
:
size
args
:
(Tensor x)
output
:
Tensor
infer_meta
:
func
:
SizeInferMeta
kernel
:
func
:
size
data_transform
:
skip_transform
:
x
-
api
:
slice
args
:
(Tensor input, int64_t[] axes, IntArray starts, IntArray ends, int64_t[] infer_flags, int64_t[] decrease_axis)
output
:
Tensor
infer_meta
:
func
:
SliceRawInferMeta
kernel
:
func
:
slice
backward
:
slice_grad
# soft_shrink
-
api
:
soft_shrink
args
:
(Tensor x, float lambda)
output
:
Tensor
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
x
]
kernel
:
func
:
soft_shrink
backward
:
soft_shrink_grad
-
api
:
softmax
args
:
(Tensor x, int axis)
output
:
Tensor
infer_meta
:
func
:
SoftmaxInferMeta
kernel
:
func
:
softmax
use_gpudnn
:
true
backward
:
softmax_grad
-
api
:
split
args
:
(Tensor x, IntArray num_or_sections, Scalar(int) axis)
output
:
Tensor[]
invoke
:
split_impl(x, num_or_sections, axis)
backward
:
split_grad
-
api
:
sqrt
args
:
(Tensor x)
output
:
Tensor
infer_meta
:
func
:
UnchangedInferMeta
kernel
:
func
:
sqrt
backward
:
sqrt_grad
-
api
:
square
args
:
(Tensor x)
output
:
Tensor
infer_meta
:
func
:
UnchangedInferMeta
kernel
:
func
:
square
backward
:
square_grad
-
api
:
squeeze
args
:
(Tensor x, int[] axes)
output
:
Tensor(out), Tensor(xshape)
infer_meta
:
func
:
SqueezeInferMeta
kernel
:
func
:
squeeze
view
:
(x -> out)
intermediate
:
xshape
backward
:
squeeze_grad
-
api
:
stack
args
:
(Tensor[] x, int axis)
output
:
Tensor
infer_meta
:
func
:
StackInferMeta
kernel
:
func
:
stack
backward
:
stack_grad
-
api
:
strided_slice
args
:
(Tensor x, int[] axes, IntArray starts, IntArray ends, IntArray strides)
output
:
Tensor
infer_meta
:
func
:
StridedSliceInferMeta
kernel
:
func
:
strided_slice
backward
:
strided_slice_grad
-
api
:
subtract
args
:
(Tensor x, Tensor y)
output
:
Tensor
infer_meta
:
func
:
ElementwiseInferMeta
kernel
:
func
:
subtract
backward
:
subtract_grad
-
api
:
sum
args
:
(Tensor x, int64_t[] dims={}, DataType out_dtype=DataType::UNDEFINED, bool keep_dim=false)
output
:
Tensor(out)
infer_meta
:
func
:
SumInferMeta
kernel
:
func
:
sum
data_type
:
x
backward
:
sum_grad
# The python API paddle.nn.functional.swish has no `bete` argument, it may be removed later
-
api
:
swish
args
:
(Tensor x, float beta=1.0)
output
:
Tensor(out)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
x
]
kernel
:
func
:
swish
backward
:
swish_grad
# take_along_axis
-
api
:
take_along_axis
args
:
(Tensor x, Tensor index, int axis)
output
:
Tensor
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
index
]
kernel
:
func
:
take_along_axis
data_type
:
x
backward
:
take_along_axis_grad
# tan
-
api
:
tan
args
:
(Tensor x)
output
:
Tensor
infer_meta
:
func
:
UnchangedInferMeta
kernel
:
func
:
tan
backward
:
tan_grad
# tanh
-
api
:
tanh
args
:
(Tensor x)
output
:
Tensor
infer_meta
:
func
:
UnchangedInferMeta
kernel
:
func
:
tanh
backward
:
tanh_grad
# tanh_shrink
-
api
:
tanh_shrink
args
:
(Tensor x)
output
:
Tensor
infer_meta
:
func
:
UnchangedInferMeta
kernel
:
func
:
tanh_shrink
backward
:
tanh_shrink_grad
# thresholded_relu
-
api
:
thresholded_relu
args
:
(Tensor x, float threshold)
output
:
Tensor
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
x
]
kernel
:
func
:
thresholded_relu
backward
:
thresholded_relu_grad
# tile
-
api
:
tile
args
:
(Tensor x, IntArray repeat_times)
output
:
Tensor
infer_meta
:
func
:
TileInferMeta
kernel
:
func
:
tile
backward
:
tile_grad
-
api
:
top_k
args
:
(Tensor x, Scalar k, int axis = -1, bool largest =
true
, bool sorted =
true
)
output
:
Tensor(out), Tensor(indices)
infer_meta
:
func
:
TopKInferMeta
kernel
:
func
:
top_k
backward
:
top_k_grad
-
api
:
trace
args
:
(Tensor x, int offset, int axis1, int axis2)
output
:
Tensor
infer_meta
:
func
:
TraceInferMeta
kernel
:
func
:
trace
backward
:
trace_grad
-
api
:
transpose
args
:
(Tensor x, int[] axis)
output
:
Tensor
infer_meta
:
func
:
TransposeInferMeta
kernel
:
func
:
transpose
backward
:
transpose_grad
-
api
:
triangular_solve
args
:
(Tensor x, Tensor y, bool upper, bool transpose, bool unitriangular)
output
:
Tensor
infer_meta
:
func
:
TriangularSolveInferMeta
kernel
:
func
:
triangular_solve
backward
:
triangular_solve_grad
-
api
:
tril_indices
args
:
(int rows, int cols, int offset, DataType dtype, Place place={})
output
:
Tensor(out)
infer_meta
:
func
:
TrilIndicesInferMeta
param
:
[
rows
,
cols
,
offset
,
dtype
]
kernel
:
func
:
tril_indices
param
:
[
rows
,
cols
,
offset
,
dtype
]
data_type
:
dtype
backend
:
place
-
api
:
tril_triu
args
:
(Tensor x, int diagonal, bool lower)
output
:
Tensor(out)
infer_meta
:
func
:
TrilTriuInferMeta
kernel
:
func
:
tril_triu
backward
:
tril_triu_grad
-
api
:
trunc
args
:
(Tensor x)
output
:
Tensor
infer_meta
:
func
:
UnchangedInferMeta
kernel
:
func
:
trunc
backward
:
trunc_grad
# python API: paddle.nn.initializer.TruncatedNormal
-
api
:
truncated_gaussian_random
args
:
(int[] shape, float mean, float std, int seed, DataType dtype=DataType::FLOAT32, Place place={})
output
:
Tensor
infer_meta
:
func
:
TruncatedGaussianRandomInferMeta
param
:
[
shape
,
mean
,
std
,
seed
,
dtype
]
kernel
:
func
:
truncated_gaussian_random
param
:
[
shape
,
mean
,
std
,
seed
,
dtype
]
backend
:
place
data_type
:
dtype
-
api
:
unbind
args
:
(Tensor input, int axis)
output
:
Tensor[] {axis<0 ? input.dims()[input.dims().size()+axis]:input.dims()[axis]}
infer_meta
:
func
:
UnbindInferMeta
kernel
:
func
:
unbind
backward
:
unbind_grad
# unfold
-
api
:
unfold
args
:
(Tensor x, int[] kernel_sizes, int[] strides, int[] paddings, int[] dilations)
output
:
Tensor
infer_meta
:
func
:
UnfoldInferMeta
kernel
:
func
:
unfold
backward
:
unfold_grad
-
api
:
uniform_random
args
:
(IntArray shape, DataType dtype, float min, float max, int seed, Place place={})
output
:
Tensor(out)
infer_meta
:
func
:
UniformRandomInferMeta
param
:
[
shape
,
dtype
,
min
,
max
,
seed
]
kernel
:
func
:
uniform_random
param
:
[
shape
,
dtype
,
min
,
max
,
seed
]
data_type
:
dtype
backend
:
place
# The `axis` argument of Python API paddle.unique is not vector
-
api
:
unique
args
:
(Tensor x, bool return_index, bool return_inverse, bool return_counts, int[] axis, DataType dtype=DataType::INT64)
output
:
Tensor(out), Tensor(indices), Tensor(inverse), Tensor(counts)
infer_meta
:
func
:
UniqueInferMeta
kernel
:
func
:
unique
data_type
:
x
-
api
:
unsqueeze
args
:
(Tensor x, IntArray axis)
output
:
Tensor(out), Tensor(xshape)
infer_meta
:
func
:
UnsqueezeInferMeta
kernel
:
func
:
unsqueeze
view
:
(x -> out)
intermediate
:
xshape
backward
:
unsqueeze_grad
# viterbi_decode
-
api
:
viterbi_decode
args
:
(Tensor input, Tensor transition, Tensor length, bool include_bos_eos_tag)
output
:
Tensor(scores), Tensor(path)
infer_meta
:
func
:
ViterbiDecodeInferMeta
kernel
:
func
:
viterbi_decode
data_type
:
input
-
api
:
where
args
:
(Tensor condition, Tensor x, Tensor y)
output
:
Tensor
infer_meta
:
func
:
WhereInferMeta
kernel
:
func
:
where
backward
:
where_grad
# where_index
-
api
:
where_index
args
:
(Tensor condition)
output
:
Tensor
infer_meta
:
func
:
WhereIndexInferMeta
kernel
:
func
:
where_index
# yolo_box
-
api
:
yolo_box
args
:
(Tensor x, Tensor img_size, int[] anchors, int class_num, float conf_thresh, int downsample_ratio, bool clip_bbox, float scale_x_y=1.0, bool iou_aware=false, float iou_aware_factor=0.5)
output
:
Tensor(boxes), Tensor(scores)
infer_meta
:
func
:
YoloBoxInferMeta
kernel
:
func
:
yolo_box
data_type
:
x
-
api
:
zeros_like
args
:
(Tensor x, DataType dtype=DataType::UNDEFINED, Place place = {})
output
:
Tensor
invoke
:
full_like(x, 0, dtype, place)
python/paddle/utils/code_gen/legacy_backward.yaml
0 → 100644
浏览文件 @
fcd32950
-
backward_api
:
abs_double_grad
forward
:
abs_grad (Tensor x, Tensor grad_out) -> Tensor(grad_x)
args
:
(Tensor x, Tensor grad_x_grad)
output
:
Tensor(grad_out_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
x
]
kernel
:
func
:
abs_double_grad
data_transform
:
skip_transform
:
grad_x_grad
-
backward_api
:
abs_grad
forward
:
abs (Tensor x) -> Tensor(out)
args
:
(Tensor x, Tensor out_grad)
output
:
Tensor(x_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
x
]
kernel
:
func
:
abs_grad
data_transform
:
skip_transform
:
out_grad
backward
:
abs_double_grad
-
backward_api
:
acos_grad
forward
:
acos (Tensor x) -> Tensor(out)
args
:
(Tensor x, Tensor out_grad)
output
:
Tensor(x_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
x
]
kernel
:
func
:
acos_grad
inplace
:
(out_grad -> x_grad)
-
backward_api
:
acosh_grad
forward
:
acosh (Tensor x) -> Tensor(out)
args
:
(Tensor x, Tensor out_grad)
output
:
Tensor(x_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
x
]
kernel
:
func
:
acosh_grad
inplace
:
(out_grad -> x_grad)
-
backward_api
:
add_double_grad
forward
:
add_grad (Tensor x, Tensor y, Tensor grad_out, int axis = -1) -> Tensor(grad_x), Tensor(grad_y)
args
:
(Tensor y, Tensor grad_out, Tensor grad_x_grad, Tensor grad_y_grad, int axis = -1)
output
:
Tensor(grad_out_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
grad_out
]
kernel
:
func
:
add_double_grad
optional
:
grad_x_grad, grad_y_grad
backward
:
add_triple_grad
inplace
:
(grad_x_grad -> grad_out_grad)
-
backward_api
:
add_grad
forward
:
add (Tensor x, Tensor y) -> Tensor(out)
args
:
(Tensor x, Tensor y, Tensor out_grad, int axis = -1)
output
:
Tensor(x_grad), Tensor(y_grad)
infer_meta
:
func
:
GeneralBinaryGradInferMeta
param
:
[
x
,
y
]
kernel
:
func
:
add_grad
no_need_buffer
:
x, y
backward
:
add_double_grad
inplace
:
(out_grad -> x_grad)
-
backward_api
:
add_n_grad
forward
:
add_n (Tensor[] x) -> Tensor(out)
args
:
(Tensor[] x, Tensor out_grad)
output
:
Tensor[](x_grad){x.size()}
invoke
:
add_n_grad_impl(x, out_grad, x_grad)
no_need_buffer
:
x
-
backward_api
:
add_triple_grad
forward
:
add_double_grad (Tensor y, Tensor grad_out, Tensor grad_grad_x, Tensor grad_grad_y, int axis = -1) -> Tensor(grad_grad_out)
args
:
(Tensor grad_grad_x, Tensor grad_grad_y, Tensor grad_grad_out_grad, int axis = -1)
output
:
Tensor(grad_grad_x_grad), Tensor(grad_grad_y_grad)
infer_meta
:
func
:
GeneralBinaryGradInferMeta
param
:
[
grad_grad_x
,
grad_grad_y
]
kernel
:
func
:
add_triple_grad
inplace
:
(grad_grad_out_grad -> grad_grad_x_grad)
-
backward_api
:
addmm_grad
forward
:
addmm (Tensor input, Tensor x, Tensor y, float alpha, float beta) -> Tensor(out)
args
:
(Tensor input, Tensor x, Tensor y, Tensor out_grad, float alpha, float beta)
output
:
Tensor(input_grad), Tensor(x_grad), Tensor(y_grad)
infer_meta
:
func
:
GeneralTernaryGradInferMeta
param
:
[
input
,
x
,
y
]
kernel
:
func
:
addmm_grad
-
backward_api
:
argsort_grad
forward
:
argsort (Tensor x, int axis, bool descending) -> Tensor(out), Tensor(indices)
args
:
(Tensor indices, Tensor x, Tensor out_grad, int axis, bool descending)
output
:
Tensor(x_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
x
]
kernel
:
func
:
argsort_grad
no_need_buffer
:
x
-
backward_api
:
asin_grad
forward
:
asin (Tensor x) -> Tensor(out)
args
:
(Tensor x, Tensor out_grad)
output
:
Tensor(x_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
x
]
kernel
:
func
:
asin_grad
inplace
:
(out_grad -> x_grad)
-
backward_api
:
asinh_grad
forward
:
asinh (Tensor x) -> Tensor(out)
args
:
(Tensor x, Tensor out_grad)
output
:
Tensor(x_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
x
]
kernel
:
func
:
asinh_grad
inplace
:
(out_grad -> x_grad)
-
backward_api
:
assign_grad
forward
:
assign (Tensor x) -> Tensor(out)
args
:
(Tensor out_grad)
output
:
Tensor(x_grad)
infer_meta
:
func
:
UnchangedInferMeta
kernel
:
func
:
assign
inplace
:
(out_grad -> x_grad)
-
backward_api
:
assign_out__grad
forward
:
assign_out_ (Tensor x, Tensor output) -> Tensor(out)
args
:
(Tensor out_grad)
output
:
Tensor(x_grad)
infer_meta
:
func
:
UnchangedInferMeta
kernel
:
func
:
assign
inplace
:
(out_grad -> x_grad)
-
backward_api
:
atan2_grad
forward
:
atan2 (Tensor x, Tensor y) -> Tensor(out)
args
:
(Tensor x, Tensor y, Tensor out_grad)
output
:
Tensor(x_grad), Tensor(y_grad)
infer_meta
:
func
:
GeneralBinaryGradInferMeta
param
:
[
x
,
y
]
kernel
:
func
:
atan2_grad
-
backward_api
:
atan_grad
forward
:
atan (Tensor x) -> Tensor(out)
args
:
(Tensor x, Tensor out_grad)
output
:
Tensor(x_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
x
]
kernel
:
func
:
atan_grad
inplace
:
(out_grad -> x_grad)
-
backward_api
:
atanh_grad
forward
:
atanh (Tensor x) -> Tensor(out)
args
:
(Tensor x, Tensor out_grad)
output
:
Tensor(x_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
x
]
kernel
:
func
:
atanh_grad
inplace
:
(out_grad -> x_grad)
-
backward_api
:
batch_norm_double_grad
forward
:
batch_norm_grad (Tensor x, Tensor scale, Tensor bias, Tensor out_mean, Tensor out_variance, Tensor saved_mean, Tensor saved_variance, Tensor reserve_space, Tensor grad_out, float momentum, float epsilon, str data_layout, bool is_test, bool use_global_stats, bool trainable_statistics, bool fuse_with_relu) -> Tensor(grad_x), Tensor(grad_scale), Tensor(grad_bias)
args
:
(Tensor x, Tensor scale, Tensor out_mean, Tensor out_variance, Tensor saved_mean, Tensor saved_variance, Tensor grad_out, Tensor grad_x_grad, Tensor grad_scale_grad, Tensor grad_bias_grad, float momentum, float epsilon, str data_layout, bool is_test, bool use_global_stats, bool trainable_statistics, bool fuse_with_relu)
output
:
Tensor(x_grad), Tensor(scale_grad), Tensor(grad_out_grad)
infer_meta
:
func
:
GeneralTernaryGradInferMeta
param
:
[
x
,
scale
,
x
]
kernel
:
func
:
batch_norm_grad_grad
data_type
:
x
optional
:
out_mean, out_variance
inplace
:
(grad_out -> grad_out_grad)
-
backward_api
:
batch_norm_grad
forward
:
batch_norm (Tensor x, Tensor scale, Tensor bias, Tensor mean, Tensor variance, float momentum, float epsilon, str data_layout, bool is_test, bool use_global_stats, bool trainable_statistics, bool fuse_with_relu) -> Tensor(out), Tensor(mean_out), Tensor(variance_out), Tensor(saved_mean), Tensor(saved_variance), Tensor(reserve_space)
args
:
(Tensor x, Tensor scale, Tensor bias, Tensor mean_out, Tensor variance_out, Tensor saved_mean, Tensor saved_variance, Tensor reserve_space, Tensor out_grad, float momentum, float epsilon, str data_layout, bool is_test, bool use_global_stats, bool trainable_statistics, bool fuse_with_relu)
output
:
Tensor(x_grad), Tensor(scale_grad), Tensor(bias_grad)
infer_meta
:
func
:
GeneralTernaryGradInferMeta
param
:
[
x
,
scale
,
bias
]
kernel
:
func
:
batch_norm_grad
data_type
:
out_grad
optional
:
mean_out, variance_out, reserve_space
backward
:
batch_norm_double_grad
-
backward_api
:
bce_loss_grad
forward
:
bce_loss (Tensor input, Tensor label) -> Tensor(out)
args
:
(Tensor input, Tensor label, Tensor out_grad)
output
:
Tensor(input_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
input
]
kernel
:
func
:
bce_loss_grad
inplace
:
(out_grad -> input_grad)
-
backward_api
:
brelu_grad
forward
:
brelu (Tensor x, float t_min, float t_max) -> Tensor(out)
args
:
(Tensor x, Tensor out_grad, float t_min, float t_max)
output
:
Tensor(x_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
x
]
kernel
:
func
:
brelu_grad
inplace
:
(out_grad -> x_grad)
-
backward_api
:
cast_grad
forward
:
cast (Tensor x, DataType out_dtype) -> Tensor(out)
args
:
(Tensor x, Tensor out_grad)
output
:
Tensor(x_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
x
]
kernel
:
func
:
cast_grad
data_type
:
out_grad
no_need_buffer
:
x
-
backward_api
:
ceil_grad
forward
:
ceil(Tensor x) -> Tensor(out)
args
:
(Tensor out_grad)
output
:
Tensor(x_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
out_grad
]
kernel
:
func
:
ceil_grad
inplace
:
(out_grad -> x_grad)
-
backward_api
:
celu_double_grad
forward
:
celu_grad(Tensor x, Tensor grad_out, float alpha) -> Tensor(grad_x)
args
:
(Tensor x, Tensor grad_out, Tensor grad_x_grad, float alpha)
output
:
Tensor(x_grad), Tensor(grad_out_grad)
infer_meta
:
func
:
GeneralBinaryGradInferMeta
param
:
[
x
,
x
]
kernel
:
func
:
celu_double_grad
inplace
:
(grad_x_grad -> grad_out_grad)
-
backward_api
:
celu_grad
forward
:
celu(Tensor x, float alpha) -> Tensor(out)
args
:
(Tensor x, Tensor out_grad, float alpha)
output
:
Tensor(x_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
x
]
kernel
:
func
:
celu_grad
backward
:
celu_double_grad
inplace
:
(out_grad -> x_grad)
-
backward_api
:
cholesky_grad
forward
:
cholesky (Tensor x, bool upper) -> Tensor(out)
args
:
(Tensor out, Tensor out_grad, bool upper)
output
:
Tensor(x_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
out
]
kernel
:
func
:
cholesky_grad
-
backward_api
:
cholesky_solve_grad
forward
:
cholesky_solve (Tensor x, Tensor y, bool upper) -> Tensor(out)
args
:
(Tensor x, Tensor y, Tensor out, Tensor out_grad, bool upper)
output
:
Tensor(x_grad), Tensor(y_grad)
infer_meta
:
func
:
GeneralBinaryGradInferMeta
param
:
[
x
,
y
]
kernel
:
func
:
cholesky_solve_grad
-
backward_api
:
clip_double_grad
forward
:
clip_grad (Tensor x, Tensor grad_out, Scalar min = 0., Scalar max = 0.) -> Tensor(grad_x)
args
:
(Tensor x, Tensor grad_x_grad, Scalar min = 0., Scalar max = 0.)
output
:
Tensor(grad_out_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
x
]
kernel
:
func
:
clip_grad
-
backward_api
:
clip_grad
forward
:
clip (Tensor x, Scalar min, Scalar max) -> Tensor(out)
args
:
(Tensor x, Tensor out_grad, Scalar min = 0., Scalar max = 0.)
output
:
Tensor(x_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
x
]
kernel
:
func
:
clip_grad
backward
:
clip_double_grad
inplace
:
(out_grad -> x_grad)
-
backward_api
:
concat_double_grad
forward
:
concat_grad (Tensor[] x, Tensor grad_out, Scalar axis) -> Tensor[](grad_x)
args
:
(Tensor[] grad_x_grad, Scalar axis = 0)
output
:
Tensor(grad_out_grad)
infer_meta
:
func
:
ConcatInferMeta
param
:
[
grad_x_grad
,
axis
]
kernel
:
func
:
concat
-
backward_api
:
concat_grad
forward
:
concat (Tensor[] x, Scalar axis) -> Tensor(out)
args
:
(Tensor[] x, Tensor out_grad, Scalar axis = 0)
output
:
Tensor[](x_grad){x.size()}
infer_meta
:
func
:
UnchangedMultiInferMeta
param
:
[
x
]
kernel
:
func
:
concat_grad
no_need_buffer
:
x
backward
:
concat_double_grad
-
backward_api
:
conj_grad
forward
:
conj (Tensor x) -> Tensor(out)
args
:
(Tensor out_grad)
output
:
Tensor(x_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
out_grad
]
kernel
:
func
:
conj
-
backward_api
:
conv2d_grad
forward
:
conv2d (Tensor input, Tensor filter, int[] strides, int[] paddings, str paddding_algorithm, int groups, int[] dilations, str data_format, bool use_addto, int workspace_size_MB, bool exhaustive_search) -> Tensor(out)
args
:
(Tensor input, Tensor filter, Tensor out_grad, int[] strides, int[] paddings, str paddding_algorithm, int groups, int[] dilations, str data_format, bool use_addto, int workspace_size_MB, bool exhaustive_search)
output
:
Tensor(input_grad), Tensor(filter_grad)
invoke
:
conv2d_grad_impl(input, filter, out_grad, strides, paddings, paddding_algorithm, groups, dilations, data_format, use_addto, workspace_size_MB, exhaustive_search, input_grad, filter_grad)
backward
:
conv2d_grad_grad
-
backward_api
:
conv2d_grad_grad
forward
:
conv2d_grad (Tensor input, Tensor filter, Tensor grad_out, int[] strides, int[] paddings, str paddding_algorithm, int groups, int[] dilations, str data_format, bool use_addto, int workspace_size_MB, bool exhaustive_search) -> Tensor(grad_input), Tensor(grad_filter)
args
:
(Tensor input, Tensor filter, Tensor grad_out, Tensor grad_input_grad, Tensor grad_filter_grad, int[] strides, int[] paddings, str paddding_algorithm, int groups, int[] dilations, str data_format, bool use_addto, int workspace_size_MB, bool exhaustive_search)
output
:
Tensor(input_grad), Tensor(filter_grad), Tensor(grad_out_grad)
infer_meta
:
func
:
GeneralTernaryGradInferMeta
param
:
[
input
,
filter
,
grad_out
]
kernel
:
func
:
conv2d_grad_grad
use_gpudnn
:
true
optional
:
grad_input_grad, grad_filter_grad
-
backward_api
:
conv2d_transpose_double_grad
forward
:
conv2d_transpose_grad(Tensor x, Tensor filter, Tensor grad_out, int[] strides, int[] paddings, int[] output_padding, int[] output_size, str padding_algorithm, int groups, int[] dilations, str data_format) -> Tensor(grad_x), Tensor(grad_filter)
args
:
(Tensor x, Tensor filter, Tensor grad_out, Tensor grad_x_grad, Tensor grad_filter_grad, int[] strides, int[] paddings, int[] output_padding, int[] output_size, str padding_algorithm, int groups, int[] dilations, str data_format)
output
:
Tensor(x_grad), Tensor(filter_grad), Tensor(grad_out_grad)
infer_meta
:
func
:
Conv2dTransposeDoubleGradInferMeta
kernel
:
func
:
conv2d_transpose_grad_grad
use_gpudnn
:
true
-
backward_api
:
conv2d_transpose_grad
forward
:
conv2d_transpose(Tensor x, Tensor filter, int[] strides, int[] paddings, int[] output_padding, int[] output_size, str padding_algorithm, int groups, int[] dilations, str data_format) -> Tensor(out)
args
:
(Tensor x, Tensor filter, Tensor out_grad, int[] strides, int[] paddings, int[] output_padding, int[] output_size, str padding_algorithm, int groups, int[] dilations, str data_format)
output
:
Tensor(x_grad), Tensor(filter_grad)
infer_meta
:
func
:
ConvTransposeGradInferMeta
kernel
:
func
:
conv2d_transpose_grad
use_gpudnn
:
true
backward
:
conv2d_transpose_double_grad
-
backward_api
:
conv3d_grad
forward
:
conv3d (Tensor input, Tensor filter, int[] strides, int[] paddings, str paddding_algorithm, int groups, int[] dilations, str data_format, bool use_addto, int workspace_size_MB, bool exhaustive_search) -> Tensor(out)
args
:
(Tensor input, Tensor filter, Tensor out_grad, int[] strides, int[] paddings, str paddding_algorithm, int groups, int[] dilations, str data_format, bool use_addto, int workspace_size_MB, bool exhaustive_search)
output
:
Tensor(input_grad), Tensor(filter_grad)
invoke
:
conv3d_grad_impl(input, filter, out_grad, strides, paddings, paddding_algorithm, groups, dilations, data_format, use_addto, workspace_size_MB, exhaustive_search, input_grad, filter_grad)
backward
:
conv3d_grad_grad
-
backward_api
:
conv3d_grad_grad
forward
:
conv3d_grad (Tensor input, Tensor filter, Tensor grad_out, int[] strides, int[] paddings, str paddding_algorithm, int groups, int[] dilations, str data_format, bool use_addto, int workspace_size_MB, bool exhaustive_search) -> Tensor(grad_input), Tensor(grad_filter)
args
:
(Tensor input, Tensor filter, Tensor grad_out, Tensor grad_input_grad, Tensor grad_filter_grad, int[] strides, int[] paddings, str paddding_algorithm, int groups, int[] dilations, str data_format, bool use_addto, int workspace_size_MB, bool exhaustive_search)
output
:
Tensor(input_grad), Tensor(filter_grad), Tensor(grad_out_grad)
infer_meta
:
func
:
GeneralTernaryGradInferMeta
param
:
[
input
,
filter
,
grad_out
]
kernel
:
func
:
conv3d_grad_grad
use_gpudnn
:
true
optional
:
grad_input_grad, grad_filter_grad
-
backward_api
:
conv3d_transpose_grad
forward
:
conv3d_transpose(Tensor x, Tensor filter, int[] strides, int[] paddings, int[] output_padding, int[] output_size, str padding_algorithm, int groups, int[] dilations, str data_format) -> Tensor(out)
args
:
(Tensor x, Tensor filter, Tensor out_grad, int[] strides, int[] paddings, int[] output_padding, int[] output_size, str padding_algorithm, int groups, int[] dilations, str data_format)
output
:
Tensor(x_grad), Tensor(filter_grad)
infer_meta
:
func
:
ConvTransposeGradInferMeta
kernel
:
func
:
conv3d_transpose_grad
use_gpudnn
:
true
-
backward_api
:
cos_grad
forward
:
cos (Tensor x) -> Tensor(out)
args
:
(Tensor x, Tensor out_grad)
output
:
Tensor(x_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
x
]
kernel
:
func
:
cos_grad
inplace
:
(out_grad -> x_grad)
-
backward_api
:
cosh_grad
forward
:
cosh (Tensor x) -> Tensor(out)
args
:
(Tensor x, Tensor out_grad)
output
:
Tensor(x_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
x
]
kernel
:
func
:
cosh_grad
inplace
:
(out_grad -> x_grad)
-
backward_api
:
cross_entropy_with_softmax_grad
forward
:
cross_entropy_with_softmax (Tensor input, Tensor label, bool soft_label, bool use_softmax, bool numeric_stable_mode, int ignore_index, int axis) -> Tensor(softmax), Tensor(loss)
args
:
(Tensor label, Tensor softmax, Tensor loss_grad, bool soft_label, bool use_softmax, bool numeric_stable_mode, int ignore_index, int axis)
output
:
Tensor(input_grad)
infer_meta
:
func
:
CrossEntropyWithSoftmaxGradInferMeta
kernel
:
func
:
cross_entropy_with_softmax_grad
data_type
:
softmax
inplace
:
(softmax -> input_grad)
-
backward_api
:
cross_grad
forward
:
cross (Tensor x, Tensor y, int axis = 9) -> Tensor(out)
args
:
(Tensor x, Tensor y, Tensor out_grad, int axis)
output
:
Tensor(x_grad), Tensor(y_grad)
infer_meta
:
func
:
GeneralBinaryGradInferMeta
param
:
[
x
,
y
]
kernel
:
func
:
cross_grad
-
backward_api
:
cumprod_grad
forward
:
cumprod (Tensor x, int dim) -> Tensor(out)
args
:
(Tensor x, Tensor out, Tensor out_grad, int dim)
output
:
Tensor(x_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
x
]
kernel
:
func
:
cumprod_grad
-
backward_api
:
cumsum_grad
forward
:
cumsum(Tensor x, int axis, bool flatten, bool exclusive, bool reverse) -> Tensor(out)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
x
]
args
:
(Tensor out_grad, int axis, bool flatten, bool exclusive, bool reverse)
output
:
Tensor(x_grad)
invoke
:
cumsum(out_grad, axis, flatten, exclusive, !reverse)
-
backward_api
:
deformable_conv_grad
forward
:
deformable_conv(Tensor x, Tensor offset, Tensor filter, Tensor mask, int[] strides, int[] paddings, int[] dilations, int deformable_groups, int groups, int im2col_step) -> Tensor(out)
args
:
(Tensor x, Tensor offset, Tensor filter, Tensor mask, Tensor out_grad, int[] strides, int[] paddings, int[] dilations, int deformable_groups, int groups, int im2col_step)
output
:
Tensor(x_grad), Tensor(offset_grad), Tensor(filter_grad), Tensor(mask_grad)
infer_meta
:
func
:
DeformableConvGradInferMeta
kernel
:
func
:
deformable_conv_grad
data_type
:
x
optional
:
mask
-
backward_api
:
depthwise_conv2d_grad
forward
:
depthwise_conv2d (Tensor input, Tensor filter, int[] strides, int[] paddings, str paddding_algorithm, int groups, int[] dilations, str data_format, bool use_addto, int workspace_size_MB, bool exhaustive_search, bool fuse_relu, bool use_gpudnn) -> Tensor(out)
args
:
(Tensor input, Tensor filter, Tensor out_grad, int[] strides, int[] paddings, str paddding_algorithm, int groups, int[] dilations, str data_format, bool use_addto, int workspace_size_MB, bool exhaustive_search, bool fuse_relu, bool use_gpudnn)
output
:
Tensor(input_grad), Tensor(filter_grad)
infer_meta
:
func
:
GeneralBinaryGradInferMeta
param
:
[
input
,
filter
]
kernel
:
func
:
depthwise_conv2d_grad
param
:
[
input
,
filter
,
out_grad
,
strides
,
paddings
,
paddding_algorithm
,
groups
,
dilations
,
data_format
,
use_addto
,
workspace_size_MB
,
exhaustive_search
,
fuse_relu
]
use_gpudnn
:
use_gpudnn
backward
:
depthwise_conv2d_grad_grad
-
backward_api
:
depthwise_conv2d_grad_grad
forward
:
depthwise_conv2d_grad (Tensor input, Tensor filter, Tensor grad_out, int[] strides, int[] paddings, str paddding_algorithm, int groups, int[] dilations, str data_format, bool use_addto, int workspace_size_MB, bool exhaustive_search, bool fuse_relu, bool use_gpudnn) -> Tensor(grad_input), Tensor(grad_filter)
args
:
(Tensor input, Tensor filter, Tensor grad_out, Tensor grad_input_grad, Tensor grad_filter_grad, int[] strides, int[] paddings, str paddding_algorithm, int groups, int[] dilations, str data_format, bool use_addto, int workspace_size_MB, bool exhaustive_search, bool fuse_relu)
output
:
Tensor(input_grad), Tensor(filter_grad), Tensor(grad_out_grad)
infer_meta
:
func
:
GeneralTernaryGradInferMeta
param
:
[
input
,
filter
,
grad_out
]
kernel
:
func
:
depthwise_conv2d_grad_grad
optional
:
grad_input_grad, grad_filter_grad
-
backward_api
:
depthwise_conv2d_transpose_grad
forward
:
depthwise_conv2d_transpose(Tensor x, Tensor filter, int[] strides, int[] paddings, int[] output_padding, int[] output_size, str padding_algorithm, int groups, int[] dilations, str data_format) -> Tensor(out)
args
:
(Tensor x, Tensor filter, Tensor out_grad, int[] strides, int[] paddings, int[] output_padding, int[] output_size, str padding_algorithm, int groups, int[] dilations, str data_format)
output
:
Tensor(x_grad), Tensor(filter_grad)
infer_meta
:
func
:
ConvTransposeGradInferMeta
kernel
:
func
:
depthwise_conv2d_transpose_grad
-
backward_api
:
det_grad
forward
:
det (Tensor x) -> Tensor(out)
args
:
(Tensor x, Tensor out, Tensor out_grad)
output
:
Tensor(x_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
x
]
kernel
:
func
:
determinant_grad
-
backward_api
:
diagonal_grad
forward
:
diagonal (Tensor x, int offset, int axis1, int axis2) -> Tensor(out)
args
:
(Tensor x, Tensor out_grad, int offset = 0, int axis1 = 0, int axis2 = 1)
output
:
Tensor(x_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
x
]
kernel
:
func
:
diagonal_grad
no_need_buffer
:
x
-
backward_api
:
digamma_grad
forward
:
digamma (Tensor x) -> Tensor(out)
args
:
(Tensor x, Tensor out_grad)
output
:
Tensor(x_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
x
]
kernel
:
func
:
digamma_grad
-
backward_api
:
dist_grad
forward
:
dist (Tensor x, Tensor y, float p) -> Tensor(out)
args
:
(Tensor x, Tensor y, Tensor out, Tensor out_grad, float p)
output
:
Tensor(x_grad), Tensor(y_grad)
infer_meta
:
func
:
GeneralBinaryGradInferMeta
param
:
[
x
,
y
]
kernel
:
func
:
dist_grad
-
backward_api
:
divide_double_grad
forward
:
divide_grad (Tensor x, Tensor y, Tensor out, Tensor grad_out, int axis = -1) -> Tensor(grad_x), Tensor(grad_y)
args
:
(Tensor y, Tensor out, Tensor grad_x, Tensor grad_x_grad, Tensor grad_y_grad, int axis = -1)
output
:
Tensor(y_grad), Tensor(out_grad), Tensor(grad_out_grad)
infer_meta
:
func
:
GeneralTernaryGradInferMeta
param
:
[
y
,
grad_x
,
grad_x
]
kernel
:
func
:
divide_double_grad
data_type
:
out
optional
:
grad_x_grad, grad_y_grad
inplace
:
(grad_x_grad -> grad_out_grad)
-
backward_api
:
divide_grad
forward
:
divide (Tensor x, Tensor y) -> Tensor(out)
args
:
(Tensor x, Tensor y, Tensor out, Tensor out_grad, int axis = -1)
output
:
Tensor(x_grad), Tensor(y_grad)
infer_meta
:
func
:
GeneralBinaryGradInferMeta
param
:
[
x
,
y
]
kernel
:
func
:
divide_grad
backward
:
divide_double_grad
-
backward_api
:
dropout_grad
forward
:
dropout (Tensor x, Tensor seed_tensor, float p, bool is_test, str mode, int seed, bool fix_seed) -> Tensor(out), Tensor(mask)
args
:
(Tensor mask, Tensor out_grad, float p, bool is_test, str mode)
output
:
Tensor(x_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
out_grad
]
kernel
:
func
:
dropout_grad
-
backward_api
:
eigh_grad
forward
:
eigh (Tensor x, str uplo) -> Tensor(out_w), Tensor(out_v)
args
:
(Tensor out_w, Tensor out_v, Tensor out_w_grad, Tensor out_v_grad)
output
:
Tensor(x_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
out_v
]
kernel
:
func
:
eigh_grad
data_type
:
out_v
data_transform
:
skip_transform
:
out_w, out_w_grad
-
backward_api
:
einsum_grad
forward
:
einsum (Tensor[] x, str equation) -> Tensor(out), Tensor[](inner_cache), Tensor[](x_shape)
args
:
(Tensor[] x_shape, Tensor[] inner_cache, Tensor out_grad, str equation)
output
:
Tensor[](x_grad){x.size()}
infer_meta
:
func
:
UnchangedMultiInferMeta
param
:
[
x_shape
]
kernel
:
func
:
einsum_grad
-
backward_api
:
elementwise_pow_grad
forward
:
elementwise_pow(Tensor x, Tensor y) -> Tensor(out)
args
:
(Tensor x, Tensor y, Tensor out_grad, int axis=-1)
output
:
Tensor(x_grad), Tensor(y_grad)
infer_meta
:
func
:
GeneralBinaryGradInferMeta
param
:
[
x
,
y
]
kernel
:
func
:
elementwise_pow_grad
-
backward_api
:
elu_double_grad
forward
:
elu_grad (Tensor x, Tensor out, Tensor grad_out, float alpha)-> Tensor(grad_x)
args
:
(Tensor x, Tensor grad_out, Tensor grad_x_grad, float alpha)
output
:
Tensor(x_grad), Tensor(grad_out_grad)
infer_meta
:
func
:
GeneralBinaryGradInferMeta
param
:
[
x
,
x
]
kernel
:
func
:
elu_double_grad
inplace
:
(grad_x_grad -> grad_out_grad)
-
backward_api
:
elu_grad
forward
:
elu (Tensor x, float alpha) -> Tensor(out)
args
:
(Tensor x, Tensor out, Tensor out_grad, float alpha)
output
:
Tensor(x_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
x
]
kernel
:
func
:
elu_grad
backward
:
elu_double_grad
inplace
:
(out_grad -> x_grad)
-
backward_api
:
embedding_grad
forward
:
embedding (Tensor x, Tensor weight, int64_t padding_idx=-1, bool sparse=false) -> Tensor(out)
args
:
(Tensor x, Tensor weight, Tensor out_grad, int64_t padding_idx=-1, bool sparse=false)
output
:
Tensor(weight_grad)
invoke
:
embedding_grad_impl(x, weight, out_grad, padding_idx, sparse, weight_grad)
-
backward_api
:
erf_grad
forward
:
erf (Tensor x) -> Tensor(out)
args
:
(Tensor x, Tensor out_grad)
output
:
Tensor(x_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
x
]
kernel
:
func
:
erf_grad
data_type
:
out_grad
-
backward_api
:
erfinv_grad
forward
:
erfinv (Tensor x) -> Tensor(out)
args
:
(Tensor out, Tensor out_grad)
output
:
Tensor(x_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
out
]
kernel
:
func
:
erfinv_grad
-
backward_api
:
exp_grad
forward
:
exp (Tensor x) -> Tensor(out)
args
:
(Tensor out, Tensor out_grad)
output
:
Tensor(x_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
out
]
kernel
:
func
:
exp_grad
inplace
:
(out_grad -> x_grad)
-
backward_api
:
expand_as_grad
forward
:
expand_as (Tensor x, Tensor y, int[] target_shape) -> Tensor(out)
args
:
(Tensor x, Tensor out_grad, int[] target_shape)
output
:
Tensor(x_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
x
]
kernel
:
func
:
expand_as_grad
no_need_buffer
:
x
-
backward_api
:
expand_double_grad
forward
:
expand_grad (Tensor x, Tensor grad_out, IntArray shape) -> Tensor(grad_x)
args
:
(Tensor grad_x_grad, IntArray shape)
output
:
Tensor(grad_out_grad)
infer_meta
:
func
:
ExpandInferMeta
kernel
:
func
:
expand
-
backward_api
:
expand_grad
forward
:
expand (Tensor x, IntArray shape) -> Tensor(out)
args
:
(Tensor x, Tensor out_grad, IntArray shape)
output
:
Tensor(x_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
x
]
kernel
:
func
:
expand_grad
no_need_buffer
:
x
backward
:
expand_double_grad
-
backward_api
:
expm1_grad
forward
:
expm1 (Tensor x) -> Tensor(out)
args
:
(Tensor out, Tensor out_grad)
output
:
Tensor(x_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
out
]
kernel
:
func
:
expm1_grad
inplace
:
(out_grad -> x_grad)
-
backward_api
:
flatten_grad
forward
:
flatten(Tensor x, int start_axis, int stop_axis) -> Tensor(out), Tensor(xshape)
args
:
(Tensor xshape, Tensor out_grad)
output
:
Tensor(x_grad)
infer_meta
:
func
:
KernelWithXShapeInferMeta
param
:
[
xshape
]
kernel
:
func
:
flatten_grad
data_type
:
out_grad
backend
:
out_grad
layout
:
out_grad
inplace
:
(out_grad -> x_grad)
-
backward_api
:
flip_grad
forward
:
flip (Tensor x, int[] axis) -> Tensor(out)
args
:
(Tensor out_grad, int[] axis)
output
:
Tensor(x_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
out_grad
]
kernel
:
func
:
flip
-
backward_api
:
floor_grad
forward
:
floor(Tensor x) -> Tensor(out)
args
:
(Tensor out_grad)
output
:
Tensor(x_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
out_grad
]
kernel
:
func
:
floor_grad
inplace
:
(out_grad -> x_grad)
-
backward_api
:
fmax_grad
forward
:
fmax(Tensor x, Tensor y, int axis) -> Tensor(out)
args
:
(Tensor x, Tensor y, Tensor out_grad, int axis)
output
:
Tensor(x_grad), Tensor(y_grad)
infer_meta
:
func
:
GeneralBinaryGradInferMeta
param
:
[
x
,
y
]
kernel
:
func
:
fmax_grad
-
backward_api
:
fmin_grad
forward
:
fmin(Tensor x, Tensor y, int axis) -> Tensor(out)
args
:
(Tensor x, Tensor y, Tensor out_grad, int axis)
output
:
Tensor(x_grad), Tensor(y_grad)
infer_meta
:
func
:
GeneralBinaryGradInferMeta
param
:
[
x
,
y
]
kernel
:
func
:
fmin_grad
-
backward_api
:
frobenius_norm_grad
forward
:
frobenius_norm(Tensor x, int64_t[] axis, bool keep_dim, bool reduce_all) -> Tensor(out)
args
:
(Tensor x, Tensor out, Tensor out_grad, int64_t[] axis, bool keep_dim, bool reduce_all)
output
:
Tensor(x_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
x
]
kernel
:
func
:
frobenius_norm_grad
-
backward_api
:
gather_grad
forward
:
gather(Tensor x, Tensor index, Scalar axis=0) -> Tensor(out)
args
:
(Tensor x, Tensor index, Tensor out_grad, Scalar axis=0, bool overwrite=false)
output
:
Tensor(x_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
x
]
kernel
:
data_type
:
x
func
:
gather_grad
no_need_buffer
:
x
-
backward_api
:
gather_nd_grad
forward
:
gather_nd (Tensor x, Tensor index) -> Tensor(out)
args
:
(Tensor x, Tensor index, Tensor out_grad)
output
:
Tensor(x_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
x
]
kernel
:
func
:
gather_nd_grad
no_need_buffer
:
x
-
backward_api
:
gelu_grad
forward
:
gelu(Tensor x, bool approximate) -> Tensor(out)
args
:
(Tensor x, Tensor out_grad, bool approximate)
output
:
Tensor(x_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
x
]
kernel
:
func
:
gelu_grad
-
backward_api
:
graph_send_recv_grad
forward
:
graph_send_recv (Tensor x, Tensor src_index, Tensor dst_index, str pool_type = "SUM", int64_t out_size = 0) -> Tensor(out), Tensor(dst_count)
args
:
(Tensor x, Tensor src_index, Tensor dst_index, Tensor out, Tensor dst_count, Tensor out_grad, str pool_type = "SUM")
output
:
Tensor(x_grad)
infer_meta
:
func
:
GeneralUnaryGradInferMeta
param
:
[
x
]
kernel
:
func
:
graph_send_recv_grad
data_type
:
out_grad
optional
:
out, dst_count
-
backward_api
:
group_norm_grad
forward
:
group_norm (Tensor x, Tensor scale, Tensor bias, float epsilon, int groups, str data_layout) -> Tensor(y), Tensor(mean), Tensor(variance)
args
:
(Tensor x, Tensor scale, Tensor bias, Tensor y, Tensor mean, Tensor variance, Tensor y_grad, float epsilon, int groups, str data_layout)
output
:
Tensor(x_grad), Tensor(scale_grad), Tensor(bias_grad)
infer_meta
:
func
:
GeneralTernaryGradInferMeta
param
:
[
y
,
scale
,
bias
]
kernel
:
func
:
group_norm_grad
data_type
:
y_grad
optional
:
scale, bias
inplace
:
(y_grad -> x_grad)
-
backward_api
:
gumbel_softmax_grad
forward
:
gumbel_softmax (Tensor x, float temperature, bool hard, int axis) -> Tensor(out)
args
:
(Tensor out, Tensor out_grad, int axis)
output
:
Tensor(x_grad)
infer_meta
:
func
:
GumbelSoftmaxGradInferMeta
param
:
[
out
,
out_grad
,
axis
]
kernel
:
func
:
gumbel_softmax_grad
-
backward_api
:
hard_shrink_grad
forward
:
hard_shrink (Tensor x, float threshold) -> Tensor(out)
args
:
(Tensor x, Tensor out_grad, float threshold)
output
:
Tensor(x_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
x
]
kernel
:
func
:
hard_shrink_grad
inplace
:
(out_grad -> x_grad)
-
backward_api
:
hard_sigmoid_grad
forward
:
hard_sigmoid (Tensor x, float slope, float offset) -> Tensor(out)
args
:
(Tensor out, Tensor out_grad, float slope, float offset)
output
:
Tensor(x_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
out
]
kernel
:
func
:
hard_sigmoid_grad
inplace
:
(out_grad -> x_grad)
-
backward_api
:
hard_swish_grad
forward
:
hard_swish (Tensor x, float threshold = 6.0, float scale = 6.0, float offset = 3.0) -> Tensor(out)
args
:
(Tensor x, Tensor out_grad, float threshold, float scale, float offset)
output
:
Tensor(x_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
x
]
kernel
:
func
:
hard_swish_grad
inplace
:
(out_grad -> x_grad)
-
backward_api
:
huber_loss_grad
forward
:
huber_loss (Tensor input, Tensor label, float delta) -> Tensor(out), Tensor(residual)
args
:
(Tensor residual, Tensor out_grad, float delta)
output
:
Tensor(input_grad), Tensor(label_grad)
infer_meta
:
func
:
GeneralBinaryGradInferMeta
param
:
[
residual
,
residual
]
kernel
:
func
:
huber_loss_grad
-
backward_api
:
imag_grad
forward
:
imag (Tensor x) -> Tensor(out)
args
:
(Tensor out_grad)
output
:
Tensor(x_grad)
invoke
:
imag_grad_impl(out_grad, x_grad)
-
backward_api
:
index_sample_grad
forward
:
index_sample (Tensor x, Tensor index) -> Tensor(out)
args
:
(Tensor x, Tensor index, Tensor out_grad)
output
:
Tensor(x_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
x
]
kernel
:
func
:
index_sample_grad
data_type
:
out_grad
no_need_buffer
:
x
-
backward_api
:
index_select_grad
forward
:
index_select(Tensor x, Tensor index, int dim) -> Tensor(out)
args
:
(Tensor x, Tensor index, Tensor out_grad, int dim)
output
:
Tensor(x_grad)
infer_meta
:
func
:
UnchangedInferMeta
param
:
[
x
]
kernel
:
func
:
index_select_grad
data_type
:
x
no_need_buffer
:
x
-
backward_api
:
instance_norm_double_grad
forward
:
instance_norm_grad(Tensor x, Tensor fwd_scale, Tensor saved_mean, Tensor saved_variance, Tensor grad_y, float epsilon) -> Tensor(grad_x), Tensor(grad_scale), Tensor(grad_bias)
args
:
(Tensor x, Tensor fwd_scale, Tensor saved_mean, Tensor saved_variance, Tensor grad_y, Tensor grad_x_grad, Tensor grad_scale_grad, Tensor grad_bias_grad, float epsilon)
output
:
Tensor(x_grad), Tensor(fwd_scale_grad), Tensor(grad_y_grad)
infer_meta
:
func
:
InstanceNormDoubleGradInferMeta
kernel
:
func
:
instance_norm_double_grad
data_type
:
x
optional
:
fwd_scale, grad_x_grad, grad_scale_grad, grad_bias_grad
-
backward_api
:
instance_norm_grad
forward
:
instance_norm(Tensor x, Tensor scale, Tensor bias, float epsilon) -> Tensor(y), Tensor(saved_mean), Tensor(saved_variance)
args
:
(Tensor x, Tensor scale, Tensor saved_mean, Tensor saved_variance, Tensor y_grad, float epsilon)
output
:
Tensor(x_grad), Tensor(scale_grad), Tensor(bias_grad)
infer_meta
:
func
:
InstanceNormGradInferMeta
kernel
:
func
:
instance_norm_grad
data_type
:
x
optional
:
scale
backward
:
instance_norm_double_grad
-
backward_api
:
kldiv_loss_grad
forward
:
kldiv_loss(Tensor x, Tensor label, str reduction) -> Tensor(out)
  args : (Tensor x, Tensor label, Tensor out_grad, str reduction)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : kldiv_loss_grad
  no_need_buffer : x

- backward_api : kron_grad
  forward : kron (Tensor x, Tensor y) -> Tensor(out)
  args : (Tensor x, Tensor y, Tensor out_grad)
  output : Tensor(x_grad), Tensor(y_grad)
  infer_meta :
    func : GeneralBinaryGradInferMeta
    param : [x, y]
  kernel :
    func : kron_grad
    data_type : out_grad

- backward_api : kthvalue_grad
  forward : kthvalue(Tensor x, int k, int axis, bool keepdim) -> Tensor(out), Tensor(indices)
  args : (Tensor x, Tensor indices, Tensor out_grad, int k, int axis, bool keepdim)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : kthvalue_grad

- backward_api : label_smooth_grad
  forward : label_smooth (Tensor label, Tensor prior_dist, float epsilon) -> Tensor(out)
  args : (Tensor out_grad, float epsilon)
  output : Tensor(label_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [out_grad]
  kernel :
    func : label_smooth_grad

- backward_api : layer_norm_grad
  forward : layer_norm (Tensor x, Tensor scale, Tensor bias, float epsilon, int begin_norm_axis, bool is_test) -> Tensor(out), Tensor(mean), Tensor(variance)
  args : (Tensor x, Tensor scale, Tensor bias, Tensor mean, Tensor variance, Tensor out_grad, float epsilon, int begin_norm_axis, bool is_test)
  output : Tensor(x_grad), Tensor(scale_grad), Tensor(bias_grad)
  infer_meta :
    func : LayerNormGradInferMeta
    param : [x, scale, bias]
  kernel :
    func : layer_norm_grad
    data_type : out_grad
  no_need_buffer : bias
  optional : scale, bias

- backward_api : leaky_relu_double_grad
  forward : leaky_relu_grad (Tensor x, Tensor grad_out, float alpha) -> Tensor(grad_x)
  args : (Tensor x, Tensor grad_x_grad, float alpha)
  output : Tensor(grad_out_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [grad_x_grad]
  kernel :
    func : leaky_relu_double_grad
  inplace : (grad_x_grad -> grad_out_grad)

- backward_api : leaky_relu_grad
  forward : leaky_relu (Tensor x, float alpha) -> Tensor(out)
  args : (Tensor x, Tensor out_grad, float alpha)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : leaky_relu_grad
  backward : leaky_relu_double_grad
  inplace : (out_grad -> x_grad)

- backward_api : lerp_grad
  forward : lerp (Tensor x, Tensor y, Tensor weight) -> Tensor(out)
  args : (Tensor x, Tensor y, Tensor weight, Tensor out, Tensor out_grad)
  output : Tensor(x_grad), Tensor(y_grad)
  infer_meta :
    func : GeneralBinaryGradInferMeta
    param : [x, y]
  kernel :
    func : lerp_grad

- backward_api : lgamma_grad
  forward : lgamma(Tensor x) -> Tensor(out)
  args : (Tensor x, Tensor out_grad)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : lgamma_grad

- backward_api : log10_grad
  forward : log10 (Tensor x) -> Tensor(out)
  args : (Tensor x, Tensor out_grad)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : log10_grad
  inplace : (out_grad -> x_grad)

- backward_api : log1p_grad
  forward : log1p (Tensor x) -> Tensor(out)
  args : (Tensor x, Tensor out_grad)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : log1p_grad
  inplace : (out_grad -> x_grad)

- backward_api : log2_grad
  forward : log2 (Tensor x) -> Tensor(out)
  args : (Tensor x, Tensor out_grad)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : log2_grad
  inplace : (out_grad -> x_grad)

- backward_api : log_double_grad
  forward : log_grad (Tensor x, Tensor grad_out) -> Tensor(grad_x)
  args : (Tensor x, Tensor grad_out, Tensor grad_x_grad)
  output : Tensor(x_grad), Tensor(grad_out_grad)
  infer_meta :
    func : GeneralBinaryGradInferMeta
    param : [x, x]
  kernel :
    func : log_double_grad
  inplace : (grad_x_grad -> grad_out_grad)

- backward_api : log_grad
  forward : log (Tensor x) -> Tensor(out)
  args : (Tensor x, Tensor out_grad)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : log_grad
  backward : log_double_grad
  inplace : (out_grad -> x_grad)

- backward_api : log_loss_grad
  forward : log_loss (Tensor input, Tensor label, float epsilon) -> Tensor(out)
  args : (Tensor input, Tensor label, Tensor out_grad, float epsilon)
  output : Tensor(input_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [input]
  kernel :
    func : log_loss_grad

- backward_api : log_softmax_grad
  forward : log_softmax(Tensor x, int axis) -> Tensor(out)
  args : (Tensor out, Tensor out_grad, int axis)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [out]
  kernel :
    func : log_softmax_grad

- backward_api : logcumsumexp_grad
  forward : logcumsumexp(Tensor x, int axis, bool flatten, bool exclusive, bool reverse) -> Tensor(out)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  args : (Tensor x, Tensor out, Tensor out_grad, int axis, bool flatten, bool exclusive, bool reverse)
  output : Tensor(x_grad)
  kernel :
    func : logcumsumexp_grad

- backward_api : logit_grad
  forward : logit (Tensor x, float eps = 1e-6f) -> Tensor(out)
  args : (Tensor x, Tensor out_grad, float eps)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : logit_grad

- backward_api : logsigmoid_grad
  forward : logsigmoid (Tensor x) -> Tensor(out)
  args : (Tensor x, Tensor out_grad)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : logsigmoid_grad
  inplace : (out_grad -> x_grad)

- backward_api : logsumexp_grad
  forward : logsumexp(Tensor x, int64_t[] axis, bool keepdim, bool reduce_all) -> Tensor(out)
  args : (Tensor x, Tensor out, Tensor out_grad, int64_t[] axis, bool keepdim, bool reduce_all)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : logsumexp_grad

- backward_api : masked_select_grad
  forward : masked_select (Tensor x, Tensor mask) -> Tensor(out)
  args : (Tensor x, Tensor mask, Tensor out_grad)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : masked_select_grad
    data_type : x
  no_need_buffer : x

- backward_api : matmul_double_grad
  forward : matmul_grad (Tensor x, Tensor y, Tensor grad_out, bool transpose_x=false, bool transpose_y=false) -> Tensor(grad_x), Tensor(grad_y)
  args : (Tensor x, Tensor y, Tensor grad_out, Tensor grad_x_grad, Tensor grad_y_grad, bool transpose_x=false, bool transpose_y=false)
  output : Tensor(x_grad), Tensor(y_grad), Tensor(grad_out_grad)
  infer_meta :
    func : GeneralTernaryGradInferMeta
    param : [x, y, grad_out]
  kernel :
    func : matmul_double_grad
  backward : matmul_triple_grad
  optional : grad_x_grad, grad_y_grad

- backward_api : matmul_grad
  forward : matmul (Tensor x, Tensor y, bool transpose_x=false, bool transpose_y=false) -> Tensor(out)
  args : (Tensor x, Tensor y, Tensor out_grad, bool transpose_x=false, bool transpose_y=false)
  output : Tensor(x_grad), Tensor(y_grad)
  infer_meta :
    func : GeneralBinaryGradInferMeta
    param : [x, y]
  kernel :
    func : matmul_grad
  backward : matmul_double_grad

- backward_api : matmul_triple_grad
  forward : matmul_double_grad (Tensor x, Tensor y, Tensor fwd_grad_out, Tensor fwd_grad_grad_x, Tensor fwd_grad_grad_y, bool transpose_x=false, bool transpose_y=false) -> Tensor(grad_x), Tensor(grad_y), Tensor(grad_grad_out)
  args : (Tensor x, Tensor y, Tensor fwd_grad_out, Tensor fwd_grad_grad_x, Tensor fwd_grad_grad_y, Tensor grad_x_grad, Tensor grad_y_grad, Tensor grad_grad_out_grad, bool transpose_x=false, bool transpose_y=false)
  output : Tensor(x_grad), Tensor(y_grad), Tensor(fwd_grad_out_grad), Tensor(fwd_grad_grad_x_grad), Tensor(fwd_grad_grad_y_grad)
  infer_meta :
    func : GeneralQuinaryGradInferMeta
    param : [x, y, fwd_grad_out, fwd_grad_grad_x, fwd_grad_grad_y]
  kernel :
    func : matmul_triple_grad
  optional : grad_x_grad, grad_y_grad, grad_grad_out_grad

- backward_api : matrix_power_grad
  forward : matrix_power (Tensor x, int n) -> Tensor(out)
  args : (Tensor x, Tensor out, Tensor out_grad, int n)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : matrix_power_grad

- backward_api : max_grad
  forward : max (Tensor x, int64_t[] dims={}, bool keep_dim=false) -> Tensor(out)
  args : (Tensor x, Tensor out, Tensor out_grad, int64_t[] dims={}, bool keep_dim=false, bool reduce_all=false)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : max_grad

- backward_api : max_pool2d_with_index_grad
  forward : max_pool2d_with_index(Tensor x, int[] kernel_size, int[] strides, int[] paddings, bool global_pooling, bool adaptive) -> Tensor(out), Tensor(mask)
  args : (Tensor x, Tensor mask, Tensor out_grad, int[] kernel_size, int[] strides, int[] paddings, bool global_pooling, bool adaptive)
  output : Tensor(x_grad)
  infer_meta :
    func : MaxPoolWithIndexGradInferMeta
  kernel :
    func : max_pool2d_with_index_grad

- backward_api : max_pool3d_with_index_grad
  forward : max_pool3d_with_index(Tensor x, int[] kernel_size, int[] strides, int[] paddings, bool global_pooling, bool adaptive) -> Tensor(out), Tensor(mask)
  args : (Tensor x, Tensor mask, Tensor out_grad, int[] kernel_size, int[] strides, int[] paddings, bool global_pooling, bool adaptive)
  output : Tensor(x_grad)
  infer_meta :
    func : MaxPoolWithIndexGradInferMeta
  kernel :
    func : max_pool3d_with_index_grad

- backward_api : maximum_grad
  forward : maximum(Tensor x, Tensor y) -> Tensor(out)
  args : (Tensor x, Tensor y, Tensor out_grad, int axis=-1)
  output : Tensor(x_grad), Tensor(y_grad)
  infer_meta :
    func : GeneralBinaryGradInferMeta
    param : [x, y]
  kernel :
    func : maximum_grad

- backward_api : maxout_grad
  forward : maxout(Tensor x, int groups, int axis) -> Tensor(out)
  args : (Tensor x, Tensor out, Tensor out_grad, int groups, int axis)
  output : Tensor(x_grad)
  infer_meta :
    func : GeneralUnaryGradInferMeta
    param : [x]
  kernel :
    func : maxout_grad

- backward_api : mean_all_grad
  forward : mean_all(Tensor x) -> Tensor(out)
  args : (Tensor x, Tensor out_grad)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : mean_all_grad

- backward_api : mean_double_grad
  forward : mean_grad (Tensor x, Tensor grad_out, int64_t[] dims={}, bool keep_dim=false, bool reduce_all = false) -> Tensor(grad_x)
  args : (Tensor grad_x_grad, int64_t[] dims={}, bool keep_dim=false, bool reduce_all=false)
  output : Tensor(grad_out_grad)
  invoke : mean(grad_x_grad, dims, keep_dim)

- backward_api : mean_grad
  forward : mean (Tensor x, int64_t[] dims={}, bool keep_dim=false) -> Tensor(out)
  args : (Tensor x, Tensor out_grad, int64_t[] dims={}, bool keep_dim=false, bool reduce_all=false)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : mean_grad
  backward : mean_double_grad
  no_need_buffer : x

- backward_api : meshgrid_grad
  forward : meshgrid (Tensor[] inputs) -> Tensor[](outputs)
  args : (Tensor[] inputs, Tensor[] outputs_grad)
  output : Tensor[](inputs_grad){inputs.size()}
  infer_meta :
    func : MeshgridGradInferMeta
  kernel :
    func : meshgrid_grad

- backward_api : min_grad
  forward : min (Tensor x, int64_t[] dims={}, bool keep_dim=false) -> Tensor(out)
  args : (Tensor x, Tensor out, Tensor out_grad, int64_t[] dims={}, bool keep_dim=false, bool reduce_all=false)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : min_grad

- backward_api : minimum_grad
  forward : minimum(Tensor x, Tensor y) -> Tensor(out)
  args : (Tensor x, Tensor y, Tensor out_grad, int axis=-1)
  output : Tensor(x_grad), Tensor(y_grad)
  infer_meta :
    func : GeneralBinaryGradInferMeta
    param : [x, y]
  kernel :
    func : minimum_grad

- backward_api : mish_grad
  forward : mish (Tensor x, float threshold) -> Tensor(out)
  args : (Tensor x, Tensor out_grad, float threshold)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : mish_grad
  inplace : (out_grad -> x_grad)

- backward_api : mode_grad
  forward : mode(Tensor x, int axis, bool keepdim) -> Tensor(out), Tensor(indices)
  args : (Tensor x, Tensor indices, Tensor out_grad, int axis, bool keepdim)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : mode_grad

- backward_api : modulo_grad
  forward : modulo (Tensor x, Tensor y) -> Tensor(out)
  args : (Tensor x, Tensor y, Tensor out_grad, int axis = -1)
  output : Tensor(x_grad), Tensor(y_grad)
  infer_meta :
    func : GeneralBinaryGradInferMeta
    param : [x, y]
  kernel :
    func : modulo_grad
  no_need_buffer : x, y

- backward_api : multi_dot_grad
  forward : multi_dot (Tensor[] x) -> Tensor(out)
  args : (Tensor[] x, Tensor out_grad)
  output : Tensor[](x_grad) {x.size()}
  infer_meta :
    func : MultiDotGradInferMeta
  kernel :
    func : multi_dot_grad

- backward_api : multiplex_grad
  forward : multiplex (Tensor[] ins, Tensor ids) -> Tensor(out)
  args : (Tensor[] ins, Tensor ids, Tensor out_grad)
  output : Tensor[](ins_grad){ins.size()}
  infer_meta :
    func : MultiplexGradInferMeta
    param : [ids, out_grad]
  kernel :
    func : multiplex_grad
    param : [ids, out_grad]

- backward_api : multiply_double_grad
  forward : multiply_grad (Tensor x, Tensor y, Tensor grad_out, int axis = -1) -> Tensor(grad_x), Tensor(grad_y)
  args : (Tensor x, Tensor y, Tensor grad_out, Tensor grad_x_grad, Tensor grad_y_grad, int axis = -1)
  output : Tensor(x_grad), Tensor(y_grad), Tensor(grad_out_grad)
  infer_meta :
    func : GeneralTernaryGradInferMeta
    param : [x, y, grad_out]
  kernel :
    func : multiply_double_grad
  optional : grad_x_grad, grad_y_grad
  backward : multiply_triple_grad
  inplace : (grad_x_grad -> grad_out_grad)

- backward_api : multiply_grad
  forward : multiply (Tensor x, Tensor y) -> Tensor(out)
  args : (Tensor x, Tensor y, Tensor out_grad, int axis = -1)
  output : Tensor(x_grad), Tensor(y_grad)
  infer_meta :
    func : GeneralBinaryGradInferMeta
    param : [x, y]
  kernel :
    func : multiply_grad
  backward : multiply_double_grad

- backward_api : multiply_triple_grad
  forward : multiply_double_grad (Tensor x, Tensor y, Tensor fwd_grad_out, Tensor fwd_grad_grad_x, Tensor fwd_grad_grad_y, int aixs = -1) -> Tensor(grad_x), Tensor(grad_y), Tensor(grad_grad_out)
  args : (Tensor x, Tensor y, Tensor fwd_grad_out, Tensor fwd_grad_grad_x, Tensor fwd_grad_grad_y, Tensor grad_x_grad, Tensor grad_y_grad, Tensor grad_grad_out_grad, int axis = -1)
  output : Tensor(x_grad), Tensor(y_grad), Tensor(fwd_grad_out_grad), Tensor(fwd_grad_grad_x_grad), Tensor(fwd_grad_grad_y_grad)
  infer_meta :
    func : GeneralQuinaryGradInferMeta
    param : [x, y, fwd_grad_out, x, y]
  kernel :
    func : multiply_triple_grad
  optional : fwd_grad_grad_x, fwd_grad_grad_y, grad_grad_out_grad

- backward_api : mv_grad
  forward : mv (Tensor x, Tensor vec) -> Tensor(out)
  args : (Tensor x, Tensor vec, Tensor out_grad)
  output : Tensor(x_grad), Tensor(vec_grad)
  infer_meta :
    func : GeneralBinaryGradInferMeta
    param : [x, vec]
  kernel :
    func : mv_grad

- backward_api : nll_loss_grad
  forward : nll_loss (Tensor input, Tensor label, Tensor weight, int64_t ignore_index, str reduction) -> Tensor(out), Tensor(total_weight)
  args : (Tensor input, Tensor label, Tensor weight, Tensor total_weight, Tensor out_grad, int64_t ignore_index, str reduction)
  output : Tensor(input_grad)
  infer_meta :
    func : NllLossGradInferMeta
  kernel :
    func : nll_loss_grad
    data_type : input
  optional : weight

- backward_api : norm_grad
  forward : norm (Tensor x, int axis, float epsilon, bool is_test) -> Tensor(out), Tensor(norm)
  args : (Tensor x, Tensor norm, Tensor out_grad, int axis, float epsilon, bool is_test)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : norm_grad

- backward_api : p_norm_grad
  forward : p_norm(Tensor x, float porder, int axis, float epsilon, bool keepdim, bool asvector=false) -> Tensor(out)
  args : (Tensor x, Tensor out, Tensor out_grad, float porder, int axis, float epsilon, bool keepdim, bool asvector)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : p_norm_grad

- backward_api : pad3d_double_grad
  forward : pad3d_grad(Tensor x, Tensor grad_out, IntArray paddings, str mode, float pad_value, str data_format) -> Tensor(grad_x)
  args : (Tensor grad_x_grad, IntArray paddings, str mode, float pad_value, str data_format)
  output : Tensor(grad_out_grad)
  infer_meta :
    func : Pad3dInferMeta
  kernel :
    func : pad3d

- backward_api : pad3d_grad
  forward : pad3d(Tensor x, IntArray paddings, str mode, float pad_value, str data_format) -> Tensor(out)
  args : (Tensor x, Tensor out_grad, IntArray paddings, str mode, float pad_value, str data_format)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : pad3d_grad
  no_need_buffer : x
  backward : pad3d_double_grad

- backward_api : pad_double_grad
  forward : pad_grad(Tensor x, Tensor grad_out, int[] paddings, float pad_value) -> Tensor(grad_x)
  args : (Tensor grad_x_grad, int[] paddings, float pad_value)
  output : Tensor(grad_out_grad)
  infer_meta :
    func : PadInferMeta
  kernel :
    func : pad

- backward_api : pad_grad
  forward : pad(Tensor x, int[] paddings, float pad_value) -> Tensor(out)
  args : (Tensor x, Tensor out_grad, int[] paddings, float pad_value)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : pad_grad
    param : [out_grad, paddings, pad_value]
  no_need_buffer : x
  backward : pad_double_grad

- backward_api : pixel_shuffle_grad
  forward : pixel_shuffle (Tensor x, int upscale_factor, str data_format) -> Tensor(out)
  args : (Tensor out_grad, int upscale_factor, str data_format)
  output : Tensor(x_grad)
  infer_meta :
    func : PixelShuffleGradInferMeta
  kernel :
    func : pixel_shuffle_grad

- backward_api : poisson_grad
  forward : poisson (Tensor x) -> Tensor(out)
  args : (Tensor out_grad)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [out_grad]
  kernel :
    func : poisson_grad

- backward_api : pool2d_double_grad
  forward : pool2d_grad(Tensor x, Tensor out, Tensor grad_out, int[] kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm) -> Tensor(grad_x)
  args : (Tensor grad_x_grad, int[] kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm)
  output : Tensor(grad_out_grad)
  infer_meta :
    func : PoolInferMeta
  kernel :
    func : pool2d_double_grad
    use_gpudnn : true

- backward_api : pool2d_grad
  forward : pool2d(Tensor x, int[] kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm) -> Tensor(out)
  args : (Tensor x, Tensor out, Tensor out_grad, int[] kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm)
  output : Tensor(x_grad)
  infer_meta :
    func : PoolGradInferMeta
  kernel :
    func : pool2d_grad
    use_gpudnn : true
  backward : pool2d_double_grad

- backward_api : pool2d_grad_gpudnn_unused
  forward : pool2d_gpudnn_unused(Tensor x, int[] kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm) -> Tensor(out)
  args : (Tensor x, Tensor out, Tensor out_grad, int[] kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm)
  output : Tensor(x_grad)
  infer_meta :
    func : PoolGradInferMeta
  kernel :
    func : pool2d_grad
    use_gpudnn : false

- backward_api : pool3d_grad
  forward : pool3d(Tensor x, int[] kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm) -> Tensor(out)
  args : (Tensor x, Tensor out, Tensor out_grad, int[] kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm)
  output : Tensor(x_grad)
  infer_meta :
    func : PoolGradInferMeta
  kernel :
    func : pool3d_grad
    use_gpudnn : true

- backward_api : pow_grad
  forward : pow(Tensor x, Scalar s) -> Tensor(out)
  args : (Tensor x, Tensor out_grad, Scalar s=-1)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : pow_grad
  inplace : (out_grad -> x_grad)

- backward_api : prelu_grad
  forward : prelu(Tensor x, Tensor alpha, str data_format, str mode) -> Tensor(out)
  args : (Tensor x, Tensor alpha, Tensor out_grad, str data_format, str mode)
  output : Tensor(x_grad), Tensor(alpha_grad)
  infer_meta :
    func : GeneralBinaryGradInferMeta
    param : [x, alpha]
  kernel :
    func : prelu_grad

- backward_api : psroi_pool_grad
  forward : psroi_pool (Tensor x, Tensor boxes, Tensor boxes_num, int pooled_height, int pooled_width, int output_channels, float spatial_scale) -> Tensor(out)
  args : (Tensor x, Tensor boxes, Tensor boxes_num, Tensor out_grad, int pooled_height, int pooled_width, int output_channels, float spatial_scale)
  output : Tensor(x_grad)
  infer_meta :
    func : GeneralUnaryGradInferMeta
    param : [x]
  kernel :
    func : psroi_pool_grad
    data_type : x
  optional : boxes_num

# output is optional
- backward_api : put_along_axis_grad
  forward : put_along_axis (Tensor x, Tensor index, Tensor value, int axis, str reduce) -> Tensor(out)
  args : (Tensor x, Tensor index, Tensor out_grad, int axis, str reduce)
  output : Tensor(x_grad), Tensor(value_grad)
  infer_meta :
    func : GeneralBinaryGradInferMeta
    param : [x, index]
  kernel :
    func : put_along_axis_grad

- backward_api : real_grad
  forward : real (Tensor x) -> Tensor(out)
  args : (Tensor out_grad)
  output : Tensor(x_grad)
  invoke : real_grad_impl(out_grad, x_grad)

- backward_api : reciprocal_grad
  forward : reciprocal (Tensor x) -> Tensor(out)
  args : (Tensor out, Tensor out_grad)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [out]
  kernel :
    func : reciprocal_grad
  inplace : (out_grad -> x_grad)

- backward_api : reduce_prod_grad
  forward : reduce_prod (Tensor x, int64_t[] dims, bool keep_dim, bool reduce_all) -> Tensor(out)
  args : (Tensor x, Tensor out, Tensor out_grad, int64_t[] dims, bool keep_dim, bool reduce_all)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : prod_grad

- backward_api : relu_double_grad
  forward : relu_grad (Tensor out, Tensor grad_out) -> Tensor(grad_x)
  args : (Tensor out, Tensor grad_x_grad)
  output : Tensor(grad_out_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [out]
  kernel :
    func : relu_double_grad
  inplace : (grad_x_grad -> grad_out_grad)

- backward_api : relu_grad
  forward : relu (Tensor x) -> Tensor(out)
  args : (Tensor out, Tensor out_grad)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [out]
  kernel :
    func : relu_grad
  backward : relu_double_grad
  inplace : (out_grad -> x_grad)

- backward_api : reshape_double_grad
  forward : reshape_grad (Tensor xshape, Tensor grad_out) -> Tensor(grad_x)
  args : (Tensor grad_out, Tensor grad_x_grad)
  output : Tensor(grad_out_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [grad_out]
  kernel :
    func : reshape_double_grad
  no_need_buffer : grad_out
  inplace : (grad_x_grad -> grad_out_grad)

- backward_api : reshape_grad
  forward : reshape (Tensor x, IntArray shape) -> Tensor(out), Tensor(xshape)
  args : (Tensor xshape, Tensor out_grad)
  output : Tensor(x_grad)
  infer_meta :
    func : KernelWithXShapeInferMeta
    param : [xshape]
  kernel :
    func : reshape_grad
    param : [out_grad]
    data_type : out_grad
    backend : out_grad
    layout : out_grad
  backward : reshape_double_grad
  inplace : (out_grad -> x_grad)

- backward_api : roi_align_grad
  forward : roi_align (Tensor x, Tensor boxes, Tensor boxes_num, int pooled_height, int pooled_width, float spatial_scale, int sampling_ratio, bool aligned) -> Tensor(out)
  args : (Tensor x, Tensor boxes, Tensor boxes_num, Tensor out_grad, int pooled_height, int pooled_width, float spatial_scale, int sampling_ratio, bool aligned)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : roi_align_grad
    data_type : boxes
  no_need_buffer : x
  optional : boxes_num

- backward_api : roi_pool_grad
  forward : roi_pool (Tensor x, Tensor boxes, Tensor boxes_num, int pooled_height, int pooled_width, float spatial_scale) -> Tensor(out), Tensor(arg_max)
  args : (Tensor x, Tensor boxes, Tensor boxes_num, Tensor arg_max, Tensor out_grad, int pooled_height, int pooled_width, float spatial_scale)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : roi_pool_grad
    data_type : x
  optional : boxes_num

- backward_api : roll_grad
  forward : roll(Tensor x, IntArray shifts, int64_t[] axis) -> Tensor(out)
  args : (Tensor x, Tensor out_grad, IntArray shifts, int64_t[] axis)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : roll_grad
    data_type : x
  no_need_buffer : x

- backward_api : round_grad
  forward : round(Tensor x) -> Tensor(out)
  args : (Tensor out_grad)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [out_grad]
  kernel :
    func : round_grad
  inplace : (out_grad -> x_grad)

- backward_api : rsqrt_double_grad
  forward : rsqrt_grad (Tensor out, Tensor grad_out) -> Tensor(grad_x)
  args : (Tensor out, Tensor grad_x, Tensor grad_x_grad)
  output : Tensor(out_grad), Tensor(grad_out_grad)
  infer_meta :
    func : GeneralBinaryGradInferMeta
    param : [out, out]
  kernel :
    func : rsqrt_double_grad
  inplace : (grad_x_grad -> grad_out_grad)

- backward_api : rsqrt_grad
  forward : rsqrt (Tensor x) -> Tensor(out)
  args : (Tensor out, Tensor out_grad)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [out]
  kernel :
    func : rsqrt_grad
  backward : rsqrt_double_grad
  inplace : (out_grad -> x_grad)

- backward_api : scale_double_grad
  forward : scale_grad (Tensor grad_out, Scalar scale, float bias, bool bias_after_scale) -> Tensor(grad_x)
  args : (Tensor grad_x_grad, Scalar scale=1.0, float bias=0.0, bool bias_after_scale=true)
  output : Tensor(grad_out_grad)
  invoke : scale(grad_x_grad, scale, 0.0, bias_after_scale)
  backward : scale_triple_grad

- backward_api : scale_grad
  forward : scale (Tensor x, Scalar scale, float bias, bool bias_after_scale) -> Tensor(out)
  args : (Tensor out_grad, Scalar scale=1.0, float bias=0.0, bool bias_after_scale=true)
  output : Tensor(x_grad)
  invoke : scale(out_grad, scale, 0.0, bias_after_scale)
  backward : scale_double_grad
  inplace : (out_grad -> x_grad)

- backward_api : scale_triple_grad
  forward : scale_double_grad (Tensor grad_grad_x, Scalar scale, float bias, bool bias_after_scale) -> Tensor(grad_grad_out)
  args : (Tensor grad_grad_out_grad, Scalar scale=1.0, float bias=0.0, bool bias_after_scale=true)
  output : Tensor(grad_grad_x_grad)
  invoke : scale(grad_grad_out_grad, scale, 0.0, bias_after_scale)

- backward_api : scatter_grad
  forward : scatter (Tensor x, Tensor index, Tensor updates, bool overwrite) -> Tensor(out)
  args : (Tensor index, Tensor updates, Tensor out_grad, bool overwrite)
  output : Tensor(x_grad), Tensor(updates_grad)
  infer_meta :
    func : ScatterGradInferMeta
    param : [index, updates, out_grad, overwrite]
  kernel :
    func : scatter_grad
  no_need_buffer : updates

- backward_api : scatter_nd_add_grad
  forward : scatter_nd_add (Tensor x, Tensor index, Tensor updates) -> Tensor(out)
  args : (Tensor index, Tensor updates, Tensor out_grad)
  output : Tensor(x_grad), Tensor(updates_grad)
  infer_meta :
    func : ScatterNdAddGradInferMeta
    param : [index, updates, out_grad]
  kernel :
    func : scatter_nd_add_grad
  no_need_buffer : updates

- backward_api : segment_pool_grad
  forward : segment_pool (Tensor x, Tensor segment_ids, str pooltype) -> Tensor(out), Tensor(summed_ids)
  args : (Tensor x, Tensor segment_ids, Tensor out, Tensor summed_ids, Tensor out_grad, str pooltype)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : segment_pool_grad
    data_type : x
  optional : summed_ids

- backward_api : selu_grad
  forward : selu (Tensor x, float scale, float alpha) -> Tensor(out)
  args : (Tensor out, Tensor out_grad, float scale, float alpha)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [out]
  kernel :
    func : selu_grad

- backward_api : sigmoid_cross_entropy_with_logits_grad
  forward : sigmoid_cross_entropy_with_logits (Tensor x, Tensor label, bool normalize, int ignore_index) -> Tensor(out)
  args : (Tensor x, Tensor label, Tensor out_grad, bool normalize, int ignore_index)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : sigmoid_cross_entropy_with_logits_grad
  inplace : (out_grad -> x_grad)

- backward_api : sigmoid_double_grad
  forward : sigmoid_grad (Tensor out, Tensor fwd_grad_out) -> Tensor(grad_x)
  args : (Tensor out, Tensor fwd_grad_out, Tensor grad_x_grad)
  output : Tensor(out_grad), Tensor(fwd_grad_out_grad)
  infer_meta :
    func : GeneralBinaryGradInferMeta
    param : [out, fwd_grad_out]
  kernel :
    func : sigmoid_double_grad
  backward : sigmoid_triple_grad
  inplace : (grad_x_grad -> fwd_grad_out_grad)

- backward_api : sigmoid_grad
  forward : sigmoid (Tensor x) -> Tensor(out)
  args : (Tensor out, Tensor out_grad)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [out]
  kernel :
    func : sigmoid_grad
  backward : sigmoid_double_grad
  inplace : (out_grad -> x_grad)

- backward_api : sigmoid_triple_grad
  forward : sigmoid_double_grad (Tensor out, Tensor fwd_grad_out, Tensor grad_grad_x) -> Tensor(grad_out), Tensor(grad_grad_out)
  args : (Tensor out, Tensor fwd_grad_out, Tensor grad_grad_x, Tensor grad_out_grad, Tensor grad_grad_out_grad)
  output : Tensor(out_grad), Tensor(fwd_grad_out_grad), Tensor(grad_grad_x_grad)
  infer_meta :
    func : GeneralTernaryGradInferMeta
    param : [out, fwd_grad_out, grad_grad_x]
  kernel :
    func : sigmoid_triple_grad
  optional : grad_grad_out_grad
  inplace : (grad_grad_x -> fwd_grad_out_grad)

- backward_api : silu_grad
  forward : silu (Tensor x) -> Tensor(out)
  args : (Tensor x, Tensor out_grad)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : silu_grad
  inplace : (out_grad -> x_grad)

- backward_api : sin_grad
  forward : sin (Tensor x) -> Tensor(out)
  args : (Tensor x, Tensor out_grad)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : sin_grad
  inplace : (out_grad -> x_grad)

- backward_api : sinh_grad
  forward : sinh (Tensor x) -> Tensor(out)
  args : (Tensor x, Tensor out_grad)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : sinh_grad
  inplace : (out_grad -> x_grad)

- backward_api : slice_grad
  forward : slice (Tensor input, int64_t[] axes, IntArray starts, IntArray ends, int64_t[] infer_flags, int64_t[] decrease_axis) -> Tensor(out)
  args : (Tensor input, Tensor out_grad, int64_t[] axes, IntArray starts, IntArray ends, int64_t[] infer_flags, int64_t[] decrease_axis)
  output : Tensor(input_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [input]
  kernel :
    func : slice_grad
  no_need_buffer : input

- backward_api : soft_shrink_grad
  forward : soft_shrink (Tensor x, float lambda) -> Tensor(out)
  args : (Tensor x, Tensor out_grad, float lambda)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : soft_shrink_grad
  inplace : (out_grad -> x_grad)

- backward_api : softmax_grad
  forward : softmax (Tensor x, int axis) -> Tensor(out)
  args : (Tensor out, Tensor out_grad, int axis)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [out]
  kernel :
    func : softmax_grad
    use_gpudnn : true

- backward_api : split_grad
  forward : split (Tensor x, IntArray num_or_sections, Scalar axis) -> Tensor[](out)
  args : (Tensor[] out_grad, Scalar axis = -1)
  output : Tensor(x_grad)
  invoke : concat( out_grad, axis)
# TODO(zhangyunfei) The config of double grad and triple grad will be supported in the future.

- backward_api : sqrt_double_grad
  forward : sqrt_grad (Tensor out, Tensor grad_out) -> Tensor(grad_x)
  args : (Tensor out, Tensor grad_x, Tensor grad_x_grad)
  output : Tensor(out_grad), Tensor(grad_out_grad)
  infer_meta :
    func : GeneralBinaryGradInferMeta
    param : [out, out]
  kernel :
    func : sqrt_double_grad
  inplace : (grad_x_grad -> grad_out_grad)

- backward_api : sqrt_grad
  forward : sqrt (Tensor x) -> Tensor(out)
  args : (Tensor out, Tensor out_grad)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [out]
  kernel :
    func : sqrt_grad
  backward : sqrt_double_grad
  inplace : (out_grad -> x_grad)

- backward_api : square_double_grad
  forward : square_grad (Tensor x, Tensor grad_out) -> Tensor(grad_x)
  args : (Tensor x, Tensor grad_out, Tensor grad_x_grad)
  output : Tensor(x_grad), Tensor(grad_out_grad)
  infer_meta :
    func : GeneralBinaryGradInferMeta
    param : [x, x]
  kernel :
    func : square_double_grad
  inplace : (grad_x_grad -> grad_out_grad)

- backward_api : square_grad
  forward : square (Tensor x) -> Tensor(out)
  args : (Tensor x, Tensor out_grad)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : square_grad
  backward : square_double_grad
  inplace : (out_grad -> x_grad)

- backward_api : squeeze_double_grad
  forward : squeeze_grad(Tensor xshape, Tensor grad_out, int[] axes) -> Tensor(grad_x)
  args : (Tensor grad_x_grad, int[] axes)
  output : Tensor(grad_out_grad)
  invoke : squeeze(grad_x_grad, axes)

- backward_api : squeeze_grad
  forward : squeeze(Tensor x, int[] axes) -> Tensor(out), Tensor(xshape)
  args : (Tensor xshape, Tensor out_grad, int[] axes)
  output : Tensor(x_grad)
  infer_meta :
    func : KernelWithXShapeInferMeta
    param : [xshape]
  kernel :
    func : squeeze_grad
  inplace : (out_grad -> x_grad)
  backward : squeeze_double_grad

- backward_api : stack_grad
  forward : stack (Tensor[] x, int axis) -> Tensor(out)
  args : (Tensor[] x, Tensor out_grad, int axis)
  output : Tensor[](x_grad){x.size()}
  infer_meta :
    func : StackGradInferMeta
    param : [out_grad, axis]
  kernel :
    func : stack_grad
    param : [out_grad, axis]
  no_need_buffer : x

- backward_api : strided_slice_grad
  forward : strided_slice (Tensor x, int[] axes, IntArray starts, IntArray ends, IntArray strides) -> Tensor(out)
  args : (Tensor x, Tensor out_grad, int[] axes, IntArray starts, IntArray ends, IntArray strides)
  output : Tensor(x_grad)
  infer_meta :
    func : GeneralUnaryGradInferMeta
    param : [x]
  kernel :
    func : strided_slice_grad
  no_need_buffer : x

- backward_api : subtract_double_grad
  forward : subtract_grad (Tensor x, Tensor y, Tensor grad_out, int axis = -1) -> Tensor(grad_x), Tensor(grad_y)
  args : (Tensor y, Tensor grad_out, Tensor grad_x_grad, Tensor grad_y_grad, int axis = -1)
  output : Tensor(grad_out_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [grad_out]
  kernel :
    func : subtract_double_grad
  optional : grad_x_grad, grad_y_grad
  no_need_buffer : y, grad_out
  inplace : (grad_x_grad -> grad_out_grad)

- backward_api : subtract_grad
  forward : subtract (Tensor x, Tensor y) -> Tensor(out)
  args : (Tensor x, Tensor y, Tensor out_grad, int axis = -1)
  output : Tensor(x_grad), Tensor(y_grad)
  infer_meta :
    func : GeneralBinaryGradInferMeta
    param : [x, y]
  kernel :
    func : subtract_grad
  no_need_buffer : x, y
  backward : subtract_double_grad
  inplace : (out_grad -> x_grad)

- backward_api : sum_double_grad
  forward : sum_grad (Tensor x, Tensor grad_out, int64_t[] dims, bool keep_dim, bool reduce_all=false) -> Tensor(grad_x)
  args : (Tensor grad_x_grad, int64_t[] dims={}, bool keep_dim=false)
  output : Tensor(grad_out_grad)
  invoke : sum(grad_x_grad, dims, grad_x_grad.dtype(), keep_dim)
  backward : sum_triple_grad

- backward_api : sum_grad
  forward : sum (Tensor x, int64_t[] dims={}, DataType out_dtype=DataType::UNDEFINED, bool keep_dim=false) -> Tensor(out)
  args : (Tensor x, Tensor out_grad, int64_t[] dims, bool keep_dim, bool reduce_all=false)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : sum_grad
  no_need_buffer : x
  backward : sum_double_grad

- backward_api : sum_triple_grad
  forward : sum_double_grad (Tensor grad_grad_x, int64_t[] dims={}, bool keep_dim=false) -> Tensor(grad_grad_out)
  args : (Tensor grad_grad_x, Tensor grad_grad_out_grad, int64_t[] dims={}, bool keep_dim=false, bool reduce_all=false)
  output : Tensor(grad_grad_x_grad)
  invoke : sum_grad(grad_grad_x, grad_grad_out_grad, dims, keep_dim, reduce_all, grad_grad_x_grad)

- backward_api : swish_grad
  forward : swish (Tensor x, float beta=1.0) -> Tensor(out)
  args : (Tensor x, Tensor out_grad, float bete=1.0)
  output : Tensor(x_grad)
  infer_meta :
    func : GeneralUnaryGradInferMeta
    param : [x]
  kernel :
    func : swish_grad
  inplace : (out_grad -> x_grad)

- backward_api : take_along_axis_grad
  forward : take_along_axis (Tensor x, Tensor index, int axis) -> Tensor(out)
  args : (Tensor x, Tensor index, Tensor out_grad, int axis)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : take_along_axis_grad

- backward_api : tan_grad
  forward : tan (Tensor x) -> Tensor(out)
  args : (Tensor x, Tensor out_grad)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : tan_grad
  inplace : (out_grad -> x_grad)

- backward_api : tanh_double_grad
  forward : tanh_grad (Tensor out, Tensor grad_out) -> Tensor(grad_x)
  args : (Tensor out, Tensor grad_out, Tensor grad_x_grad)
  output : Tensor(out_grad), Tensor(grad_out_grad)
  infer_meta :
    func : GeneralBinaryGradInferMeta
    param : [out, out]
  kernel :
    func : tanh_double_grad
  backward : tanh_triple_grad
  inplace : (grad_x_grad -> grad_out_grad)

- backward_api : tanh_grad
  forward : tanh (Tensor x) -> Tensor(out)
  args : (Tensor out, Tensor out_grad)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [out]
  kernel :
    func : tanh_grad
  backward : tanh_double_grad
  inplace : (out_grad -> x_grad)

- backward_api : tanh_shrink_grad
  forward : tanh_shrink (Tensor x) -> Tensor(out)
  args : (Tensor x, Tensor out_grad)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : tanh_shrink_grad
  inplace : (out_grad -> x_grad)

- backward_api : tanh_triple_grad
  forward : tanh_double_grad (Tensor out, Tensor grad_out_forward, Tensor grad_x_grad_forward) -> Tensor(grad_out_new), Tensor(grad_out_grad)
  args : (Tensor out, Tensor grad_out_forward, Tensor grad_x_grad_forward, Tensor grad_out_new_grad, Tensor grad_out_grad_grad)
  output : Tensor(out_grad), Tensor(grad_out_forward_grad), Tensor(grad_x_grad_forward_grad)
  infer_meta :
    func : GeneralTernaryGradInferMeta
    param : [out, out, grad_x_grad_forward]
  kernel :
    func : tanh_triple_grad
  inplace : (grad_x_grad_forward -> grad_out_forward_grad)

- backward_api : thresholded_relu_grad
  forward : thresholded_relu (Tensor x, float threshold) -> Tensor(out)
  args : (Tensor x, Tensor out_grad, float threshold)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : thresholded_relu_grad
  inplace : (out_grad -> x_grad)

- backward_api : tile_double_grad
  forward : tile_grad (Tensor x, Tensor grad_out, IntArray repeat_times) -> Tensor(grad_x)
  args : (Tensor grad_x_grad, IntArray repeat_times)
  output : Tensor(grad_out_grad)
  infer_meta :
    func : TileInferMeta
  kernel :
    func : tile

- backward_api : tile_grad
  forward : tile (Tensor x, IntArray repeat_times) -> Tensor(out)
  args : (Tensor x, Tensor out_grad, IntArray repeat_times)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : tile_grad
  no_need_buffer : x
  backward : tile_double_grad

- backward_api : top_k_grad
  forward : top_k (Tensor x, Scalar k, int axis = -1, bool largest = true, bool sorted = true) -> Tensor(out), Tensor(indices)
  args : (Tensor x, Tensor indices, Tensor out_grad, Scalar k = -1, int axis = -1, bool largest = true, bool sorted = true)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : top_k_grad

- backward_api : trace_grad
  forward : trace (Tensor x, int offset, int axis1, int axis2) -> Tensor(out)
  args : (Tensor x, Tensor out_grad, int offset, int axis1, int axis2)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : trace_grad
  no_need_buffer : x

- backward_api : transpose_double_grad
  forward : transpose_grad (Tensor grad_out, int[] axis) -> Tensor(grad_x)
  args : (Tensor grad_x_grad, int[] axis)
  output : Tensor(grad_out_grad)
  invoke : transpose(grad_x_grad, axis)

- backward_api : transpose_grad
  forward : transpose (Tensor x, int[] axis) -> Tensor(out)
  args : (Tensor out_grad, int[] axis)
  output : Tensor(x_grad)
  infer_meta :
    func : TransposeGradInferMeta
    param : [out_grad, axis]
  kernel :
    func : transpose_grad
  backward : transpose_double_grad

- backward_api : triangular_solve_grad
  forward : triangular_solve (Tensor x, Tensor y, bool upper, bool tranpose, bool unitriangular) -> Tensor(out)
  args : (Tensor x, Tensor y, Tensor out, Tensor out_grad, bool upper, bool tranpose, bool unitriangular)
  output : Tensor(x_grad), Tensor(y_grad)
  infer_meta :
    func : GeneralBinaryGradInferMeta
    param : [x, y]
  kernel :
    func : triangular_solve_grad

- backward_api : tril_triu_grad
  forward : tril_triu(Tensor x, int diagonal, bool lower) -> Tensor(out)
  args : (Tensor out_grad, int diagonal, bool lower)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [out_grad]
  kernel :
    func : tril_triu_grad

- backward_api : trunc_grad
  forward : trunc (Tensor x) -> Tensor(out)
  args : (Tensor out_grad)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [out_grad]
  kernel :
    func : trunc_grad

- backward_api : unbind_grad
  forward : unbind (Tensor input, int axis) -> Tensor[](out)
  args : (Tensor[] out_grad, int axis)
  output : Tensor(input_grad)
  invoke : stack(out_grad, axis)

- backward_api : unfold_grad
  forward : unfold (Tensor x, int[] kernel_sizes, int[] strides, int[] paddings, int[] dilations) -> Tensor(out)
  args : (Tensor x, Tensor out_grad, int[] kernel_sizes, int[] strides, int[] paddings, int[] dilations)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : unfold_grad
  no_need_buffer : x

- backward_api : unsqueeze_double_grad
  forward : unsqueeze_grad(Tensor xshape, Tensor grad_out, IntArray axes) -> Tensor(grad_x)
  args : (Tensor grad_x_grad, IntArray axes)
  output : Tensor(grad_out_grad)
  invoke : unsqueeze(grad_x_grad, axes)

- backward_api : unsqueeze_grad
  forward : unsqueeze(Tensor x, IntArray axes) -> Tensor(out), Tensor(xshape)
  args : (Tensor xshape, Tensor out_grad, IntArray axes)
  output : Tensor(x_grad)
  infer_meta :
    func : KernelWithXShapeInferMeta
    param : [xshape]
  kernel :
    func : unsqueeze_grad
    param : [xshape, out_grad]
  inplace : (out_grad -> x_grad)
  backward : unsqueeze_double_grad

- backward_api : where_grad
  forward : where (Tensor condition, Tensor x, Tensor y) -> Tensor(out)
  args : (Tensor condition, Tensor x, Tensor y, Tensor out_grad)
  output : Tensor(x_grad), Tensor(y_grad)
  infer_meta :
    func : GeneralBinaryGradInferMeta
    param : [x, y]
  kernel :
    func : where_grad
  no_need_buffer : x, y
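The backward_api entries above follow a fixed schema (forward signature, args, output, infer_meta, kernel, plus optional keys such as no_need_buffer, inplace, and invoke). As a minimal sketch of how such a file can be consumed, assuming PyYAML is installed and the file has been saved locally under the hypothetical name legacy_backward.yaml (in a Paddle checkout it lives at python/paddle/utils/code_gen/legacy_backward.yaml):

# Sketch only: iterate over the backward_api entries and report how each one
# is dispatched. Entries either name a kernel or 'invoke' another API directly.
import yaml

with open("legacy_backward.yaml", "r") as f:          # hypothetical local copy
    backward_apis = yaml.safe_load(f) or []

for api in backward_apis:
    name = api["backward_api"]
    if "kernel" in api:
        print(name, "-> kernel:", api["kernel"]["func"])
    elif "invoke" in api:
        print(name, "-> invoke:", api["invoke"])
    if "no_need_buffer" in api:
        print("   no_need_buffer:", api["no_need_buffer"])

This is illustrative only; the actual code generators in this commit parse the same structure with their own helpers.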
python/paddle/utils/code_gen/new_api.yaml
deleted, 100644 → 0
python/paddle/utils/code_gen/new_backward.yaml
deleted, 100644 → 0
tools/infrt/generate_phi_kernel_dialect.py
...
@@ -71,9 +71,17 @@ def get_skipped_kernel_list():
 def get_api_yaml_info(file_path):
-    f = open(file_path + "/python/paddle/utils/code_gen/api.yaml", "r")
-    cont = f.read()
-    return yaml.load(cont, Loader=yaml.FullLoader)
+    apis = []
+    with open(file_path + "/python/paddle/utils/code_gen/api.yaml", 'r') as f:
+        api_list = yaml.load(f, Loader=yaml.FullLoader)
+        if api_list:
+            apis.extend(api_list)
+    with open(file_path + "/python/paddle/utils/code_gen/legacy_api.yaml", 'r') as f:
+        legacy_api_list = yaml.load(f, Loader=yaml.FullLoader)
+        if legacy_api_list:
+            apis.extend(legacy_api_list)
+    return apis


 def generate_kernel_name(op_name, place_str):
...
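With this change, get_api_yaml_info merges the entries from api.yaml and the renamed legacy_api.yaml into one list; the `if api_list:` guard matters because yaml.load returns None for an empty file, which would break extend. A hedged usage sketch (paddle_root is a hypothetical path to a Paddle source checkout):

# Assumes a checkout containing both api.yaml and legacy_api.yaml.
paddle_root = "/path/to/Paddle"            # hypothetical
apis = get_api_yaml_info(paddle_root)      # merged entries from both files
print(len(apis), "api entries loaded")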
tools/infrt/get_phi_kernel_function.sh
...
@@ -78,7 +78,7 @@ done
 #step 2:get simple general inferMeta function wrap info
 temp_path=`mktemp -d`
 python3 ${PADDLE_ROOT}/python/paddle/utils/code_gen/wrapped_infermeta_gen.py \
-  --api_yaml_path ${PADDLE_ROOT}/python/paddle/utils/code_gen/api.yaml \
+  --api_yaml_path ${PADDLE_ROOT}/python/paddle/utils/code_gen/api.yaml ${PADDLE_ROOT}/python/paddle/utils/code_gen/legacy_api.yaml \
   --wrapped_infermeta_header_path ${temp_path}/generate.h \
   --wrapped_infermeta_source_path ${temp_path}/generate.cc
...
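The script now passes both yaml files as values of a single --api_yaml_path flag, which only works if the Python generator accepts multiple values for that option. The exact parser in wrapped_infermeta_gen.py is not shown in this diff; the following is an illustrative argparse sketch of such a definition, not the script's actual code:

# Illustrative only: an argparse flag that accepts one or more yaml paths,
# matching the space-separated invocation used in the shell script above.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--api_yaml_path", nargs="+",
                    help="one or more api yaml files, e.g. api.yaml legacy_api.yaml")
args = parser.parse_args(["--api_yaml_path", "api.yaml", "legacy_api.yaml"])
print(args.api_yaml_path)  # ['api.yaml', 'legacy_api.yaml']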
tools/infrt/get_phi_kernel_info.py
...
@@ -21,13 +21,21 @@ from typing import List, Dict, Any
 skipped_phi_api_list_file = "/tools/infrt/skipped_phi_api.json"
 api_yaml_file = "/python/paddle/utils/code_gen/api.yaml"
+legacy_api_yaml_file = "/python/paddle/utils/code_gen/legacy_api.yaml"


 def get_skipped_kernel_list():
     skiped_kernel_list = []
     with open(skipped_phi_api_list_file, 'r') as f:
         skiped_api_list = json.load(f)
-    infer_meta_data = get_api_yaml_info(api_yaml_file)
+    infer_meta_data = []
+    api_meta_data = get_api_yaml_info(api_yaml_file)
+    legacy_api_meta_data = get_api_yaml_info(legacy_api_yaml_file)
+    if api_meta_data:
+        infer_meta_data.extend(api_meta_data)
+    if legacy_api_meta_data:
+        infer_meta_data.extend(legacy_api_meta_data)
     for api in infer_meta_data:
         if "kernel" not in api or "infer_meta" not in api:
             continue
...
@@ -365,7 +373,14 @@
     args = parse_args()
     skipped_phi_api_list_file = args.paddle_root_path + skipped_phi_api_list_file
     api_yaml_file = args.paddle_root_path + api_yaml_file
-    infer_meta_data = get_api_yaml_info(api_yaml_file)
+    legacy_api_yaml_file = args.paddle_root_path + legacy_api_yaml_file
+    infer_meta_data = []
+    api_meta_data = get_api_yaml_info(api_yaml_file)
+    legacy_api_meta_data = get_api_yaml_info(legacy_api_yaml_file)
+    if api_meta_data:
+        infer_meta_data.extend(api_meta_data)
+    if legacy_api_meta_data:
+        infer_meta_data.extend(legacy_api_meta_data)
     kernel_data = get_kernel_info(args.kernel_info_file)
     info_meta_wrap_data = get_infermeta_info(args.infermeta_wrap_file)
     attr_data = get_attr_info(args.attr_info_file)
...
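Both hunks in this file repeat the same load-and-merge pattern (load api.yaml, load legacy_api.yaml, extend a combined list while guarding against empty files). A small helper along these lines would capture it in one place; this is a hypothetical refactor sketch, not part of the commit, and it reuses the get_api_yaml_info loader already defined in these tools:

# Hypothetical helper, shown only to make the repeated pattern explicit.
def load_all_api_meta(*yaml_files):
    merged = []
    for path in yaml_files:
        data = get_api_yaml_info(path)  # loader from tools/infrt; may return None/[]
        if data:
            merged.extend(data)
    return merged

# e.g. infer_meta_data = load_all_api_meta(api_yaml_file, legacy_api_yaml_file)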