Unverified commit 8caaf85a, authored by Chen Weihang, committed by GitHub

[Cherry-pick] Normalize yaml name and label (#46052)

* normalize yaml file name (#45894)

* Clear extra attributes of activation op in OpMaker (#45772)

* clear extra attr of activation op in opmaker

* fix syntax bug

* fix mkldnn kernel

* fix merge conflict

* fix bug

* [PHI] Normalize yaml op label (#45976)

* normalize yaml op label

* revert op_compat yaml change

* fix prelu and rnn compat problem

* replace api by op

* support assign op backward refuse forward (#45879)

* normalize yaml backward op label (#46028)
Co-authored-by: zyfncg <zhangyunfei07@baidu.com>
Co-authored-by: Charles-hit <56987902+Charles-hit@users.noreply.github.com>
Parent 00486956
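The change renames the YAML operator description files (api.yaml → ops.yaml, legacy_api.yaml → legacy_ops.yaml, sparse_api.yaml → sparse_ops.yaml, sparse_bw_api.yaml → sparse_backward.yaml, strings_api.yaml → strings_ops.yaml, api_compat.yaml → op_compat.yaml) and the entry labels inside them (api → op, backward_api → backward_op). As a minimal sketch of what a consumer of the new schema does, assuming only PyYAML and an illustrative entry (not copied from the repository):

import yaml

# Illustrative entry in the new ops.yaml schema: the name label is now
# "op" instead of "api"; the rest of the entry layout is unchanged.
SAMPLE = """
- op : trunc
  args : (Tensor x)
  output : Tensor(out)
  backward : trunc_grad
"""

for entry in yaml.safe_load(SAMPLE):
    # Code generators now index entries by the "op" key.
    print(entry['op'], '->', entry.get('backward'))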
 set(api_yaml_path
-  "${PADDLE_SOURCE_DIR}/paddle/phi/api/yaml/api.yaml,${PADDLE_SOURCE_DIR}/paddle/phi/api/yaml/legacy_api.yaml,${PADDLE_SOURCE_DIR}/paddle/phi/api/yaml/sparse_api.yaml"
+  "${PADDLE_SOURCE_DIR}/paddle/phi/api/yaml/ops.yaml,${PADDLE_SOURCE_DIR}/paddle/phi/api/yaml/legacy_ops.yaml,${PADDLE_SOURCE_DIR}/paddle/phi/api/yaml/sparse_ops.yaml"
 )
 set(backward_yaml_path
-  "${PADDLE_SOURCE_DIR}/paddle/phi/api/yaml/backward.yaml,${PADDLE_SOURCE_DIR}/paddle/phi/api/yaml/legacy_backward.yaml,${PADDLE_SOURCE_DIR}/paddle/phi/api/yaml/sparse_bw_api.yaml"
+  "${PADDLE_SOURCE_DIR}/paddle/phi/api/yaml/backward.yaml,${PADDLE_SOURCE_DIR}/paddle/phi/api/yaml/legacy_backward.yaml,${PADDLE_SOURCE_DIR}/paddle/phi/api/yaml/sparse_backward.yaml"
 )
 set(tmp_forwards_cc_path
   "${PADDLE_SOURCE_DIR}/paddle/fluid/eager/api/generated/eager_generated/forwards/tmp_dygraph_functions.cc"
@@ -30,7 +30,7 @@ set(nodes_h_path
 )
 # StringTensor only needs forward api
 set(fwd_api_yaml_path
-  "${PADDLE_SOURCE_DIR}/paddle/phi/api/yaml/strings_api.yaml")
+  "${PADDLE_SOURCE_DIR}/paddle/phi/api/yaml/strings_ops.yaml")
 message("Final State Eager CodeGen")
 add_custom_target(
......
@@ -83,10 +83,10 @@ def ReadBwdFile(filepath):
     ret = {}
     if contents is not None:
         for content in contents:
-            assert 'backward_api' in content.keys(), AssertMessage(
-                'backward_api', content.keys())
-            if 'backward_api' in content.keys():
-                api_name = content['backward_api']
+            assert 'backward_op' in content.keys(), AssertMessage(
+                'backward_op', content.keys())
+            if 'backward_op' in content.keys():
+                api_name = content['backward_op']
             ret[api_name] = content
     f.close()
@@ -418,12 +418,12 @@ class FunctionGeneratorBase:
     def CollectOriginalForwardInfo(self):
         forward_api_contents = self.forward_api_contents
-        self.forward_api_name = forward_api_contents['api']
+        self.forward_api_name = forward_api_contents['op']
         forward_args_str = forward_api_contents['args']
         forward_returns_str = forward_api_contents['output']
-        assert 'api' in forward_api_contents.keys(
-        ), "Unable to find \"api\" in forward_api_contents keys"
+        assert 'op' in forward_api_contents.keys(
+        ), "Unable to find \"op\" in forward_api_contents keys"
         assert 'args' in forward_api_contents.keys(
         ), "Unable to find \"args\" in forward_api_contents keys"
         assert 'output' in forward_api_contents.keys(
......
@@ -569,16 +569,16 @@ class DygraphFunctionGeneratorBase(FunctionGeneratorBase):
         forward_api_contents = self.forward_api_contents
         grad_api_contents = self.grad_api_contents
-        assert 'api' in forward_api_contents.keys(
-        ), "Unable to find \"api\" in api.yaml"
+        assert 'op' in forward_api_contents.keys(
+        ), "Unable to find \"op\" in ops.yaml"
         assert 'args' in forward_api_contents.keys(
-        ), "Unable to find \"args\" in api.yaml"
+        ), "Unable to find \"args\" in ops.yaml"
         assert 'output' in forward_api_contents.keys(
-        ), "Unable to find \"output\" in api.yaml"
+        ), "Unable to find \"output\" in ops.yaml"
         if grad_api_contents is not None:
             assert 'backward' in forward_api_contents.keys(
-            ), "Unable to find \"backward\" in api.yaml"
+            ), "Unable to find \"backward\" in ops.yaml"
             assert 'args' in grad_api_contents.keys(
             ), "Unable to find \"args\" in backward.yaml"
             assert 'output' in grad_api_contents.keys(
@@ -1485,7 +1485,7 @@ class DygraphNodeGenerator(DygraphFunctionGeneratorBase):
         if next_grad_api_contents:
             # Fake forward_api_contents and backward_api_contents
             forward_api_contents = grad_api_contents
-            forward_api_contents['api'] = forward_api_contents['backward_api']
+            forward_api_contents['op'] = forward_api_contents['backward_op']
             backward_api_contents = next_grad_api_contents
             next_node_generator = DygraphFunctionGeneratorBase(
@@ -1914,11 +1914,11 @@ class DygraphForwardAndNodesGenerator(GeneratorBase):
         grad_api_dict = self.grad_api_dict
         forward_apis_dict = {}
         for api_item in forward_api_list:
-            forward_apis_dict[api_item['api']] = api_item
+            forward_apis_dict[api_item['op']] = api_item
         namespace = self.namespace
         for forward_api_contents in forward_api_list:
-            if forward_api_contents['api'] in black_ops_list: continue
+            if forward_api_contents['op'] in black_ops_list: continue
             self.CollectIsForwardOnly(forward_api_contents)
@@ -1959,8 +1959,7 @@ class DygraphForwardAndNodesGenerator(GeneratorBase):
                 forward_api_contents = backward_api_contents
                 # Fake forward_api_content
-                forward_api_contents['api'] = forward_api_contents[
-                    'backward_api']
+                forward_api_contents['op'] = forward_api_contents['backward_op']
                 backward_api_contents = next_grad_api_contents
         if len(namespace) > 0:
@@ -2043,7 +2042,7 @@ if __name__ == "__main__":
         api_yaml_path = api_yaml_paths[i]
         # string api is forwrad only
-        if not api_yaml_path.endswith('strings_api.yaml'):
+        if not api_yaml_path.endswith('strings_ops.yaml'):
             backward_yaml_path = backward_yaml_paths[i]
         else:
             backward_yaml_path = None
......
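The eager code generator follows the same rename: ReadBwdFile now keys backward entries by backward_op instead of backward_api. A simplified sketch of the updated lookup (the real helper in eager_gen.py also handles namespaces and reports failures through AssertMessage):

import yaml

def read_bwd_file(filepath):
    # Simplified ReadBwdFile: map each backward entry to its new name label.
    with open(filepath, 'r') as f:
        contents = yaml.safe_load(f)
    ret = {}
    if contents is not None:
        for content in contents:
            # 'backward_api' was renamed to 'backward_op' in backward.yaml.
            assert 'backward_op' in content, f"missing 'backward_op' in {list(content)}"
            ret[content['backward_op']] = content
    return ret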
@@ -213,8 +213,8 @@ class SingleGradOpMaker<OpDesc> : public GradOpDescMakerBase {
     std::vector<std::unique_ptr<OpDesc>> retv;
     retv.emplace_back(new OpDesc());
     try {
-      this->Apply(retv.front().get());
       retv.front()->SetRuntimeAttrMap(this->RuntimeAttrs());
+      this->Apply(retv.front().get());
     } catch (platform::EnforceNotMet& exception) {
       framework::AppendErrorOpHint(retv.front().get()->Type(), &exception);
       throw std::move(exception);
......
@@ -38,29 +38,20 @@ static constexpr bool CanInplaceAct() {
          GradFunctor::FwdDeps() == ActBwdOpFwdDeps::kNoDeps;
 }
 
 #define REGISTER_ACTIVATION_OP_MAKER(OP_NAME, OP_COMMENT)                    \
   class OP_NAME##OpMaker                                                     \
       : public ::paddle::framework::OpProtoAndCheckerMaker {                 \
    public:                                                                   \
     void Make() override {                                                   \
       AddInput("X",                                                          \
                "Input of " #OP_NAME                                          \
                " operator, an N-D Tensor, with data type float32, "          \
                "float64 or float16.");                                       \
       AddOutput("Out",                                                       \
                 "Output of " #OP_NAME                                        \
                 " operator, a Tensor with shape same as input.");            \
-      AddAttr<bool>("use_mkldnn",                                            \
-                    "(bool, default false) Only used in mkldnn kernel")      \
-          .SetDefault(false)                                                 \
-          .AsExtra();                                                        \
-      AddAttr<bool>("use_cudnn",                                             \
-                    "(bool, default false) Only used in cudnn kernel, need " \
-                    "install cudnn")                                         \
-          .SetDefault(false)                                                 \
-          .AsExtra();                                                        \
       AddComment(OP_COMMENT);                                                \
     }                                                                        \
   }
 
 template <ActBwdOpFwdDeps kDepValue, typename T>
@@ -107,8 +98,7 @@ framework::OpKernelType GetKernelType(const framework::ExecutionContext& ctx,
   //   }
   // #endif
 #ifdef PADDLE_WITH_MKLDNN
-  auto it = oper.Attrs().find("use_mkldnn");
-  if (library == framework::LibraryType::kPlain && it != oper.Attrs().end() &&
+  if (library == framework::LibraryType::kPlain &&
       oper.CanMKLDNNBeUsed(ctx, data_type)) {
     library = framework::LibraryType::kMKLDNN;
     layout = framework::DataLayout::kMKLDNN;
@@ -458,10 +448,6 @@ class LeakyReluOpMaker : public framework::OpProtoAndCheckerMaker {
               "A LoDTensor or Tensor with the same type and size as that of x.");
     AddAttr<float>("alpha", "Slope of the activation function at x < 0.")
         .SetDefault(0.02f);
-    AddAttr<bool>("use_mkldnn",
-                  "(bool, default false) Only used in mkldnn kernel")
-        .SetDefault(false)
-        .AsExtra();
     AddComment(R"DOC(
 LeakyRelu Activation Operator.
@@ -483,35 +469,6 @@ class SoftplusOpMaker : public framework::OpProtoAndCheckerMaker {
     AddAttr<float>("beta", "The value of beta for Softplus.").SetDefault(1.0f);
     AddAttr<float>("threshold", "The value of threshold for Softplus.")
         .SetDefault(20.0f);
-    AddAttr<bool>("use_mkldnn",
-                  "(bool, default false) Only used in mkldnn kernel.")
-        .SetDefault(false)
-        .AsExtra();
-    AddAttr<bool>(
-        "use_cudnn",
-        "(bool, default false) Only used in cudnn kernel, need install cudnn.")
-        .SetDefault(false)
-        .AsExtra();
-    AddAttr<std::string>(
-        "fuse_activation_type",
-        "Fused activation type used in softplus OneDNN kernel.")
-        .SetDefault("")
-        .AsExtra();
-    AddAttr<float>(
-        "fuse_activation_alpha",
-        "Fused activation alpha parameter type used in softplus OneDNN kernel.")
-        .SetDefault(0.0f)
-        .AsExtra();
-    AddAttr<float>(
-        "fuse_activation_beta",
-        "Fused activation beta parameter type used in softplus OneDNN kernel.")
-        .SetDefault(0.0f)
-        .AsExtra();
-    AddAttr<float>(
-        "fuse_activation_scale",
-        "Fused activation scale parameter type used in softplus OneDNN kernel.")
-        .SetDefault(1.0f)
-        .AsExtra();
     AddComment(R"DOC(
 :strong:`Softplus Activation Operator`
@@ -613,10 +570,6 @@ class ELUOpMaker : public framework::OpProtoAndCheckerMaker {
               "The output is a multi-dimensional Tensor which has same "
               "dimension and data type as the ``x``.");
     AddAttr<float>("alpha", "The alpha value of ELU").SetDefault(1.0f);
-    AddAttr<bool>("use_mkldnn",
-                  "(bool, default false) Only used in mkldnn kernel")
-        .SetDefault(false)
-        .AsExtra();
     AddComment(R"DOC(
 ELU Activation Operator.
@@ -712,10 +665,6 @@ class Relu6OpMaker : public framework::OpProtoAndCheckerMaker {
     AddAttr<float>("threshold",
                    "The threshold value of Relu6. Default is 6.0. ")
         .SetDefault(6.0f);
-    AddAttr<bool>("use_mkldnn",
-                  "(bool, default false) Only used in mkldnn kernel")
-        .SetDefault(false)
-        .AsExtra();
     AddComment(R"DOC(
 Relu6 Activation Operator.
@@ -817,10 +766,6 @@ class SwishOpMaker : public framework::OpProtoAndCheckerMaker {
     AddInput("X", "Input of Swish operator");
     AddOutput("Out", "Output of Swish operator");
     AddAttr<float>("beta", "Constant beta of swish operator").SetDefault(1.0f);
-    AddAttr<bool>("use_mkldnn",
-                  "(bool, default false) Only used in mkldnn kernel")
-        .SetDefault(false)
-        .AsExtra();
     AddComment(R"DOC(
 Swish Activation Operator.
@@ -841,10 +786,6 @@ class MishOpMaker : public framework::OpProtoAndCheckerMaker {
              "of softplus will be used if absolute value of input is greater than "
              ":attr:`threshold`")
         .SetDefault(20.f);
-    AddAttr<bool>("use_mkldnn",
-                  "(bool, default false) Only used in mkldnn kernel")
-        .SetDefault(false)
-        .AsExtra();
     AddComment(R"DOC(
 Mish Activation Operator.
@@ -871,10 +812,6 @@ class HardSwishOpMaker : public framework::OpProtoAndCheckerMaker {
         .SetDefault(6.0f);
     AddAttr<float>("offset", "The offset parameter of HardSwish operator")
         .SetDefault(3.0f);
-    AddAttr<bool>("use_mkldnn",
-                  "(bool, default false) Only used in mkldnn kernel")
-        .SetDefault(false)
-        .AsExtra();
     AddComment(R"DOC(
 HardSwish Activation Operator.
......
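The use_mkldnn/use_cudnn attributes deleted from these OpMakers are not dropped: they move into the extra sections of op_compat.yaml (shown at the end of this diff) and are regenerated into paddle/fluid/operators/ops_extra_info.cc. A hedged sketch of reading such a declaration back, assuming PyYAML and an illustrative entry; parse_extra_attr here is a simplified stand-in for parse_attr in ops_extra_info_gen.py:

import re
import yaml

# Matches declarations like "bool use_mkldnn = false" in an extra attrs list.
def parse_extra_attr(attr_str):
    m = re.match(r'(?P<type>[\w\[\]]+)\s+(?P<name>\w+)\s*=\s*(?P<default>.+)', attr_str)
    return m.group('type'), m.group('name'), m.group('default')

SAMPLE = """
- op : relu6
  backward : relu6_grad
  extra :
    attrs : [bool use_mkldnn = false]
"""

for entry in yaml.safe_load(SAMPLE):
    for attr in entry.get('extra', {}).get('attrs', []):
        # -> relu6 extra attr: ('bool', 'use_mkldnn', 'false')
        print(entry['op'], 'extra attr:', parse_extra_attr(attr))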
@@ -21,9 +21,9 @@ set(api_gen_base ${CMAKE_SOURCE_DIR}/paddle/phi/api/yaml/generator/api_base.py)
 # forward api file
 set(api_gen_file ${CMAKE_SOURCE_DIR}/paddle/phi/api/yaml/generator/api_gen.py)
-set(api_yaml_file ${CMAKE_SOURCE_DIR}/paddle/phi/api/yaml/api.yaml)
+set(api_yaml_file ${CMAKE_SOURCE_DIR}/paddle/phi/api/yaml/ops.yaml)
 set(legacy_api_yaml_file
-    ${CMAKE_SOURCE_DIR}/paddle/phi/api/yaml/legacy_api.yaml)
+    ${CMAKE_SOURCE_DIR}/paddle/phi/api/yaml/legacy_ops.yaml)
 set(api_header_file ${CMAKE_SOURCE_DIR}/paddle/phi/api/include/api.h)
 set(api_source_file ${CMAKE_SOURCE_DIR}/paddle/phi/api/lib/api.cc)
 set(api_header_file_tmp ${api_header_file}.tmp)
@@ -55,7 +55,7 @@ set(dygraph_api_source_file_tmp ${dygraph_api_source_file}.tmp)
 set(sparse_api_gen_file
     ${CMAKE_SOURCE_DIR}/paddle/phi/api/yaml/generator/sparse_api_gen.py)
 set(sparse_api_yaml_file
-    ${CMAKE_SOURCE_DIR}/paddle/phi/api/yaml/sparse_api.yaml)
+    ${CMAKE_SOURCE_DIR}/paddle/phi/api/yaml/sparse_ops.yaml)
 set(sparse_api_header_file
     ${CMAKE_SOURCE_DIR}/paddle/phi/api/include/sparse_api.h)
 set(sparse_api_source_file ${CMAKE_SOURCE_DIR}/paddle/phi/api/lib/sparse_api.cc)
@@ -66,7 +66,7 @@ set(sparse_api_source_file_tmp ${sparse_api_source_file}.tmp)
 set(sparse_bw_api_gen_file
     ${CMAKE_SOURCE_DIR}/paddle/phi/api/yaml/generator/sparse_bw_api_gen.py)
 set(sparse_bw_api_yaml_file
-    ${CMAKE_SOURCE_DIR}/paddle/phi/api/yaml/sparse_bw_api.yaml)
+    ${CMAKE_SOURCE_DIR}/paddle/phi/api/yaml/sparse_backward.yaml)
 set(sparse_bw_api_header_file
     ${CMAKE_SOURCE_DIR}/paddle/phi/api/backward/sparse_bw_api.h)
 set(sparse_bw_api_source_file
@@ -78,7 +78,7 @@ set(sparse_bw_api_source_file_tmp ${sparse_bw_api_source_file}.tmp)
 set(strings_api_gen_file
     ${CMAKE_SOURCE_DIR}/paddle/phi/api/yaml/generator/strings_api_gen.py)
 set(strings_api_yaml_file
-    ${CMAKE_SOURCE_DIR}/paddle/phi/api/yaml/strings_api.yaml)
+    ${CMAKE_SOURCE_DIR}/paddle/phi/api/yaml/strings_ops.yaml)
 set(strings_api_header_file
     ${CMAKE_SOURCE_DIR}/paddle/phi/api/include/strings_api.h)
 set(strings_api_source_file
@@ -97,8 +97,7 @@ set(wrapped_infermeta_source_file
 # op extra info file
 set(ops_extra_info_gen_file
     ${CMAKE_SOURCE_DIR}/paddle/phi/api/yaml/generator/ops_extra_info_gen.py)
-set(api_compat_yaml_file
-    ${CMAKE_SOURCE_DIR}/paddle/phi/api/yaml/api_compat.yaml)
+set(op_compat_yaml_file ${CMAKE_SOURCE_DIR}/paddle/phi/api/yaml/op_compat.yaml)
 set(ops_extra_info_file
     ${CMAKE_SOURCE_DIR}/paddle/fluid/operators/ops_extra_info.cc)
@@ -130,10 +129,10 @@ message(
 execute_process(
   WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}/paddle/phi/api/yaml
   COMMAND ${CMAKE_COMMAND} -E make_directory ${parsed_api_dir}
-  COMMAND ${PYTHON_EXECUTABLE} generator/parse_api.py --api_yaml_path ./api.yaml
+  COMMAND ${PYTHON_EXECUTABLE} generator/parse_api.py --api_yaml_path ./ops.yaml
           --output_path ./parsed_apis/api.parsed.yaml
   COMMAND ${PYTHON_EXECUTABLE} generator/parse_api.py --api_yaml_path
-          ./legacy_api.yaml --output_path ./parsed_apis/legacy_api.parsed.yaml
+          ./legacy_ops.yaml --output_path ./parsed_apis/legacy_api.parsed.yaml
   COMMAND
     ${PYTHON_EXECUTABLE} generator/parse_api.py --api_yaml_path ./backward.yaml
     --output_path ./parsed_apis/backward_api.parsed.yaml --backward
@@ -175,7 +174,7 @@ execute_process(
     ${PYTHON_EXECUTABLE} generator/generate_op.py --api_yaml_path
     ./parsed_apis/api.parsed.yaml --backward_api_yaml_path
     ./parsed_apis/backward_api.parsed.yaml --api_version_yaml_path
-    api_version.yaml --api_compat_yaml_path api_compat.yaml --output_op_path
+    op_version.yaml --op_compat_yaml_path op_compat.yaml --output_op_path
     "${generated_op_path}.tmp" --output_arg_map_path
     "${generated_argument_mapping_path}.tmp"
   RESULT_VARIABLE _result)
@@ -221,9 +220,8 @@ endif()
 # generate ops extra info
 execute_process(
-  COMMAND
-    ${PYTHON_EXECUTABLE} ${ops_extra_info_gen_file} --api_compat_yaml_path
-    ${api_compat_yaml_file} --ops_extra_info_path ${ops_extra_info_file})
+  COMMAND ${PYTHON_EXECUTABLE} ${ops_extra_info_gen_file} --op_compat_yaml_path
+          ${op_compat_yaml_file} --ops_extra_info_path ${ops_extra_info_file})
 message("generate ${ops_extra_info_file}")
 
 # generate forward api
......
@@ -40,7 +40,7 @@ inline bool NeedTransformPlace(const paddle::platform::Place& input,
                                const TransformFlag& transform_flag) {
   // NOTE(dev): The default value of TransformFlag is True, if it is set with
   // False
-  // somewhere such as api.yaml or backward.yaml that means we should skip data
+  // somewhere such as ops.yaml or backward.yaml that means we should skip data
   // transform. Because "stop_transform_" has highest priority.
   if (!transform_flag.need_trans_backend()) {
     return false;
......
-- backward_api : atan2_grad
+- backward_op : atan2_grad
   forward : atan2 (Tensor x, Tensor y) -> Tensor(out)
   args : (Tensor x, Tensor y, Tensor out_grad)
   output : Tensor(x_grad), Tensor(y_grad)
@@ -8,7 +8,7 @@
   kernel :
     func : atan2_grad
 
-- backward_api : cholesky_grad
+- backward_op : cholesky_grad
   forward : cholesky (Tensor x, bool upper) -> Tensor(out)
   args : (Tensor out, Tensor out_grad, bool upper)
   output : Tensor(x_grad)
@@ -18,7 +18,7 @@
   kernel :
     func : cholesky_grad
 
-- backward_api : cholesky_solve_grad
+- backward_op : cholesky_solve_grad
   forward : cholesky_solve (Tensor x, Tensor y, bool upper) -> Tensor(out)
   args : (Tensor x, Tensor y, Tensor out, Tensor out_grad, bool upper)
   output : Tensor(x_grad), Tensor(y_grad)
@@ -28,7 +28,7 @@
   kernel :
     func : cholesky_solve_grad
 
-- backward_api : cross_grad
+- backward_op : cross_grad
   forward : cross (Tensor x, Tensor y, int axis = 9) -> Tensor(out)
   args : (Tensor x, Tensor y, Tensor out_grad, int axis)
   output : Tensor(x_grad), Tensor(y_grad)
@@ -39,7 +39,7 @@
     func : cross_grad
     data_type : out_grad
 
-- backward_api : diag_grad
+- backward_op : diag_grad
   forward : diag (Tensor x, int offset, float padding_value) -> Tensor(out)
   args : (Tensor x, Tensor out_grad, int offset)
   output : Tensor(x_grad)
@@ -51,7 +51,7 @@
     data_type : out_grad
   no_need_buffer : x
 
-- backward_api : diagonal_grad
+- backward_op : diagonal_grad
   forward : diagonal (Tensor x, int offset, int axis1, int axis2) -> Tensor(out)
   args : (Tensor x, Tensor out_grad, int offset = 0, int axis1 = 0, int axis2 = 1)
   output : Tensor(x_grad)
@@ -63,7 +63,7 @@
     data_type : out_grad
   no_need_buffer : x
 
-- backward_api : digamma_grad
+- backward_op : digamma_grad
   forward : digamma (Tensor x) -> Tensor(out)
   args : (Tensor x, Tensor out_grad)
   output : Tensor(x_grad)
@@ -73,7 +73,7 @@
   kernel :
     func : digamma_grad
 
-- backward_api : dist_grad
+- backward_op : dist_grad
   forward : dist (Tensor x, Tensor y, float p) -> Tensor(out)
   args : (Tensor x, Tensor y, Tensor out, Tensor out_grad, float p)
   output : Tensor(x_grad), Tensor(y_grad)
@@ -83,7 +83,7 @@
   kernel :
     func : dist_grad
 
-- backward_api : dot_grad
+- backward_op : dot_grad
   forward : dot (Tensor x, Tensor y) -> Tensor(out)
   args : (Tensor x, Tensor y, Tensor out_grad)
   output : Tensor(x_grad), Tensor(y_grad)
@@ -94,7 +94,7 @@
     func : dot_grad
     data_type : out_grad
 
-- backward_api : erf_grad
+- backward_op : erf_grad
   forward : erf (Tensor x) -> Tensor(out)
   args : (Tensor x, Tensor out_grad)
   output : Tensor(x_grad)
@@ -105,7 +105,7 @@
     func : erf_grad
     data_type : out_grad
 
-- backward_api : erfinv_grad
+- backward_op : erfinv_grad
   forward : erfinv (Tensor x) -> Tensor(out)
   args : (Tensor out, Tensor out_grad)
   output : Tensor(x_grad)
@@ -115,7 +115,7 @@
   kernel :
     func : erfinv_grad
 
-- backward_api : fft_c2c_grad
+- backward_op : fft_c2c_grad
   forward: fft_c2c(Tensor x, int64_t[] axes, str normalization, bool forward) -> Tensor(out)
   args : (Tensor out_grad, int64_t[] axes, str normalization, bool forward)
   output: Tensor(x_grad)
@@ -125,7 +125,7 @@
   kernel :
     func : fft_c2c_grad
 
-- backward_api : fft_c2r_grad
+- backward_op : fft_c2r_grad
   forward: fft_c2r(Tensor x, int64_t[] axes, str normalization, bool forward, int64_t last_dim_size) -> Tensor(out)
   args : (Tensor out_grad, int64_t[] axes, str normalization, bool forward, int64_t last_dim_size)
   output: Tensor(x_grad)
@@ -135,7 +135,7 @@
     func : fft_c2r_grad
     data_type: out_grad
 
-- backward_api : fft_r2c_grad
+- backward_op : fft_r2c_grad
   forward: fft_r2c(Tensor x, int64_t[] axes, str normalization, bool forward, bool onesided) -> Tensor(out)
   args : (Tensor x, Tensor out_grad, int64_t[] axes, str normalization, bool forward, bool onesided)
   output: Tensor(x_grad)
@@ -147,7 +147,7 @@
     data_type: out_grad
   no_need_buffer: x
 
-- backward_api : graph_send_uv_grad
+- backward_op : graph_send_uv_grad
   forward : graph_send_uv (Tensor x, Tensor y, Tensor src_index, Tensor dst_index, str message_op = "ADD") -> Tensor(out)
   args: (Tensor x, Tensor y, Tensor src_index, Tensor dst_index, Tensor out_grad, str message_op = "ADD")
   output : Tensor(x_grad), Tensor(y_grad)
@@ -158,7 +158,7 @@
     func : graph_send_uv_grad
     data_type : x
 
-- backward_api : lgamma_grad
+- backward_op : lgamma_grad
   forward : lgamma(Tensor x) -> Tensor(out)
   args : (Tensor x, Tensor out_grad)
   output : Tensor(x_grad)
@@ -168,7 +168,7 @@
   kernel :
     func : lgamma_grad
 
-- backward_api : mv_grad
+- backward_op : mv_grad
   forward : mv (Tensor x, Tensor vec) -> Tensor(out)
   args : (Tensor x, Tensor vec, Tensor out_grad)
   output : Tensor(x_grad), Tensor(vec_grad)
@@ -178,7 +178,7 @@
   kernel :
     func : mv_grad
 
-- backward_api : poisson_grad
+- backward_op : poisson_grad
   forward : poisson (Tensor x) -> Tensor(out)
   args : (Tensor out_grad)
   output : Tensor(x_grad)
@@ -188,7 +188,7 @@
   kernel :
     func : poisson_grad
 
-- backward_api : solve_grad
+- backward_op : solve_grad
   forward : solve (Tensor x, Tensor y) -> Tensor(out)
   args : (Tensor x, Tensor y, Tensor out, Tensor out_grad)
   output : Tensor(x_grad), Tensor(y_grad)
@@ -198,7 +198,7 @@
   kernel :
     func : solve_grad
 
-- backward_api : trace_grad
+- backward_op : trace_grad
   forward : trace (Tensor x, int offset, int axis1, int axis2) -> Tensor(out)
   args : (Tensor x, Tensor out_grad, int offset, int axis1, int axis2)
   output : Tensor(x_grad)
@@ -210,7 +210,7 @@
     data_type : out_grad
   no_need_buffer : x
 
-- backward_api : trunc_grad
+- backward_op : trunc_grad
   forward : trunc (Tensor x) -> Tensor(out)
   args : (Tensor out_grad)
   output : Tensor(x_grad)
......
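A quick migration check over a backward YAML file, a minimal sketch assuming PyYAML, flags any entry still carrying the retired backward_api label:

import yaml

def check_backward_labels(path):
    # Returns entries that still use the old 'backward_api' label
    # (or are missing the new 'backward_op' label entirely).
    with open(path, 'r') as f:
        entries = yaml.safe_load(f) or []
    return [e for e in entries if 'backward_api' in e or 'backward_op' not in e]

# Usage: leftovers = check_backward_labels('paddle/phi/api/yaml/backward.yaml')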
@@ -50,7 +50,7 @@ class BaseAPI(object):
         self.inplace_map, self.view_map = {}, {}
 
     def get_api_name(self, api_item_yaml):
-        return api_item_yaml['api']
+        return api_item_yaml['op']
 
     def get_api_func_name(self):
         return self.api
@@ -881,7 +881,7 @@ PADDLE_API {self.get_return_type(inplace_flag=True)} {api_func_name}({self.get_d
     def get_condition_code(self, kernel_name):
         assert self.kernel['dispatch'][kernel_name], \
-            f"{self.api} api: the tensor type of inputs and outputs for kernel isn't set, see also 'kernel:func' of 'scale' in api.yaml."
+            f"{self.api} api: the tensor type of inputs and outputs for kernel isn't set, see also 'kernel:func' of 'scale' in ops.yaml."
         input_types = self.kernel['dispatch'][kernel_name][0]
         condition_list = []
         for i, in_type in enumerate(input_types):
......
@@ -319,7 +319,7 @@ def main():
     parser.add_argument('--api_yaml_path',
                         help='path to api yaml file',
                         nargs='+',
-                        default='paddle/phi/api/yaml/api.yaml')
+                        default='paddle/phi/api/yaml/ops.yaml')
     parser.add_argument('--api_header_path',
                         help='output of generated api header code file',
......
@@ -28,14 +28,14 @@ class BackwardAPI(BaseAPI):
         self.no_need_buffer = self.parse_no_need_buffer(backward_item_yaml)
 
     def get_api_name(self, api_item_yaml):
-        return api_item_yaml['backward_api']
+        return api_item_yaml['backward_op']
 
     def parse_forward_config(self, forward_config):
         # api_name (const Tensor& input, ... , int attr, ...) -> Tensor(out)
         result = re.search(
-            r"(?P<api>[a-z][a-z0-9_]+)\s*(?P<args>\([^\)]+\))\s*->\s*(?P<outputs>.+)",
+            r"(?P<op>[a-z][a-z0-9_]+)\s*(?P<args>\([^\)]+\))\s*->\s*(?P<outputs>.+)",
             forward_config)
-        api = result.group('api')
+        api = result.group('op')
         _, outputs, _, = self.parse_output(self.api, result.group('outputs'))
         outputs = [item.split('@')[0] for item in outputs]
         fw_inputs, fw_attrs = self.parse_input_and_attr(api,
......
@@ -65,7 +65,7 @@ def replace_compat_name(api_op_map, forward_api_dict, backward_api_dict):
         return names[0].strip(), names[1].split(')')[0].strip()
 
     for api_args in api_op_map:
-        api_name, op_name = get_api_and_op_name(api_args['api'])
+        api_name, op_name = get_api_and_op_name(api_args['op'])
         if api_name not in forward_api_dict:
             continue
         forward_api_item = forward_api_dict[api_name]
@@ -175,7 +175,7 @@ def replace_compat_name(api_op_map, forward_api_dict, backward_api_dict):
     ]
 
-def main(api_yaml_path, backward_yaml_path, api_compat_yaml_path,
+def main(api_yaml_path, backward_yaml_path, op_compat_yaml_path,
          api_version_yaml_path, output_op_path, output_arg_map_path):
     with open(api_yaml_path, "rt") as f:
         apis = yaml.safe_load(f)
@@ -191,9 +191,9 @@ def main(api_yaml_path, backward_yaml_path, api_compat_yaml_path,
         api_versions = yaml.safe_load(f)
     # add api version info into api
     for api_version in api_versions:
-        forward_api_dict[api_version['api']]['version'] = api_version['version']
+        forward_api_dict[api_version['op']]['version'] = api_version['version']
 
-    with open(api_compat_yaml_path, "rt") as f:
+    with open(op_compat_yaml_path, "rt") as f:
         api_op_map = yaml.safe_load(f)
 
     for api in apis:
@@ -244,7 +244,7 @@ if __name__ == "__main__":
     parser.add_argument('--backward_api_yaml_path',
                         type=str,
                         help="parsed backward api yaml file.")
-    parser.add_argument('--api_compat_yaml_path',
+    parser.add_argument('--op_compat_yaml_path',
                         type=str,
                         help="api args compat yaml file.")
     parser.add_argument('--api_version_yaml_path',
@@ -260,5 +260,5 @@ if __name__ == "__main__":
     args = parser.parse_args()
     main(args.api_yaml_path, args.backward_api_yaml_path,
-         args.api_compat_yaml_path, args.api_version_yaml_path,
+         args.op_compat_yaml_path, args.api_version_yaml_path,
          args.output_op_path, args.output_arg_map_path)
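After the flag rename, generate_op.py is invoked with --op_compat_yaml_path, matching the execute_process() call in the CMake file earlier in this diff. A hedged example invocation (the output paths here are illustrative placeholders):

import subprocess
import sys

# Mirrors the renamed flags wired up in paddle/phi/api/CMakeLists.txt.
subprocess.run([
    sys.executable, 'generator/generate_op.py',
    '--api_yaml_path', './parsed_apis/api.parsed.yaml',
    '--backward_api_yaml_path', './parsed_apis/backward_api.parsed.yaml',
    '--api_version_yaml_path', 'op_version.yaml',
    '--op_compat_yaml_path', 'op_compat.yaml',
    '--output_op_path', 'generated_op.cc.tmp',        # placeholder output path
    '--output_arg_map_path', 'generated_sig.cc.tmp',  # placeholder output path
], check=True)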
@@ -135,11 +135,11 @@ def main():
     parser.add_argument('--api_yaml_path',
                         nargs='+',
                         help='path to api yaml file',
-                        default='paddle/phi/api/yaml/api.yaml')
+                        default='paddle/phi/api/yaml/ops.yaml')
     parser.add_argument('--sparse_api_yaml_path',
                         help='path to sparse api yaml file',
-                        default='paddle/phi/api/yaml/sparse_api.yaml')
+                        default='paddle/phi/api/yaml/sparse_ops.yaml')
     parser.add_argument('--dygraph_api_header_path',
                         help='output of generated dygraph api header code file',
......
@@ -65,9 +65,9 @@ def parse_attr(attr_str):
         'name'), result.group('default_val')
 
-def generate_extra_info(api_compat_yaml_path, ops_extra_info_path):
+def generate_extra_info(op_compat_yaml_path, ops_extra_info_path):
     compat_apis = []
-    with open(api_compat_yaml_path, 'rt') as f:
+    with open(op_compat_yaml_path, 'rt') as f:
         compat_apis = yaml.safe_load(f)
 
     def get_op_name(api_item):
@@ -80,9 +80,9 @@ def generate_extra_info(api_compat_yaml_path, ops_extra_info_path):
     extra_map_str_list = []
     extra_checker_str_list = []
-    for api_compat_args in compat_apis:
-        if 'extra' in api_compat_args:
-            extra_args_map = api_compat_args['extra']
+    for op_compat_args in compat_apis:
+        if 'extra' in op_compat_args:
+            extra_args_map = op_compat_args['extra']
             # TODO(chenweihang): add inputs and outputs
             if 'attrs' in extra_args_map:
                 attr_map_list = []
@@ -103,13 +103,13 @@ def generate_extra_info(api_compat_yaml_path, ops_extra_info_path):
             api_extra_attr_checkers = ",\n ".join(
                 attr_checker_func_list)
             extra_map_str_list.append(
-                f"{{\"{get_op_name(api_compat_args['api'])}\", {{ {api_extra_attr_map} }}}}"
+                f"{{\"{get_op_name(op_compat_args['op'])}\", {{ {api_extra_attr_map} }}}}"
             )
             extra_checker_str_list.append(
-                f"{{\"{get_op_name(api_compat_args['api'])}\", {{ {api_extra_attr_checkers} }}}}"
+                f"{{\"{get_op_name(op_compat_args['op'])}\", {{ {api_extra_attr_checkers} }}}}"
             )
-            if 'backward' in api_compat_args:
-                for bw_item in api_compat_args['backward'].split(','):
+            if 'backward' in op_compat_args:
+                for bw_item in op_compat_args['backward'].split(','):
                     bw_op_name = get_op_name(bw_item)
                     extra_map_str_list.append(
                         f"{{\"{bw_op_name}\", {{ {api_extra_attr_map} }}}}")
@@ -127,9 +127,9 @@ def generate_extra_info(api_compat_yaml_path, ops_extra_info_path):
 def main():
     parser = argparse.ArgumentParser(
         description='Generate PaddlePaddle Extra Param Info for Op')
-    parser.add_argument('--api_compat_yaml_path',
+    parser.add_argument('--op_compat_yaml_path',
                         help='path to api compat yaml file',
-                        default='paddle/phi/api/yaml/api_compat.yaml')
+                        default='paddle/phi/api/yaml/op_compat.yaml')
     parser.add_argument('--ops_extra_info_path',
                         help='output of generated extra_prama_info code file',
@@ -137,10 +137,10 @@ def main():
     options = parser.parse_args()
 
-    api_compat_yaml_path = options.api_compat_yaml_path
+    op_compat_yaml_path = options.op_compat_yaml_path
     ops_extra_info_path = options.ops_extra_info_path
 
-    generate_extra_info(api_compat_yaml_path, ops_extra_info_path)
+    generate_extra_info(op_compat_yaml_path, ops_extra_info_path)
 
 if __name__ == '__main__':
......
@@ -27,7 +27,7 @@ def main(api_yaml_path, output_path, backward):
         apis = []
     else:
         apis = [
-            parse_api_entry(api, "backward_api" if backward else "api")
+            parse_api_entry(api, "backward_op" if backward else "op")
             for api in apis
         ]
......
@@ -210,9 +210,9 @@ def extract_type_and_name(records: List[Dict]) -> List[Dict]:
 def parse_forward(api_name: str, forward_config: str) -> Dict[str, Any]:
     # api_name (const Tensor& input, ... , int attr, ...) -> Tensor(out)
     result = re.search(
-        r"(?P<api>[a-z][a-z0-9_]+)\s*(?P<args>\([^\)]+\))\s*->\s*(?P<outputs>.+)",
+        r"(?P<op>[a-z][a-z0-9_]+)\s*(?P<args>\([^\)]+\))\s*->\s*(?P<outputs>.+)",
         forward_config)
-    api = result.group("api")
+    api = result.group("op")
     outputs = parse_outputs(api_name, result.group("outputs"))
     outputs = extract_type_and_name(outputs)
@@ -228,7 +228,7 @@ def parse_forward(api_name: str, forward_config: str) -> Dict[str, Any]:
     return forward_cfg
 
-def parse_api_entry(api_entry: Dict[str, Any], name_field="api"):
+def parse_api_entry(api_entry: Dict[str, Any], name_field="op"):
     api_name = api_entry[name_field]
     inputs, attrs = parse_input_and_attr(api_name, api_entry["args"])
     outputs = parse_outputs(api_name, api_entry["output"])
@@ -334,7 +334,7 @@ def parse_api_entry(api_entry: Dict[str, Any], name_field="api"):
         api["backward"] = backward
     # forward for backward_apis
-    is_backward_api = name_field == "backward_api"
+    is_backward_api = name_field == "backward_op"
     if is_backward_api:
         if "forward" in api_entry:
             forward = parse_forward(api_name, api_entry["forward"])
......
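Only the regex group name changed in parse_forward; the pattern itself is untouched. A small check of the new group label against a forward config string taken from backward.yaml:

import re

FORWARD_PATTERN = re.compile(
    r"(?P<op>[a-z][a-z0-9_]+)\s*(?P<args>\([^\)]+\))\s*->\s*(?P<outputs>.+)")

m = FORWARD_PATTERN.search("trunc (Tensor x) -> Tensor(out)")
assert m.group('op') == 'trunc'          # formerly m.group('api')
assert m.group('args') == '(Tensor x)'
assert m.group('outputs') == 'Tensor(out)'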
@@ -160,7 +160,7 @@ class SparseAPI(ForwardAPI):
     def get_condition_code(self, kernel_name):
         assert self.kernel['dispatch'][kernel_name], \
-            f"{self.api} api: the tensor type of inputs and outputs for kernel isn't set, see also 'kernel:func' of 'conv3d' in sparse_api.yaml."
+            f"{self.api} api: the tensor type of inputs and outputs for kernel isn't set, see also 'kernel:func' of 'conv3d' in sparse_ops.yaml."
         input_types = self.kernel['dispatch'][kernel_name][0]
         sparse_type_map = {
             'sparse_coo': 'DataLayout::SPARSE_COO',
@@ -284,7 +284,7 @@ def main():
         description='Generate PaddlePaddle C++ Sparse API files')
     parser.add_argument('--api_yaml_path',
                         help='path to sparse api yaml file',
-                        default='paddle/phi/api/yaml/sparse_api.yaml')
+                        default='paddle/phi/api/yaml/sparse_ops.yaml')
     parser.add_argument('--api_header_path',
                         help='output of generated api header code file',
......
@@ -164,7 +164,7 @@ def main():
         description='Generate PaddlePaddle C++ Sparse API files')
     parser.add_argument('--api_yaml_path',
                         help='path to sparse api yaml file',
-                        default='paddle/phi/api/yaml/sparse_bw_api.yaml')
+                        default='paddle/phi/api/yaml/sparse_backward.yaml')
     parser.add_argument('--api_header_path',
                         help='output of generated api header code file',
......
@@ -351,7 +351,7 @@ def main():
         description='Generate PaddlePaddle C++ Strings API files')
     parser.add_argument('--api_yaml_path',
                         help='path to sparse api yaml file',
-                        default='paddle/phi/api/yaml/strings_api.yaml')
+                        default='paddle/phi/api/yaml/strings_ops.yaml')
     parser.add_argument('--api_header_path',
                         help='output of generated api header code file',
......
@@ -167,7 +167,7 @@ def main():
     parser.add_argument('--api_yaml_path',
                         help='path to api yaml file',
                         nargs='+',
-                        default='paddle/phi/api/yaml/api.yaml')
+                        default='paddle/phi/api/yaml/ops.yaml')
     parser.add_argument(
         '--wrapped_infermeta_header_path',
         help='output of generated wrapped_infermeta header code file',
......
-- api : abs
+- op : abs
   backward : abs_grad
   extra :
     attrs : [bool use_cudnn = false, bool use_mkldnn = false]
 
-- api : addmm
+- op : acosh
+  backward : acosh_grad
+  extra :
+    attrs : [bool use_mkldnn = false, bool use_cudnn = false]
+
+- op : addmm
   backward : addmm_grad
   extra :
     attrs : [bool use_mkldnn = false]
 
-- api : affine_grid
+- op : affine_grid
   backward : affine_grid_grad
   extra :
     attrs : [bool use_cudnn = true]
 
-- api : angle
+- op : angle
   backward : angle_grad
   extra :
     attrs : [bool use_cudnn = false, bool use_mkldnn = false]
 
-- api : atan2
+- op : asinh
+  backward : asinh_grad
+  extra :
+    attrs : [bool use_mkldnn = false, bool use_cudnn = false]
+
+- op : atan2
   inputs :
     {x : X1, y : X2}
   outputs :
     out : Out
 
-- api : batch_norm
+- op : atanh
+  backward : atanh_grad
+  extra :
+    attrs : [bool use_mkldnn = false, bool use_cudnn = false]
+
+- op : batch_norm
   backward : batch_norm_grad
   extra :
     attrs : [bool use_mkldnn = false, bool fuse_with_relu = false]
 
-- api : bernoulli
+- op : bernoulli
   inputs :
     x : X
   outputs :
     out : Out
 
-- api : bicubic_interp (bicubic_interp_v2)
+- op : bicubic_interp (bicubic_interp_v2)
   backward : bicubic_interp_grad (bicubic_interp_v2_grad)
   extra :
     attrs : [bool use_mkldnn = false]
 
-- api : bilinear_interp (bilinear_interp_v2)
+- op : bilinear_interp (bilinear_interp_v2)
   backward : bilinear_interp_grad (bilinear_interp_v2_grad)
   extra :
     attrs : [bool use_mkldnn = false]
 
-- api : cholesky
+- op : ceil
+  backward : ceil_grad
+  extra :
+    attrs : [bool use_mkldnn = false, bool use_cudnn = false]
+
+- op : cholesky
   inputs :
     x : X
   outputs :
     out : Out
 
-- api : cholesky_solve
+- op : cholesky_solve
   inputs :
     {x : X, y : Y}
   outputs :
     out : Out
 
-- api : clip
+- op : clip
   backward : clip_grad
   extra :
     attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"]
 
-- api : concat
+- op : concat
   backward : concat_grad
   extra :
     attrs : [bool use_mkldnn = false, bool use_quantizer = false, str mkldnn_data_type = "float32"]
 
-- api : conv2d
+- op : conv2d
   backward : conv2d_grad
   extra :
     attrs : [bool is_test = false, bool use_cudnn = true, bool fuse_relu_before_depthwise_conv = false, bool use_mkldnn = false,
@@ -77,7 +97,7 @@
              float Scale_in_eltwise = 1.0f, 'float[] Scale_weights = {1.0f}', bool force_fp32_output = false,
             int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB(), bool exhaustive_search = false]
 
-- api : conv2d_fusion
+- op : conv2d_fusion
   extra :
     attrs : [bool is_test = false, bool use_cudnn = false, bool fuse_relu_before_depthwise_conv = false, bool use_mkldnn = false,
             bool use_quantizer = false, str mkldnn_data_type = "float32", bool fuse_relu = false,
@@ -86,7 +106,7 @@
             float Scale_in_eltwise = 1.0f, 'float[] Scale_weights = {1.0f}', bool force_fp32_output = false,
             int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB(), bool exhaustive_search = false]
 
-- api : conv2d_transpose
+- op : conv2d_transpose
   backward : conv2d_transpose_grad
   extra :
     attrs : [bool is_test = false, bool use_cudnn = true, bool use_mkldnn = false, bool force_fp32_output = false,
@@ -94,7 +114,7 @@
             str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f,
             int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB()]
 
-- api : conv3d
+- op : conv3d
   backward : conv3d_grad
   extra :
     attrs : [bool is_test = false, bool use_cudnn = true, bool use_mkldnn = false, str mkldnn_data_type = "float32", bool fuse_relu = false,
@@ -102,12 +122,22 @@
             bool use_addto = false, bool fuse_residual_connection = false, bool force_fp32_output = false,
             int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB(), bool exhaustive_search = false]
 
-- api : conv3d_transpose
+- op : conv3d_transpose
   backward : conv3d_transpose_grad
   extra :
     attrs : [bool use_cudnn = true, bool use_mkldnn = false, int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB()]
 
-- api : cross
+- op : cos
+  backward : cos_grad
+  extra :
+    attrs : [bool use_mkldnn = false, bool use_cudnn = false]
+
+- op : cosh
+  backward : cosh_grad
+  extra :
+    attrs : [bool use_mkldnn = false, bool use_cudnn = false]
+
+- op : cross
   inputs :
     {x : X, y : Y}
   attrs :
@@ -115,12 +145,12 @@
   outputs :
     out : Out
 
-- api : data_norm
+- op : data_norm
   backward : data_norm_grad
   extra :
     attrs : [bool use_mkldnn = false]
 
-- api : depthwise_conv2d
+- op : depthwise_conv2d
   backward : depthwise_conv2d_grad
   extra :
     attrs : [bool is_test = false, bool fuse_relu_before_depthwise_conv = false, bool use_mkldnn = false,
@@ -130,7 +160,7 @@
             float Scale_in_eltwise = 1.0f, 'float[] Scale_weights = {1.0f}', bool force_fp32_output = false,
             int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB(), bool exhaustive_search = false]
 
-- api : depthwise_conv2d_transpose
+- op : depthwise_conv2d_transpose
   backward : depthwise_conv2d_transpose_grad
   extra :
     attrs : [bool is_test = false, bool use_cudnn = false, bool use_mkldnn = false, bool force_fp32_output = false,
@@ -138,274 +168,421 @@
             str fuse_activation = "", float fuse_alpha = 0.0f, float fuse_beta = 0.0f,
             int workspace_size_MB = platform::GetDefaultConvWorkspaceSizeLimitMB()]
 
-- api : diag (diag_v2)
+- op : diag (diag_v2)
   backward : diag_grad (diag_v2_grad)
   inputs :
     x : X
   outputs :
     out : Out
 
-- api : diagonal
+- op : diagonal
   inputs :
     x : Input
   outputs :
     out : Out
 
-- api : digamma
+- op : digamma
   inputs :
     x : X
   outputs :
     out : Out
 
-- api : dist
+- op : dist
   inputs :
     {x : X, y : Y}
   outputs :
     out : Out
 
-- api : dot
+- op : dot
   inputs :
     {x : X, y : Y}
   outputs :
     out : Out
 
-- api : dropout
+- op : dropout
   backward : dropout_grad
   extra :
     attrs : [bool fix_seed = false, int seed = 0]
 
-- api : dropout_nd
+- op : dropout_nd
   backward : dropout_nd_grad
   extra :
     attrs : [bool fix_seed = false, int seed = 0]
 
-- api : erf
+- op : elu
+  backward : elu_grad
+  extra :
+    attrs : [bool use_mkldnn = false]
+
+- op : erf
   inputs :
     x : X
   outputs :
     out : Out
 
-- api : erfinv
+- op : erfinv
   inputs :
     x : X
   outputs :
     out : Out
 
-- api : fft_c2c
+- op : exp
+  backward : exp_grad
+  extra :
+    attrs : [bool use_mkldnn = false, bool use_cudnn = false]
+
+- op : expm1
+  backward : expm1_grad
+  extra :
+    attrs : [bool use_mkldnn = false, bool use_cudnn = false]
+
+- op : fft_c2c
   inputs: {x: X}
   outputs: {out: Out}
 
-- api : fft_c2r
+- op : fft_c2r
   inputs: {x: X}
   outputs: {out: Out}
 
-- api : fft_r2c
+- op : fft_r2c
   inputs: {x: X}
   outputs: {out: Out}
 
-- api : frobenius_norm
+- op : floor
+  backward : floor_grad
+  extra :
+    attrs : [bool use_mkldnn = false, bool use_cudnn = false]
+
+- op : frobenius_norm
   backward : frobenius_norm_grad
   extra :
     attrs : [bool use_mkldnn = false]
 
-- api : gelu
+- op : gelu
backward : gelu_grad backward : gelu_grad
extra : extra :
attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32", bool use_cudnn = false] attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32", bool use_cudnn = false]
- api : grid_sampler - op : grid_sampler
backward : grid_sampler_grad backward : grid_sampler_grad
extra : extra :
attrs : [bool use_cudnn = true] attrs : [bool use_cudnn = true]
- api : gru - op : gru
backward : gru_grad backward : gru_grad
extra : extra :
attrs : [bool is_test = false] attrs : [bool is_test = false]
- api : inplace_abn - op : hard_swish
backward : hard_swish_grad
extra :
attrs : [bool use_mkldnn = false]
- op : inplace_abn
backward : inplace_abn_grad backward : inplace_abn_grad
extra : extra :
attrs : [bool use_mkldnn = false, bool fuse_with_relu = false] attrs : [bool use_mkldnn = false, bool fuse_with_relu = false]
- api : layer_norm - op : layer_norm
backward : layer_norm_grad backward : layer_norm_grad
extra : extra :
attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32", bool is_test = false] attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32", bool is_test = false]
- api : lgamma - op : leaky_relu
backward : leaky_relu_grad
extra :
attrs : [bool use_mkldnn = false]
- op : lgamma
inputs : inputs :
x : X x : X
outputs : outputs :
out : Out out : Out
- api : linear_interp (linear_interp_v2) - op : linear_interp (linear_interp_v2)
backward : linear_interp_grad (linear_interp_v2_grad) backward : linear_interp_grad (linear_interp_v2_grad)
extra : extra :
attrs : [bool use_mkldnn = false] attrs : [bool use_mkldnn = false]
- api : log_softmax - op : log
backward : log_grad
extra :
attrs : [bool use_mkldnn = false, bool use_cudnn = false]
- op : log10
backward : log10_grad
extra :
attrs : [bool use_mkldnn = false, bool use_cudnn = false]
- op : log1p
backward : log1p_grad
extra :
attrs : [bool use_mkldnn = false, bool use_cudnn = false]
- op : log2
backward : log2_grad
extra :
attrs : [bool use_mkldnn = false, bool use_cudnn = false]
- op : log_softmax
backward : log_softmax_grad backward : log_softmax_grad
extra : extra :
attrs : [bool use_mkldnn = false] attrs : [bool use_mkldnn = false]
- api : lrn - op : logsigmoid
backward : logsigmoid_grad
extra :
attrs : [bool use_mkldnn = false, bool use_cudnn = false]
- op : lrn
backward : lrn_grad backward : lrn_grad
extra : extra :
attrs : [bool use_mkldnn = false, bool is_test = false] attrs : [bool use_mkldnn = false, bool is_test = false]
- api : matmul (matmul_v2) - op : matmul (matmul_v2)
backward : matmul_grad (matmul_v2_grad) backward : matmul_grad (matmul_v2_grad)
extra : extra :
attrs : [bool use_mkldnn = false, 'int[] fused_reshape_Out = {}', 'int[] fused_transpose_Out = {}', attrs : [bool use_mkldnn = false, 'int[] fused_reshape_Out = {}', 'int[] fused_transpose_Out = {}',
str mkldnn_data_type = "float32", 'int[] fused_reshape_X = {}', 'int[] fused_reshape_Y = {}', str mkldnn_data_type = "float32", 'int[] fused_reshape_X = {}', 'int[] fused_reshape_Y = {}',
'int[] fused_transpose_X = {}', 'int[] fused_transpose_Y = {}',] 'int[] fused_transpose_X = {}', 'int[] fused_transpose_Y = {}',]
- api : mv - op : mish
backward : mish_grad
extra :
attrs : [bool use_mkldnn = false]
- op : mv
inputs : inputs :
{x : X, vec : Vec} {x : X, vec : Vec}
outputs : outputs :
out : Out out : Out
- api : nearest_interp (nearest_interp_v2) - op : nearest_interp (nearest_interp_v2)
backward : nearest_interp_grad (nearest_interp_v2_grad) backward : nearest_interp_grad (nearest_interp_v2_grad)
extra : extra :
attrs : [bool use_mkldnn = false] attrs : [bool use_mkldnn = false]
- api : pad2d - op : pad2d
backward : pad2d_grad backward : pad2d_grad
extra : extra :
attrs : [bool use_mkldnn = false] attrs : [bool use_mkldnn = false]
- api : pad3d - op : pad3d
backward : pad3d_grad backward : pad3d_grad
extra : extra :
attrs : [bool use_mkldnn = false] attrs : [bool use_mkldnn = false]
- api : partial_sum - op : partial_sum
backward : partial_sum_grad backward : partial_sum_grad
extra : extra :
attrs : [bool use_mkldnn = false] attrs : [bool use_mkldnn = false]
- api : poisson - op : poisson
inputs : inputs :
x : X x : X
outputs : outputs :
out : Out out : Out
- api : reduce_all - op : prelu
backward : prelu_grad
extra :
attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32", bool is_test = false]
- op : reciprocal
backward : reciprocal_grad
extra :
attrs : [bool use_mkldnn = false, bool use_cudnn = false]
- op : reduce_all
extra : extra :
attrs : [bool use_mkldnn = false] attrs : [bool use_mkldnn = false]
- api : reduce_amax - op : reduce_amax
backward : reduce_amax_grad backward : reduce_amax_grad
extra : extra :
attrs : [bool use_mkldnn = false] attrs : [bool use_mkldnn = false]
- api : reduce_amin - op : reduce_amin
backward : reduce_amin_grad backward : reduce_amin_grad
extra : extra :
attrs : [bool use_mkldnn = false] attrs : [bool use_mkldnn = false]
- api : reduce_any - op : reduce_any
extra : extra :
attrs : [bool use_mkldnn = false] attrs : [bool use_mkldnn = false]
- api : reduce_max - op : reduce_max
backward : reduce_max_grad backward : reduce_max_grad
extra : extra :
attrs : [bool use_mkldnn = false] attrs : [bool use_mkldnn = false]
- api : reduce_mean - op : reduce_mean
backward : reduce_mean_grad backward : reduce_mean_grad
extra : extra :
attrs : [bool use_mkldnn = false] attrs : [bool use_mkldnn = false]
- api : reduce_min - op : reduce_min
backward : reduce_min_grad backward : reduce_min_grad
extra : extra :
attrs : [bool use_mkldnn = false] attrs : [bool use_mkldnn = false]
- api : reduce_prod - op : reduce_prod
backward : reduce_prod_grad backward : reduce_prod_grad
extra : extra :
attrs : [bool use_mkldnn = false] attrs : [bool use_mkldnn = false]
- api : reduce_sum - op : reduce_sum
backward : reduce_sum_grad backward : reduce_sum_grad
extra : extra :
attrs : [bool use_mkldnn = false] attrs : [bool use_mkldnn = false]
- api : renorm - op : relu
backward : relu_grad
extra :
attrs : [bool use_mkldnn = false, bool use_cudnn = false]
- op : relu6
backward : relu6_grad
extra :
attrs : [bool use_mkldnn = false]
- op : renorm
backward : renorm_grad backward : renorm_grad
extra : extra :
attrs : [bool use_mkldnn = false, bool use_cudnn = false] attrs : [bool use_mkldnn = false, bool use_cudnn = false]
- api : rnn - op : rnn
backward : rnn_grad backward : rnn_grad
extra : extra :
attrs : [bool is_test = false] attrs : [bool is_test = false]
- api : seed - op : round
backward : round_grad
extra :
attrs : [bool use_mkldnn = false, bool use_cudnn = false]
- op : rsqrt
backward : rsqrt_grad
extra :
attrs : [bool use_mkldnn = false, bool use_cudnn = false]
- op : seed
extra : extra :
attrs : [bool deterministic = false, str rng_name = "", bool force_cpu = false] attrs : [bool deterministic = false, str rng_name = "", bool force_cpu = false]
- api : shape - op : shape
extra : extra :
attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"] attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"]
- api : shuffle_channel - op : shuffle_channel
backward : shuffle_channel_grad backward : shuffle_channel_grad
extra : extra :
attrs : [bool use_mkldnn = false] attrs : [bool use_mkldnn = false]
- api : slice - op : sigmoid
backward : sigmoid_grad
extra :
attrs : [bool use_mkldnn = false, bool use_cudnn = false]
- op : silu
backward : silu_grad
extra :
attrs : [bool use_mkldnn = false, bool use_cudnn = false]
- op : sin
backward : sin_grad
extra :
attrs : [bool use_mkldnn = false, bool use_cudnn = false]
- op : sinh
backward : sinh_grad
extra :
attrs : [bool use_mkldnn = false, bool use_cudnn = false]
- op : slice
backward : slice_grad backward : slice_grad
extra : extra :
attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"] attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"]
- api : softmax - op : softmax
backward : softmax_grad backward : softmax_grad
extra : extra :
attrs : [bool use_cudnn = false, bool use_mkldnn = false, str mkldnn_data_type = "float32", bool is_test = false] attrs : [bool use_cudnn = false, bool use_mkldnn = false, str mkldnn_data_type = "float32", bool is_test = false]
- api : prelu
backward : prelu_grad - op : softplus
backward : softplus_grad
extra : extra :
attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32", bool is_test = false] attrs : [bool use_mkldnn = false, bool use_cudnn = false, str fuse_activation_type = "", float fuse_activation_alpha = 0.0f,
float fuse_activation_beta = 0.0f, float fuse_activation_scale = 1.0f]
- op : softsign
backward : softsign_grad
extra :
attrs : [bool use_mkldnn = false, bool use_cudnn = false]
- api : solve - op : solve
inputs : inputs :
{x : X, y : Y} {x : X, y : Y}
outputs : outputs :
out : Out out : Out
- api : squeeze (squeeze2) - op : sqrt
backward : sqrt_grad
extra :
attrs : [bool use_mkldnn = false, bool use_cudnn = false]
- op : square
backward : square_grad
extra :
attrs : [bool use_mkldnn = false, bool use_cudnn = false]
- op : squeeze (squeeze2)
backward : squeeze_grad (squeeze2_grad) backward : squeeze_grad (squeeze2_grad)
extra : extra :
attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"] attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"]
- api : stack - op : stack
backward : stack_grad backward : stack_grad
extra : extra :
attrs : [bool use_mkldnn = false] attrs : [bool use_mkldnn = false]
- api : sync_batch_norm - op : swish
backward : swish_grad
extra :
attrs : [bool use_mkldnn = false]
- op : sync_batch_norm
backward : sync_batch_norm_grad backward : sync_batch_norm_grad
extra : extra :
attrs : [bool use_mkldnn = false, bool fuse_with_relu = false] attrs : [bool use_mkldnn = false, bool fuse_with_relu = false]
- api : trace - op : tan
backward : tan_grad
extra :
attrs : [bool use_mkldnn = false, bool use_cudnn = false]
- op : tanh
backward : tanh_grad
extra :
attrs : [bool use_mkldnn = false, bool use_cudnn = false]
- op : tanh_shrink
backward : tanh_shrink_grad
extra :
attrs : [bool use_mkldnn = false, bool use_cudnn = false]
- op : trace
inputs : inputs :
x : Input x : Input
outputs : outputs :
out : Out out : Out
- api : trilinear_interp (trilinear_interp_v2) - op : trilinear_interp (trilinear_interp_v2)
backward : trilinear_interp_grad (trilinear_interp_v2_grad) backward : trilinear_interp_grad (trilinear_interp_v2_grad)
extra : extra :
attrs : [bool use_mkldnn = false] attrs : [bool use_mkldnn = false]
- api : trunc - op : trunc
inputs : inputs :
x : X x : X
outputs : outputs :
......
- api : trace - op : trace
version : version :
- checkpoint : Upgrade trace add a new attribute [axis2] - checkpoint : Upgrade trace add a new attribute [axis2]
action : action :
......
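The entries above follow a small set of patterns: an "extra : attrs : [...]" list declares legacy-only attributes together with their defaults, while "inputs :" and "outputs :" map each normalized argument name to its legacy Fluid name (x : X, out : Out). Below is a rough Python sketch of how such a name mapping can be applied; the entry and the tensor values are made up for illustration, and this is not Paddle's actual compat machinery.

# Made-up compat entry mirroring the "- op : trace" block above: normalized
# argument names on the left, legacy Fluid names on the right.
compat = {
    "op": "trace",
    "inputs": {"x": "Input"},
    "outputs": {"out": "Out"},
}

# Legacy op I/O keyed by Fluid names (placeholder values).
legacy_io = {"Input": "tensor_0", "Out": "tensor_1"}

# Rename the legacy names to the normalized ones via the compat mapping.
renamed = {new: legacy_io[old]
           for new, old in {**compat["inputs"], **compat["outputs"]}.items()}
print(renamed)  # {'x': 'tensor_0', 'out': 'tensor_1'}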
- api : atan2 - op : atan2
args : (Tensor x, Tensor y) args : (Tensor x, Tensor y)
output : Tensor output : Tensor
infer_meta : infer_meta :
...@@ -7,7 +7,7 @@ ...@@ -7,7 +7,7 @@
func : atan2 func : atan2
backward : atan2_grad backward : atan2_grad
- api : bernoulli - op : bernoulli
args : (Tensor x) args : (Tensor x)
output : Tensor(out) output : Tensor(out)
infer_meta : infer_meta :
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
kernel : kernel :
func : bernoulli func : bernoulli
- api : cholesky - op : cholesky
args : (Tensor x, bool upper=false) args : (Tensor x, bool upper=false)
output : Tensor output : Tensor
infer_meta : infer_meta :
...@@ -24,7 +24,7 @@ ...@@ -24,7 +24,7 @@
func : cholesky func : cholesky
backward : cholesky_grad backward : cholesky_grad
- api : cholesky_solve - op : cholesky_solve
args : (Tensor x, Tensor y, bool upper=false) args : (Tensor x, Tensor y, bool upper=false)
output : Tensor output : Tensor
infer_meta : infer_meta :
...@@ -33,7 +33,7 @@ ...@@ -33,7 +33,7 @@
func : cholesky_solve func : cholesky_solve
backward : cholesky_solve_grad backward : cholesky_solve_grad
- api : cross - op : cross
args : (Tensor x, Tensor y, int axis = 9) args : (Tensor x, Tensor y, int axis = 9)
output : Tensor output : Tensor
infer_meta : infer_meta :
...@@ -43,7 +43,7 @@ ...@@ -43,7 +43,7 @@
data_type : x data_type : x
backward : cross_grad backward : cross_grad
- api : diag - op : diag
args : (Tensor x, int offset = 0, float padding_value = 0.0) args : (Tensor x, int offset = 0, float padding_value = 0.0)
output : Tensor output : Tensor
infer_meta : infer_meta :
...@@ -52,7 +52,7 @@ ...@@ -52,7 +52,7 @@
func : diag func : diag
backward : diag_grad backward : diag_grad
- api : diagonal - op : diagonal
args : (Tensor x, int offset = 0, int axis1 = 0, int axis2 = 1) args : (Tensor x, int offset = 0, int axis1 = 0, int axis2 = 1)
output : Tensor output : Tensor
infer_meta : infer_meta :
...@@ -61,7 +61,7 @@ ...@@ -61,7 +61,7 @@
func : diagonal func : diagonal
backward : diagonal_grad backward : diagonal_grad
- api : digamma - op : digamma
args : (Tensor x) args : (Tensor x)
output : Tensor output : Tensor
infer_meta : infer_meta :
...@@ -70,7 +70,7 @@ ...@@ -70,7 +70,7 @@
func : digamma func : digamma
backward : digamma_grad backward : digamma_grad
- api : dist - op : dist
args : (Tensor x, Tensor y, float p = 2.0) args : (Tensor x, Tensor y, float p = 2.0)
output : Tensor output : Tensor
infer_meta : infer_meta :
...@@ -79,7 +79,7 @@ ...@@ -79,7 +79,7 @@
func : dist func : dist
backward : dist_grad backward : dist_grad
- api : dot - op : dot
args : (Tensor x, Tensor y) args : (Tensor x, Tensor y)
output : Tensor output : Tensor
infer_meta : infer_meta :
...@@ -89,7 +89,7 @@ ...@@ -89,7 +89,7 @@
data_type : x data_type : x
backward : dot_grad backward : dot_grad
- api : erf - op : erf
args : (Tensor x) args : (Tensor x)
output : Tensor output : Tensor
infer_meta : infer_meta :
...@@ -98,7 +98,7 @@ ...@@ -98,7 +98,7 @@
func : erf func : erf
backward : erf_grad backward : erf_grad
- api : erfinv - op : erfinv
args : (Tensor x) args : (Tensor x)
output : Tensor(out) output : Tensor(out)
infer_meta : infer_meta :
...@@ -108,7 +108,7 @@ ...@@ -108,7 +108,7 @@
inplace : (x -> out) inplace : (x -> out)
backward : erfinv_grad backward : erfinv_grad
- api : fft_c2c - op : fft_c2c
args : (Tensor x, int64_t[] axes, str normalization, bool forward) args : (Tensor x, int64_t[] axes, str normalization, bool forward)
output : Tensor output : Tensor
infer_meta : infer_meta :
...@@ -117,7 +117,7 @@ ...@@ -117,7 +117,7 @@
func : fft_c2c func : fft_c2c
backward : fft_c2c_grad backward : fft_c2c_grad
- api : fft_c2r - op : fft_c2r
args : (Tensor x, int64_t[] axes, str normalization, bool forward, int64_t last_dim_size=0L) args : (Tensor x, int64_t[] axes, str normalization, bool forward, int64_t last_dim_size=0L)
output : Tensor output : Tensor
infer_meta : infer_meta :
...@@ -126,7 +126,7 @@ ...@@ -126,7 +126,7 @@
func : fft_c2r func : fft_c2r
backward : fft_c2r_grad backward : fft_c2r_grad
- api : fft_r2c - op : fft_r2c
args : (Tensor x, int64_t[] axes, str normalization, bool forward, bool onesided) args : (Tensor x, int64_t[] axes, str normalization, bool forward, bool onesided)
output : Tensor output : Tensor
infer_meta : infer_meta :
...@@ -135,7 +135,7 @@ ...@@ -135,7 +135,7 @@
func : fft_r2c func : fft_r2c
backward : fft_r2c_grad backward : fft_r2c_grad
- api : graph_send_uv - op : graph_send_uv
args : (Tensor x, Tensor y, Tensor src_index, Tensor dst_index, str message_op = "ADD") args : (Tensor x, Tensor y, Tensor src_index, Tensor dst_index, str message_op = "ADD")
output : Tensor(out) output : Tensor(out)
infer_meta : infer_meta :
...@@ -145,7 +145,7 @@ ...@@ -145,7 +145,7 @@
data_type : x data_type : x
backward : graph_send_uv_grad backward : graph_send_uv_grad
- api : lgamma - op : lgamma
args : (Tensor x) args : (Tensor x)
output : Tensor(out) output : Tensor(out)
infer_meta : infer_meta :
...@@ -154,7 +154,7 @@ ...@@ -154,7 +154,7 @@
func : lgamma func : lgamma
backward : lgamma_grad backward : lgamma_grad
- api : mv - op : mv
args : (Tensor x, Tensor vec) args : (Tensor x, Tensor vec)
output : Tensor output : Tensor
infer_meta : infer_meta :
...@@ -163,7 +163,7 @@ ...@@ -163,7 +163,7 @@
func : mv func : mv
backward : mv_grad backward : mv_grad
- api : poisson - op : poisson
args : (Tensor x) args : (Tensor x)
output : Tensor output : Tensor
infer_meta : infer_meta :
...@@ -172,7 +172,7 @@ ...@@ -172,7 +172,7 @@
func : poisson func : poisson
backward : poisson_grad backward : poisson_grad
- api : solve - op : solve
args : (Tensor x, Tensor y) args : (Tensor x, Tensor y)
output : Tensor output : Tensor
infer_meta : infer_meta :
...@@ -182,7 +182,7 @@ ...@@ -182,7 +182,7 @@
data_type : x data_type : x
backward : solve_grad backward : solve_grad
- api : trace - op : trace
args : (Tensor x, int offset = 0, int axis1 = 0, int axis2 = 1) args : (Tensor x, int offset = 0, int axis1 = 0, int axis2 = 1)
output : Tensor output : Tensor
infer_meta : infer_meta :
...@@ -191,7 +191,7 @@ ...@@ -191,7 +191,7 @@
func : trace func : trace
backward : trace_grad backward : trace_grad
- api : trunc - op : trunc
args : (Tensor x) args : (Tensor x)
output : Tensor output : Tensor
infer_meta : infer_meta :
......
- backward_api : abs_grad - backward_op : abs_grad
forward : abs(Tensor x) -> Tensor(out) forward : abs(Tensor x) -> Tensor(out)
args : (Tensor x, Tensor out_grad) args : (Tensor x, Tensor out_grad)
output : Tensor(x_grad) output : Tensor(x_grad)
...@@ -6,7 +6,7 @@ ...@@ -6,7 +6,7 @@
func : abs_coo_grad {sparse_coo, sparse_coo -> sparse_coo}, func : abs_coo_grad {sparse_coo, sparse_coo -> sparse_coo},
abs_csr_grad {sparse_csr, sparse_csr -> sparse_csr} abs_csr_grad {sparse_csr, sparse_csr -> sparse_csr}
- backward_api : acos_grad - backward_op : acos_grad
forward : acos(Tensor x) -> Tensor(out) forward : acos(Tensor x) -> Tensor(out)
args : (Tensor x, Tensor out_grad) args : (Tensor x, Tensor out_grad)
output : Tensor(x_grad) output : Tensor(x_grad)
...@@ -14,7 +14,7 @@ ...@@ -14,7 +14,7 @@
func : acos_coo_grad {sparse_coo, sparse_coo -> sparse_coo}, func : acos_coo_grad {sparse_coo, sparse_coo -> sparse_coo},
acos_csr_grad {sparse_csr, sparse_csr -> sparse_csr} acos_csr_grad {sparse_csr, sparse_csr -> sparse_csr}
- backward_api : acosh_grad - backward_op : acosh_grad
forward : acosh(Tensor x) -> Tensor(out) forward : acosh(Tensor x) -> Tensor(out)
args : (Tensor x, Tensor out_grad) args : (Tensor x, Tensor out_grad)
output : Tensor(x_grad) output : Tensor(x_grad)
...@@ -22,7 +22,7 @@ ...@@ -22,7 +22,7 @@
func : acosh_coo_grad {sparse_coo, sparse_coo -> sparse_coo}, func : acosh_coo_grad {sparse_coo, sparse_coo -> sparse_coo},
acosh_csr_grad {sparse_csr, sparse_csr -> sparse_csr} acosh_csr_grad {sparse_csr, sparse_csr -> sparse_csr}
- backward_api : add_grad - backward_op : add_grad
forward : add(Tensor x, Tensor y) -> Tensor(out) forward : add(Tensor x, Tensor y) -> Tensor(out)
args : (Tensor x, Tensor y, Tensor out_grad) args : (Tensor x, Tensor y, Tensor out_grad)
output : Tensor(x_grad), Tensor(y_grad) output : Tensor(x_grad), Tensor(y_grad)
...@@ -30,7 +30,7 @@ ...@@ -30,7 +30,7 @@
func : add_coo_coo_grad{sparse_coo, sparse_coo, sparse_coo -> sparse_coo, sparse_coo}, func : add_coo_coo_grad{sparse_coo, sparse_coo, sparse_coo -> sparse_coo, sparse_coo},
add_csr_csr_grad{sparse_csr, sparse_csr, sparse_csr -> sparse_csr, sparse_csr} add_csr_csr_grad{sparse_csr, sparse_csr, sparse_csr -> sparse_csr, sparse_csr}
- backward_api : addmm_grad - backward_op : addmm_grad
forward : addmm(Tensor input, Tensor x, Tensor y, float alpha=1.0, float beta=1.0) -> Tensor(out) forward : addmm(Tensor input, Tensor x, Tensor y, float alpha=1.0, float beta=1.0) -> Tensor(out)
args : (Tensor input, Tensor x, Tensor y, Tensor out_grad, float alpha=1.0, float beta=1.0) args : (Tensor input, Tensor x, Tensor y, Tensor out_grad, float alpha=1.0, float beta=1.0)
output : Tensor(input_grad), Tensor(x_grad), Tensor(y_grad) output : Tensor(input_grad), Tensor(x_grad), Tensor(y_grad)
...@@ -40,7 +40,7 @@ ...@@ -40,7 +40,7 @@
addmm_coo_dense_grad {dense, sparse_coo, dense, dense -> dense, sparse_coo, dense}, addmm_coo_dense_grad {dense, sparse_coo, dense, dense -> dense, sparse_coo, dense},
addmm_coo_coo_grad {sparse_coo, sparse_coo, sparse_coo, sparse_coo -> sparse_coo, sparse_coo, sparse_coo} addmm_coo_coo_grad {sparse_coo, sparse_coo, sparse_coo, sparse_coo -> sparse_coo, sparse_coo, sparse_coo}
- backward_api : asin_grad - backward_op : asin_grad
forward : asin(Tensor x) -> Tensor(out) forward : asin(Tensor x) -> Tensor(out)
args : (Tensor x, Tensor out_grad) args : (Tensor x, Tensor out_grad)
output : Tensor(x_grad) output : Tensor(x_grad)
...@@ -48,7 +48,7 @@ ...@@ -48,7 +48,7 @@
func : asin_coo_grad {sparse_coo, sparse_coo -> sparse_coo}, func : asin_coo_grad {sparse_coo, sparse_coo -> sparse_coo},
asin_csr_grad {sparse_csr, sparse_csr -> sparse_csr} asin_csr_grad {sparse_csr, sparse_csr -> sparse_csr}
- backward_api : asinh_grad - backward_op : asinh_grad
forward : asinh(Tensor x) -> Tensor(out) forward : asinh(Tensor x) -> Tensor(out)
args : (Tensor x, Tensor out_grad) args : (Tensor x, Tensor out_grad)
output : Tensor(x_grad) output : Tensor(x_grad)
...@@ -56,7 +56,7 @@ ...@@ -56,7 +56,7 @@
func : asinh_coo_grad {sparse_coo, sparse_coo -> sparse_coo}, func : asinh_coo_grad {sparse_coo, sparse_coo -> sparse_coo},
asinh_csr_grad {sparse_csr, sparse_csr -> sparse_csr} asinh_csr_grad {sparse_csr, sparse_csr -> sparse_csr}
- backward_api : atan_grad - backward_op : atan_grad
forward : atan(Tensor x) -> Tensor(out) forward : atan(Tensor x) -> Tensor(out)
args : (Tensor x, Tensor out_grad) args : (Tensor x, Tensor out_grad)
output : Tensor(x_grad) output : Tensor(x_grad)
...@@ -64,7 +64,7 @@ ...@@ -64,7 +64,7 @@
func : atan_coo_grad {sparse_coo, sparse_coo -> sparse_coo}, func : atan_coo_grad {sparse_coo, sparse_coo -> sparse_coo},
atan_csr_grad {sparse_csr, sparse_csr -> sparse_csr} atan_csr_grad {sparse_csr, sparse_csr -> sparse_csr}
- backward_api : atanh_grad - backward_op : atanh_grad
forward : atanh(Tensor x) -> Tensor(out) forward : atanh(Tensor x) -> Tensor(out)
args : (Tensor x, Tensor out_grad) args : (Tensor x, Tensor out_grad)
output : Tensor(x_grad) output : Tensor(x_grad)
...@@ -72,7 +72,7 @@ ...@@ -72,7 +72,7 @@
func : atanh_coo_grad {sparse_coo, sparse_coo -> sparse_coo}, func : atanh_coo_grad {sparse_coo, sparse_coo -> sparse_coo},
atanh_csr_grad {sparse_csr, sparse_csr -> sparse_csr} atanh_csr_grad {sparse_csr, sparse_csr -> sparse_csr}
- backward_api : cast_grad - backward_op : cast_grad
forward : cast(Tensor x, DataType index_dtype, DataType value_dtype) -> Tensor(out) forward : cast(Tensor x, DataType index_dtype, DataType value_dtype) -> Tensor(out)
args : (Tensor x, Tensor out_grad, DataType value_dtype) args : (Tensor x, Tensor out_grad, DataType value_dtype)
output : Tensor(x_grad) output : Tensor(x_grad)
...@@ -81,14 +81,14 @@ ...@@ -81,14 +81,14 @@
cast_csr_grad {sparse_csr, sparse_csr -> sparse_csr} cast_csr_grad {sparse_csr, sparse_csr -> sparse_csr}
data_type : out_grad data_type : out_grad
- backward_api : conv3d_coo_grad - backward_op : conv3d_coo_grad
forward : conv3d_coo (Tensor x, Tensor kernel, int[] paddings, int[] dilations, int[] strides, int groups, bool subm, str key) -> Tensor(out), Tensor(rulebook), Tensor(counter) forward : conv3d_coo (Tensor x, Tensor kernel, int[] paddings, int[] dilations, int[] strides, int groups, bool subm, str key) -> Tensor(out), Tensor(rulebook), Tensor(counter)
args : (Tensor x, Tensor kernel, Tensor out, Tensor rulebook, Tensor counter, Tensor out_grad, int[] paddings, int[] dilations, int[] strides, int groups, bool subm, str key) args : (Tensor x, Tensor kernel, Tensor out, Tensor rulebook, Tensor counter, Tensor out_grad, int[] paddings, int[] dilations, int[] strides, int groups, bool subm, str key)
output : Tensor(x_grad), Tensor(kernel_grad) output : Tensor(x_grad), Tensor(kernel_grad)
kernel : kernel :
func : conv3d_coo_grad{sparse_coo, dense, sparse_coo, dense, dense, sparse_coo -> sparse_coo, dense} func : conv3d_coo_grad{sparse_coo, dense, sparse_coo, dense, dense, sparse_coo -> sparse_coo, dense}
- backward_api : divide_grad - backward_op : divide_grad
forward : divide(Tensor x, Tensor y) -> Tensor(out) forward : divide(Tensor x, Tensor y) -> Tensor(out)
args : (Tensor x, Tensor y, Tensor out, Tensor out_grad) args : (Tensor x, Tensor y, Tensor out, Tensor out_grad)
output : Tensor(x_grad), Tensor(y_grad) output : Tensor(x_grad), Tensor(y_grad)
...@@ -96,13 +96,13 @@ ...@@ -96,13 +96,13 @@
func : divide_coo_coo_grad{sparse_coo, sparse_coo, sparse_coo, sparse_coo -> sparse_coo, sparse_coo}, func : divide_coo_coo_grad{sparse_coo, sparse_coo, sparse_coo, sparse_coo -> sparse_coo, sparse_coo},
divide_csr_csr_grad{sparse_csr, sparse_csr, sparse_csr, sparse_csr -> sparse_csr, sparse_csr} divide_csr_csr_grad{sparse_csr, sparse_csr, sparse_csr, sparse_csr -> sparse_csr, sparse_csr}
- backward_api : divide_scalar_grad - backward_op : divide_scalar_grad
forward : divide_scalar (Tensor x, float scalar) -> Tensor(out) forward : divide_scalar (Tensor x, float scalar) -> Tensor(out)
args : (Tensor out_grad, float scalar) args : (Tensor out_grad, float scalar)
output : Tensor(x_grad) output : Tensor(x_grad)
invoke : divide_scalar(out_grad, scalar) invoke : divide_scalar(out_grad, scalar)
- backward_api : expm1_grad - backward_op : expm1_grad
forward : expm1(Tensor x) -> Tensor(out) forward : expm1(Tensor x) -> Tensor(out)
args : (Tensor out, Tensor out_grad) args : (Tensor out, Tensor out_grad)
output : Tensor(x_grad) output : Tensor(x_grad)
...@@ -110,7 +110,7 @@ ...@@ -110,7 +110,7 @@
func : expm1_coo_grad {sparse_coo, sparse_coo -> sparse_coo}, func : expm1_coo_grad {sparse_coo, sparse_coo -> sparse_coo},
expm1_csr_grad {sparse_csr, sparse_csr -> sparse_csr} expm1_csr_grad {sparse_csr, sparse_csr -> sparse_csr}
- backward_api : leaky_relu_grad - backward_op : leaky_relu_grad
forward : leaky_relu(Tensor x, float alpha) -> Tensor(out) forward : leaky_relu(Tensor x, float alpha) -> Tensor(out)
args : (Tensor x, Tensor out_grad, float alpha) args : (Tensor x, Tensor out_grad, float alpha)
output : Tensor(x_grad) output : Tensor(x_grad)
...@@ -118,7 +118,7 @@ ...@@ -118,7 +118,7 @@
func : leaky_relu_coo_grad {sparse_coo, sparse_coo -> sparse_coo}, func : leaky_relu_coo_grad {sparse_coo, sparse_coo -> sparse_coo},
leaky_relu_csr_grad {sparse_csr, sparse_csr -> sparse_csr} leaky_relu_csr_grad {sparse_csr, sparse_csr -> sparse_csr}
- backward_api : log1p_grad - backward_op : log1p_grad
forward : log1p(Tensor x) -> Tensor(out) forward : log1p(Tensor x) -> Tensor(out)
args : (Tensor x, Tensor out_grad) args : (Tensor x, Tensor out_grad)
output : Tensor(x_grad) output : Tensor(x_grad)
...@@ -126,14 +126,14 @@ ...@@ -126,14 +126,14 @@
func : log1p_coo_grad {sparse_coo, sparse_coo -> sparse_coo}, func : log1p_coo_grad {sparse_coo, sparse_coo -> sparse_coo},
log1p_csr_grad {sparse_csr, sparse_csr -> sparse_csr} log1p_csr_grad {sparse_csr, sparse_csr -> sparse_csr}
- backward_api : masked_matmul_grad - backward_op : masked_matmul_grad
forward : masked_matmul(Tensor x, Tensor y, Tensor mask) -> Tensor(out) forward : masked_matmul(Tensor x, Tensor y, Tensor mask) -> Tensor(out)
args : (Tensor x, Tensor y, Tensor out_grad) args : (Tensor x, Tensor y, Tensor out_grad)
output : Tensor(x_grad), Tensor(y_grad) output : Tensor(x_grad), Tensor(y_grad)
kernel : kernel :
func : masked_matmul_csr_grad{dense, dense, sparse_csr -> dense, dense} func : masked_matmul_csr_grad{dense, dense, sparse_csr -> dense, dense}
- backward_api : matmul_grad - backward_op : matmul_grad
forward : matmul(Tensor x, Tensor y) -> Tensor(out) forward : matmul(Tensor x, Tensor y) -> Tensor(out)
args : (Tensor x, Tensor y, Tensor out_grad) args : (Tensor x, Tensor y, Tensor out_grad)
output : Tensor(x_grad), Tensor(y_grad) output : Tensor(x_grad), Tensor(y_grad)
...@@ -143,14 +143,14 @@ ...@@ -143,14 +143,14 @@
matmul_coo_dense_grad {sparse_coo, dense, dense -> sparse_coo, dense}, matmul_coo_dense_grad {sparse_coo, dense, dense -> sparse_coo, dense},
matmul_coo_coo_grad {sparse_coo, sparse_coo, sparse_coo -> sparse_coo, sparse_coo} matmul_coo_coo_grad {sparse_coo, sparse_coo, sparse_coo -> sparse_coo, sparse_coo}
- backward_api : maxpool_grad - backward_op : maxpool_grad
forward : maxpool(Tensor x, int[] kernel_sizes, int[] paddings, int[] dilations, int[] strides) -> Tensor(out), Tensor(rulebook), Tensor(counter) forward : maxpool(Tensor x, int[] kernel_sizes, int[] paddings, int[] dilations, int[] strides) -> Tensor(out), Tensor(rulebook), Tensor(counter)
args : (Tensor x, Tensor rulebook, Tensor counter, Tensor out, Tensor out_grad, int[] kernel_sizes) args : (Tensor x, Tensor rulebook, Tensor counter, Tensor out, Tensor out_grad, int[] kernel_sizes)
output : Tensor(x_grad) output : Tensor(x_grad)
kernel : kernel :
func : maxpool_coo_grad {sparse_coo, dense, dense, sparse_coo, sparse_coo -> sparse_coo} func : maxpool_coo_grad {sparse_coo, dense, dense, sparse_coo, sparse_coo -> sparse_coo}
- backward_api : multiply_grad - backward_op : multiply_grad
forward : multiply(Tensor x, Tensor y) -> Tensor(out) forward : multiply(Tensor x, Tensor y) -> Tensor(out)
args : (Tensor x, Tensor y, Tensor out_grad) args : (Tensor x, Tensor y, Tensor out_grad)
output : Tensor(x_grad), Tensor(y_grad) output : Tensor(x_grad), Tensor(y_grad)
...@@ -158,7 +158,7 @@ ...@@ -158,7 +158,7 @@
func : multiply_coo_coo_grad{sparse_coo, sparse_coo, sparse_coo -> sparse_coo, sparse_coo}, func : multiply_coo_coo_grad{sparse_coo, sparse_coo, sparse_coo -> sparse_coo, sparse_coo},
multiply_csr_csr_grad{sparse_csr, sparse_csr, sparse_csr -> sparse_csr, sparse_csr} multiply_csr_csr_grad{sparse_csr, sparse_csr, sparse_csr -> sparse_csr, sparse_csr}
- backward_api : mv_grad - backward_op : mv_grad
forward : mv(Tensor x, Tensor vec) -> Tensor(out) forward : mv(Tensor x, Tensor vec) -> Tensor(out)
args : (Tensor x, Tensor vec, Tensor out_grad) args : (Tensor x, Tensor vec, Tensor out_grad)
output : Tensor(x_grad), Tensor(vec_grad) output : Tensor(x_grad), Tensor(vec_grad)
...@@ -166,7 +166,7 @@ ...@@ -166,7 +166,7 @@
func : mv_coo_grad{sparse_coo, dense, dense -> sparse_coo, dense}, func : mv_coo_grad{sparse_coo, dense, dense -> sparse_coo, dense},
mv_csr_grad{sparse_csr, dense, dense -> sparse_csr, dense} mv_csr_grad{sparse_csr, dense, dense -> sparse_csr, dense}
- backward_api : pow_grad - backward_op : pow_grad
forward : pow(Tensor x, float factor) -> Tensor(out) forward : pow(Tensor x, float factor) -> Tensor(out)
args : (Tensor x, Tensor out_grad, float factor) args : (Tensor x, Tensor out_grad, float factor)
output : Tensor(x_grad) output : Tensor(x_grad)
...@@ -174,7 +174,7 @@ ...@@ -174,7 +174,7 @@
func : pow_coo_grad {sparse_coo, sparse_coo -> sparse_coo}, func : pow_coo_grad {sparse_coo, sparse_coo -> sparse_coo},
pow_csr_grad {sparse_csr, sparse_csr -> sparse_csr} pow_csr_grad {sparse_csr, sparse_csr -> sparse_csr}
- backward_api : relu6_grad - backward_op : relu6_grad
forward : relu6(Tensor x, float threshold) -> Tensor(out) forward : relu6(Tensor x, float threshold) -> Tensor(out)
args : (Tensor out, Tensor out_grad, float threshold) args : (Tensor out, Tensor out_grad, float threshold)
output : Tensor(x_grad) output : Tensor(x_grad)
...@@ -182,7 +182,7 @@ ...@@ -182,7 +182,7 @@
func : relu6_coo_grad {sparse_coo, sparse_coo -> sparse_coo}, func : relu6_coo_grad {sparse_coo, sparse_coo -> sparse_coo},
relu6_csr_grad {sparse_csr, sparse_csr -> sparse_csr} relu6_csr_grad {sparse_csr, sparse_csr -> sparse_csr}
- backward_api : relu_grad - backward_op : relu_grad
forward : relu(Tensor x) -> Tensor(out) forward : relu(Tensor x) -> Tensor(out)
args : (Tensor out, Tensor out_grad) args : (Tensor out, Tensor out_grad)
output : Tensor(x_grad) output : Tensor(x_grad)
...@@ -190,13 +190,13 @@ ...@@ -190,13 +190,13 @@
func : relu_coo_grad {sparse_coo, sparse_coo -> sparse_coo}, func : relu_coo_grad {sparse_coo, sparse_coo -> sparse_coo},
relu_csr_grad {sparse_csr, sparse_csr -> sparse_csr} relu_csr_grad {sparse_csr, sparse_csr -> sparse_csr}
- backward_api : scale_grad - backward_op : scale_grad
forward : scale(Tensor x, float scale, float bias, bool bias_after_scale) -> Tensor(out) forward : scale(Tensor x, float scale, float bias, bool bias_after_scale) -> Tensor(out)
args : (Tensor out_grad, float scale) args : (Tensor out_grad, float scale)
output : Tensor(x_grad) output : Tensor(x_grad)
invoke : scale(out_grad, scale, 0.0, true) invoke : scale(out_grad, scale, 0.0, true)
- backward_api : sin_grad - backward_op : sin_grad
forward : sin(Tensor x) -> Tensor(out) forward : sin(Tensor x) -> Tensor(out)
args : (Tensor x, Tensor out_grad) args : (Tensor x, Tensor out_grad)
output : Tensor(x_grad) output : Tensor(x_grad)
...@@ -204,7 +204,7 @@ ...@@ -204,7 +204,7 @@
func : sin_coo_grad {sparse_coo, sparse_coo -> sparse_coo}, func : sin_coo_grad {sparse_coo, sparse_coo -> sparse_coo},
sin_csr_grad {sparse_csr, sparse_csr -> sparse_csr} sin_csr_grad {sparse_csr, sparse_csr -> sparse_csr}
- backward_api : sinh_grad - backward_op : sinh_grad
forward : sinh(Tensor x) -> Tensor(out) forward : sinh(Tensor x) -> Tensor(out)
args : (Tensor x, Tensor out_grad) args : (Tensor x, Tensor out_grad)
output : Tensor(x_grad) output : Tensor(x_grad)
...@@ -212,21 +212,21 @@ ...@@ -212,21 +212,21 @@
func : sinh_coo_grad {sparse_coo, sparse_coo -> sparse_coo}, func : sinh_coo_grad {sparse_coo, sparse_coo -> sparse_coo},
sinh_csr_grad {sparse_csr, sparse_csr -> sparse_csr} sinh_csr_grad {sparse_csr, sparse_csr -> sparse_csr}
- backward_api : softmax_grad - backward_op : softmax_grad
forward : softmax(Tensor x, int axis=-1) -> Tensor(out) forward : softmax(Tensor x, int axis=-1) -> Tensor(out)
args : (Tensor out, Tensor out_grad, int axis) args : (Tensor out, Tensor out_grad, int axis)
output : Tensor(x_grad) output : Tensor(x_grad)
kernel : kernel :
func : softmax_csr_grad{sparse_csr, sparse_csr -> sparse_csr} func : softmax_csr_grad{sparse_csr, sparse_csr -> sparse_csr}
- backward_api : sparse_coo_tensor_grad - backward_op : sparse_coo_tensor_grad
forward : sparse_coo_tensor(Tensor values, Tensor indices, IntArray dense_shape) -> Tensor(out) forward : sparse_coo_tensor(Tensor values, Tensor indices, IntArray dense_shape) -> Tensor(out)
args : (Tensor indices, Tensor out_grad) args : (Tensor indices, Tensor out_grad)
output : Tensor(values_grad) output : Tensor(values_grad)
kernel : kernel :
func : sparse_coo_tensor_grad{dense, sparse_coo -> dense} func : sparse_coo_tensor_grad{dense, sparse_coo -> dense}
- backward_api : sqrt_grad - backward_op : sqrt_grad
forward : sqrt(Tensor x) -> Tensor(out) forward : sqrt(Tensor x) -> Tensor(out)
args : (Tensor out, Tensor out_grad) args : (Tensor out, Tensor out_grad)
output : Tensor(x_grad) output : Tensor(x_grad)
...@@ -234,7 +234,7 @@ ...@@ -234,7 +234,7 @@
func : sqrt_coo_grad {sparse_coo, sparse_coo -> sparse_coo}, func : sqrt_coo_grad {sparse_coo, sparse_coo -> sparse_coo},
sqrt_csr_grad {sparse_csr, sparse_csr -> sparse_csr} sqrt_csr_grad {sparse_csr, sparse_csr -> sparse_csr}
- backward_api : square_grad - backward_op : square_grad
forward : square(Tensor x) -> Tensor(out) forward : square(Tensor x) -> Tensor(out)
args : (Tensor x, Tensor out_grad) args : (Tensor x, Tensor out_grad)
output : Tensor(x_grad) output : Tensor(x_grad)
...@@ -242,7 +242,7 @@ ...@@ -242,7 +242,7 @@
func : square_coo_grad {sparse_coo, sparse_coo -> sparse_coo}, func : square_coo_grad {sparse_coo, sparse_coo -> sparse_coo},
square_csr_grad {sparse_csr, sparse_csr -> sparse_csr} square_csr_grad {sparse_csr, sparse_csr -> sparse_csr}
- backward_api : subtract_grad - backward_op : subtract_grad
forward : subtract(Tensor x, Tensor y) -> Tensor(out) forward : subtract(Tensor x, Tensor y) -> Tensor(out)
args : (Tensor x, Tensor y, Tensor out_grad) args : (Tensor x, Tensor y, Tensor out_grad)
output : Tensor(x_grad), Tensor(y_grad) output : Tensor(x_grad), Tensor(y_grad)
...@@ -250,7 +250,7 @@ ...@@ -250,7 +250,7 @@
func : subtract_coo_coo_grad{sparse_coo, sparse_coo, sparse_coo -> sparse_coo, sparse_coo}, func : subtract_coo_coo_grad{sparse_coo, sparse_coo, sparse_coo -> sparse_coo, sparse_coo},
subtract_csr_csr_grad{sparse_csr, sparse_csr, sparse_csr -> sparse_csr, sparse_csr} subtract_csr_csr_grad{sparse_csr, sparse_csr, sparse_csr -> sparse_csr, sparse_csr}
- backward_api : tan_grad - backward_op : tan_grad
forward : tan(Tensor x) -> Tensor(out) forward : tan(Tensor x) -> Tensor(out)
args : (Tensor x, Tensor out_grad) args : (Tensor x, Tensor out_grad)
output : Tensor(x_grad) output : Tensor(x_grad)
...@@ -258,7 +258,7 @@ ...@@ -258,7 +258,7 @@
func : tan_coo_grad {sparse_coo, sparse_coo -> sparse_coo}, func : tan_coo_grad {sparse_coo, sparse_coo -> sparse_coo},
tan_csr_grad {sparse_csr, sparse_csr -> sparse_csr} tan_csr_grad {sparse_csr, sparse_csr -> sparse_csr}
- backward_api : tanh_grad - backward_op : tanh_grad
forward : tanh(Tensor x) -> Tensor(out) forward : tanh(Tensor x) -> Tensor(out)
args : (Tensor out, Tensor out_grad) args : (Tensor out, Tensor out_grad)
output : Tensor(x_grad) output : Tensor(x_grad)
...@@ -266,28 +266,28 @@ ...@@ -266,28 +266,28 @@
func : tanh_coo_grad {sparse_coo, sparse_coo -> sparse_coo}, func : tanh_coo_grad {sparse_coo, sparse_coo -> sparse_coo},
tanh_csr_grad {sparse_csr, sparse_csr -> sparse_csr} tanh_csr_grad {sparse_csr, sparse_csr -> sparse_csr}
- backward_api : to_dense_grad - backward_op : to_dense_grad
forward : to_dense(Tensor x) -> Tensor(out) forward : to_dense(Tensor x) -> Tensor(out)
args : (Tensor x, Tensor out_grad) args : (Tensor x, Tensor out_grad)
output : Tensor(x_grad) output : Tensor(x_grad)
kernel : kernel :
func : coo_to_dense_grad{sparse_coo, dense -> sparse_coo} func : coo_to_dense_grad{sparse_coo, dense -> sparse_coo}
- backward_api : to_sparse_coo_grad - backward_op : to_sparse_coo_grad
forward : to_sparse_coo(Tensor x, int64_t sparse_dim) -> Tensor(out) forward : to_sparse_coo(Tensor x, int64_t sparse_dim) -> Tensor(out)
args : (Tensor out_grad) args : (Tensor out_grad)
output : Tensor(x_grad) output : Tensor(x_grad)
kernel : kernel :
func : coo_to_dense { sparse_coo -> dense } func : coo_to_dense { sparse_coo -> dense }
- backward_api : values_grad - backward_op : values_grad
forward : values_coo(Tensor x) -> Tensor(out) forward : values_coo(Tensor x) -> Tensor(out)
args : (Tensor x, Tensor out_grad) args : (Tensor x, Tensor out_grad)
output : Tensor(x_grad) output : Tensor(x_grad)
kernel : kernel :
func : values_coo_grad{sparse_coo, dense-> sparse_coo} func : values_coo_grad{sparse_coo, dense-> sparse_coo}
- backward_api: fused_attention_grad - backward_op: fused_attention_grad
forward : fused_attention_csr(Tensor query, Tensor key, Tensor value, Tensor sparse_mask, Tensor key_padding_mask, Tensor attn_mask) -> Tensor(out), Tensor(softmax) forward : fused_attention_csr(Tensor query, Tensor key, Tensor value, Tensor sparse_mask, Tensor key_padding_mask, Tensor attn_mask) -> Tensor(out), Tensor(softmax)
args: (Tensor query, Tensor key, Tensor value, Tensor softmax, Tensor out_grad) args: (Tensor query, Tensor key, Tensor value, Tensor softmax, Tensor out_grad)
output : Tensor(query_grad), Tensor(key_grad), Tensor(value_grad) output : Tensor(query_grad), Tensor(key_grad), Tensor(value_grad)
......
- api : abs - op : abs
args : (Tensor x) args : (Tensor x)
output : Tensor(out) output : Tensor(out)
kernel : kernel :
...@@ -7,7 +7,7 @@ ...@@ -7,7 +7,7 @@
layout : x layout : x
backward : abs_grad backward : abs_grad
- api : acos - op : acos
args : (Tensor x) args : (Tensor x)
output : Tensor(out) output : Tensor(out)
kernel : kernel :
...@@ -16,7 +16,7 @@ ...@@ -16,7 +16,7 @@
layout : x layout : x
backward : acos_grad backward : acos_grad
- api : acosh - op : acosh
args : (Tensor x) args : (Tensor x)
output : Tensor(out) output : Tensor(out)
kernel : kernel :
...@@ -25,7 +25,7 @@ ...@@ -25,7 +25,7 @@
layout : x layout : x
backward : acosh_grad backward : acosh_grad
- api : add - op : add
args : (Tensor x, Tensor y) args : (Tensor x, Tensor y)
output : Tensor(out) output : Tensor(out)
kernel : kernel :
...@@ -34,7 +34,7 @@ ...@@ -34,7 +34,7 @@
layout : x layout : x
backward : add_grad backward : add_grad
- api : asin - op : asin
args : (Tensor x) args : (Tensor x)
output : Tensor(out) output : Tensor(out)
kernel : kernel :
...@@ -43,7 +43,7 @@ ...@@ -43,7 +43,7 @@
layout : x layout : x
backward : asin_grad backward : asin_grad
- api : asinh - op : asinh
args : (Tensor x) args : (Tensor x)
output : Tensor(out) output : Tensor(out)
kernel : kernel :
...@@ -52,7 +52,7 @@ ...@@ -52,7 +52,7 @@
layout : x layout : x
backward : asinh_grad backward : asinh_grad
- api : atan - op : atan
args : (Tensor x) args : (Tensor x)
output : Tensor(out) output : Tensor(out)
kernel : kernel :
...@@ -61,7 +61,7 @@ ...@@ -61,7 +61,7 @@
layout : x layout : x
backward : atan_grad backward : atan_grad
- api : atanh - op : atanh
args : (Tensor x) args : (Tensor x)
output : Tensor(out) output : Tensor(out)
kernel : kernel :
...@@ -70,7 +70,7 @@ ...@@ -70,7 +70,7 @@
layout : x layout : x
backward : atanh_grad backward : atanh_grad
- api : cast - op : cast
args : (Tensor x, DataType index_dtype=DataType::UNDEFINED, DataType value_dtype=DataType::UNDEFINED) args : (Tensor x, DataType index_dtype=DataType::UNDEFINED, DataType value_dtype=DataType::UNDEFINED)
output : Tensor(out) output : Tensor(out)
kernel : kernel :
...@@ -80,7 +80,7 @@ ...@@ -80,7 +80,7 @@
data_type : x data_type : x
backward : cast_grad backward : cast_grad
- api : conv3d - op : conv3d
args : (Tensor x, Tensor kernel, int[] paddings, int[] dilations, int[] strides, int groups, bool subm, str key) args : (Tensor x, Tensor kernel, int[] paddings, int[] dilations, int[] strides, int groups, bool subm, str key)
output : Tensor(out), Tensor(rulebook), Tensor(counter) output : Tensor(out), Tensor(rulebook), Tensor(counter)
kernel : kernel :
...@@ -89,7 +89,7 @@ ...@@ -89,7 +89,7 @@
intermediate: rulebook, counter intermediate: rulebook, counter
backward : conv3d_coo_grad backward : conv3d_coo_grad
- api : divide - op : divide
args : (Tensor x, Tensor y) args : (Tensor x, Tensor y)
output : Tensor(out) output : Tensor(out)
kernel : kernel :
...@@ -98,7 +98,7 @@ ...@@ -98,7 +98,7 @@
layout : x layout : x
backward : divide_grad backward : divide_grad
- api : divide_scalar - op : divide_scalar
args : (Tensor x, float scalar) args : (Tensor x, float scalar)
output : Tensor(out) output : Tensor(out)
kernel : kernel :
...@@ -106,7 +106,7 @@ ...@@ -106,7 +106,7 @@
divide_csr_scalar{sparse_csr -> sparse_csr} divide_csr_scalar{sparse_csr -> sparse_csr}
backward : divide_scalar_grad backward : divide_scalar_grad
- api : expm1 - op : expm1
args : (Tensor x) args : (Tensor x)
output : Tensor(out) output : Tensor(out)
kernel : kernel :
...@@ -115,7 +115,7 @@ ...@@ -115,7 +115,7 @@
layout : x layout : x
backward : expm1_grad backward : expm1_grad
- api : leaky_relu - op : leaky_relu
args : (Tensor x, float alpha) args : (Tensor x, float alpha)
output : Tensor(out) output : Tensor(out)
kernel : kernel :
...@@ -124,7 +124,7 @@ ...@@ -124,7 +124,7 @@
layout : x layout : x
backward : leaky_relu_grad backward : leaky_relu_grad
- api : log1p - op : log1p
args : (Tensor x) args : (Tensor x)
output : Tensor(out) output : Tensor(out)
kernel : kernel :
...@@ -133,7 +133,7 @@ ...@@ -133,7 +133,7 @@
layout : x layout : x
backward : log1p_grad backward : log1p_grad
- api : multiply - op : multiply
args : (Tensor x, Tensor y) args : (Tensor x, Tensor y)
output : Tensor(out) output : Tensor(out)
kernel : kernel :
...@@ -142,7 +142,7 @@ ...@@ -142,7 +142,7 @@
layout : x layout : x
backward : multiply_grad backward : multiply_grad
- api : pow - op : pow
args : (Tensor x, float factor) args : (Tensor x, float factor)
output : Tensor(out) output : Tensor(out)
kernel : kernel :
...@@ -151,7 +151,7 @@ ...@@ -151,7 +151,7 @@
layout : x layout : x
backward : pow_grad backward : pow_grad
- api : relu - op : relu
args : (Tensor x) args : (Tensor x)
output : Tensor(out) output : Tensor(out)
kernel : kernel :
...@@ -160,7 +160,7 @@ ...@@ -160,7 +160,7 @@
layout : x layout : x
backward : relu_grad backward : relu_grad
- api : relu6 - op : relu6
args : (Tensor x, float threshold) args : (Tensor x, float threshold)
output : Tensor(out) output : Tensor(out)
kernel : kernel :
...@@ -169,7 +169,7 @@ ...@@ -169,7 +169,7 @@
layout : x layout : x
backward : relu6_grad backward : relu6_grad
- api : scale - op : scale
args : (Tensor x, float scale, float bias, bool bias_after_scale) args : (Tensor x, float scale, float bias, bool bias_after_scale)
output : Tensor(out) output : Tensor(out)
kernel : kernel :
...@@ -177,7 +177,7 @@ ...@@ -177,7 +177,7 @@
scale_csr{sparse_csr -> sparse_csr} scale_csr{sparse_csr -> sparse_csr}
backward : scale_grad backward : scale_grad
- api : sin - op : sin
args : (Tensor x) args : (Tensor x)
output : Tensor(out) output : Tensor(out)
kernel : kernel :
...@@ -186,7 +186,7 @@ ...@@ -186,7 +186,7 @@
layout : x layout : x
backward : sin_grad backward : sin_grad
- api : sinh - op : sinh
args : (Tensor x) args : (Tensor x)
output : Tensor(out) output : Tensor(out)
kernel : kernel :
...@@ -195,7 +195,7 @@ ...@@ -195,7 +195,7 @@
layout : x layout : x
backward : sinh_grad backward : sinh_grad
- api : softmax - op : softmax
args : (Tensor x, int axis=-1) args : (Tensor x, int axis=-1)
output : Tensor(out) output : Tensor(out)
kernel : kernel :
...@@ -203,7 +203,7 @@ ...@@ -203,7 +203,7 @@
layout : x layout : x
backward : softmax_grad backward : softmax_grad
- api : sparse_coo_tensor - op : sparse_coo_tensor
args : (Tensor values, Tensor indices, IntArray dense_shape) args : (Tensor values, Tensor indices, IntArray dense_shape)
output : Tensor(out) output : Tensor(out)
kernel : kernel :
...@@ -212,7 +212,7 @@ ...@@ -212,7 +212,7 @@
data_type : values data_type : values
backward : sparse_coo_tensor_grad backward : sparse_coo_tensor_grad
- api : sqrt - op : sqrt
args : (Tensor x) args : (Tensor x)
output : Tensor(out) output : Tensor(out)
kernel : kernel :
...@@ -221,7 +221,7 @@ ...@@ -221,7 +221,7 @@
layout : x layout : x
backward : sqrt_grad backward : sqrt_grad
- api : square - op : square
args : (Tensor x) args : (Tensor x)
output : Tensor(out) output : Tensor(out)
kernel : kernel :
...@@ -230,7 +230,7 @@ ...@@ -230,7 +230,7 @@
layout : x layout : x
backward : square_grad backward : square_grad
- api : subtract - op : subtract
args : (Tensor x, Tensor y) args : (Tensor x, Tensor y)
output : Tensor(out) output : Tensor(out)
kernel : kernel :
...@@ -239,7 +239,7 @@ ...@@ -239,7 +239,7 @@
layout : x layout : x
backward : subtract_grad backward : subtract_grad
- api : tan - op : tan
args : (Tensor x) args : (Tensor x)
output : Tensor(out) output : Tensor(out)
kernel : kernel :
...@@ -248,7 +248,7 @@ ...@@ -248,7 +248,7 @@
layout : x layout : x
backward : tan_grad backward : tan_grad
- api : tanh - op : tanh
args : (Tensor x) args : (Tensor x)
output : Tensor(out) output : Tensor(out)
kernel : kernel :
...@@ -257,7 +257,7 @@ ...@@ -257,7 +257,7 @@
layout : x layout : x
backward : tanh_grad backward : tanh_grad
- api : to_dense - op : to_dense
args : (Tensor x) args : (Tensor x)
output : Tensor(out) output : Tensor(out)
kernel : kernel :
...@@ -265,7 +265,7 @@ ...@@ -265,7 +265,7 @@
csr_to_dense {sparse_csr -> dense} csr_to_dense {sparse_csr -> dense}
backward : to_dense_grad backward : to_dense_grad
- api : to_sparse_coo - op : to_sparse_coo
args : (Tensor x, int64_t sparse_dim) args : (Tensor x, int64_t sparse_dim)
output : Tensor(out) output : Tensor(out)
kernel : kernel :
...@@ -273,14 +273,14 @@ ...@@ -273,14 +273,14 @@
csr_to_coo { sparse_csr -> sparse_coo} csr_to_coo { sparse_csr -> sparse_coo}
backward : to_sparse_coo_grad backward : to_sparse_coo_grad
- api : to_sparse_csr - op : to_sparse_csr
args : (Tensor x) args : (Tensor x)
output : Tensor(out) output : Tensor(out)
kernel : kernel :
func : dense_to_csr {dense -> sparse_csr}, func : dense_to_csr {dense -> sparse_csr},
coo_to_csr {sparse_coo -> sparse_csr} coo_to_csr {sparse_coo -> sparse_csr}
- api : values - op : values
args : (Tensor x) args : (Tensor x)
output : Tensor(out) output : Tensor(out)
kernel : kernel :
...@@ -289,7 +289,7 @@ ...@@ -289,7 +289,7 @@
layout : x layout : x
backward : values_grad backward : values_grad
- api: addmm - op: addmm
args : (Tensor input, Tensor x, Tensor y, float alpha=1.0, float beta=1.0) args : (Tensor input, Tensor x, Tensor y, float alpha=1.0, float beta=1.0)
output : Tensor(out) output : Tensor(out)
kernel : kernel :
...@@ -300,14 +300,14 @@ ...@@ -300,14 +300,14 @@
layout : x layout : x
backward: addmm_grad backward: addmm_grad
- api: coalesce - op: coalesce
args : (Tensor x) args : (Tensor x)
output : Tensor(out) output : Tensor(out)
kernel : kernel :
func: coalesce{sparse_coo -> sparse_coo} func: coalesce{sparse_coo -> sparse_coo}
layout : x layout : x
- api: full_like - op: full_like
args : (Tensor x, Scalar value, DataType dtype=DataType::UNDEFINED) args : (Tensor x, Scalar value, DataType dtype=DataType::UNDEFINED)
output : Tensor(out) output : Tensor(out)
kernel : kernel :
...@@ -316,7 +316,7 @@ ...@@ -316,7 +316,7 @@
layout : x layout : x
data_type : dtype data_type : dtype
- api: fused_attention - op: fused_attention
args : (Tensor query, Tensor key, Tensor value, Tensor sparse_mask, Tensor key_padding_mask, Tensor attn_mask) args : (Tensor query, Tensor key, Tensor value, Tensor sparse_mask, Tensor key_padding_mask, Tensor attn_mask)
output : Tensor(out), Tensor(softmax) output : Tensor(out), Tensor(softmax)
kernel : kernel :
...@@ -327,7 +327,7 @@ ...@@ -327,7 +327,7 @@
intermediate : softmax intermediate : softmax
backward: fused_attention_grad backward: fused_attention_grad
- api: masked_matmul - op: masked_matmul
args : (Tensor x, Tensor y, Tensor mask) args : (Tensor x, Tensor y, Tensor mask)
output : Tensor(out) output : Tensor(out)
kernel : kernel :
...@@ -335,7 +335,7 @@ ...@@ -335,7 +335,7 @@
layout : x layout : x
backward: masked_matmul_grad backward: masked_matmul_grad
- api: matmul - op: matmul
args : (Tensor x, Tensor y) args : (Tensor x, Tensor y)
output : Tensor(out) output : Tensor(out)
kernel : kernel :
...@@ -346,7 +346,7 @@ ...@@ -346,7 +346,7 @@
layout : x layout : x
backward: matmul_grad backward: matmul_grad
- api: maxpool - op: maxpool
args : (Tensor x, int[] kernel_sizes, int[] paddings, int[] dilations, int[] strides) args : (Tensor x, int[] kernel_sizes, int[] paddings, int[] dilations, int[] strides)
output : Tensor(out), Tensor(rulebook), Tensor(counter) output : Tensor(out), Tensor(rulebook), Tensor(counter)
kernel : kernel :
...@@ -355,7 +355,7 @@ ...@@ -355,7 +355,7 @@
intermediate : rulebook, counter intermediate : rulebook, counter
backward : maxpool_grad backward : maxpool_grad
- api: mv - op: mv
args : (Tensor x, Tensor vec) args : (Tensor x, Tensor vec)
output : Tensor(out) output : Tensor(out)
kernel : kernel :
......
- api : empty - op : empty
args : (IntArray shape, Place place=CPUPlace()) args : (IntArray shape, Place place=CPUPlace())
output : Tensor(out@StringTensor) output : Tensor(out@StringTensor)
infer_meta : infer_meta :
...@@ -9,7 +9,7 @@ ...@@ -9,7 +9,7 @@
param : [shape] param : [shape]
backend : place backend : place
- api : empty_like - op : empty_like
args : (Tensor x, Place place = {}) args : (Tensor x, Place place = {})
output : Tensor(out@StringTensor) output : Tensor(out@StringTensor)
infer_meta : infer_meta :
...@@ -20,7 +20,7 @@ ...@@ -20,7 +20,7 @@
param : [x] param : [x]
backend : place > x backend : place > x
- api : lower - op : lower
args : (Tensor x, bool use_utf8_encoding) args : (Tensor x, bool use_utf8_encoding)
output : Tensor(out@StringTensor) output : Tensor(out@StringTensor)
infer_meta : infer_meta :
...@@ -29,7 +29,7 @@ ...@@ -29,7 +29,7 @@
kernel : kernel :
func : strings_lower func : strings_lower
- api : upper - op : upper
args : (Tensor x, bool use_utf8_encoding) args : (Tensor x, bool use_utf8_encoding)
output : Tensor(out@StringTensor) output : Tensor(out@StringTensor)
infer_meta : infer_meta :
......
...@@ -3537,7 +3537,7 @@ void StridedSliceInferMeta(const MetaTensor& x, ...@@ -3537,7 +3537,7 @@ void StridedSliceInferMeta(const MetaTensor& x,
/* Why not use SumRawInferMeta directly? /* Why not use SumRawInferMeta directly?
Because we need to make InferMetaFunction's args follow the design of Because we need to make InferMetaFunction's args follow the design of
api.yaml ops.yaml
*/ */
void SumInferMeta(const MetaTensor& x, void SumInferMeta(const MetaTensor& x,
const IntArray& axis, const IntArray& axis,
......
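The comment retouched in this hunk carries the rationale: generated code passes arguments to an InferMeta function in exactly the order the YAML args string declares them, which is why a wrapper such as SumInferMeta exists instead of calling SumRawInferMeta directly. A minimal Python sketch of that correspondence follows, using a hypothetical sum entry whose field values are assumptions for illustration, not copied from the repository.

import yaml

# Hypothetical ops.yaml entry; the args order is what the generated code
# feeds to SumInferMeta, so the C++ signature must mirror it exactly.
entry = yaml.safe_load("""
- op : sum
  args : (Tensor x, IntArray axis={}, DataType dtype=DataType::UNDEFINED, bool keepdim=false)
  output : Tensor(out)
  infer_meta :
    func : SumInferMeta
""")[0]

# Recover the declared parameter order (a naive split is enough here
# because no default value in this entry contains a comma).
params = [p.strip() for p in entry["args"].strip("()").split(",")]
print(entry["infer_meta"]["func"], params)
# SumInferMeta ['Tensor x', 'IntArray axis={}',
#               'DataType dtype=DataType::UNDEFINED', 'bool keepdim=false']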
...@@ -24,6 +24,9 @@ import paddle.fluid as fluid ...@@ -24,6 +24,9 @@ import paddle.fluid as fluid
from paddle.fluid import compiler, Program, program_guard from paddle.fluid import compiler, Program, program_guard
from paddle.fluid.backward import append_backward from paddle.fluid.backward import append_backward
import paddle.fluid.framework as framework import paddle.fluid.framework as framework
import gradient_checker
from decorator_helper import prog_scope
import paddle.fluid.layers as layers
class TestAssignOp(op_test.OpTest): class TestAssignOp(op_test.OpTest):
...@@ -258,5 +261,79 @@ class TestAssignOpErrorApi(unittest.TestCase): ...@@ -258,5 +261,79 @@ class TestAssignOpErrorApi(unittest.TestCase):
paddle.disable_static() paddle.disable_static()
class TestAssignDoubleGradCheck(unittest.TestCase):
def assign_wrapper(self, x):
return paddle.fluid.layers.assign(x[0])
@prog_scope()
def func(self, place):
# the shape of the input variable should be specified explicitly and must not include -1.
eps = 0.005
dtype = np.float32
data = layers.data('data', [3, 4, 5], False, dtype)
data.persistable = True
out = paddle.fluid.layers.assign(data)
data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype)
gradient_checker.double_grad_check([data],
out,
x_init=[data_arr],
place=place,
eps=eps)
fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
gradient_checker.double_grad_check_for_dygraph(self.assign_wrapper,
[data],
out,
x_init=[data_arr],
place=place)
def test_grad(self):
paddle.enable_static()
places = [fluid.CPUPlace()]
if core.is_compiled_with_cuda():
places.append(fluid.CUDAPlace(0))
for p in places:
self.func(p)
class TestAssignTripleGradCheck(unittest.TestCase):
def assign_wrapper(self, x):
return paddle.fluid.layers.assign(x[0])
@prog_scope()
def func(self, place):
# the shape of the input variable should be specified explicitly and must not include -1.
eps = 0.005
dtype = np.float32
data = layers.data('data', [3, 4, 5], False, dtype)
data.persistable = True
out = paddle.fluid.layers.assign(data)
data_arr = np.random.uniform(-1, 1, data.shape).astype(dtype)
gradient_checker.triple_grad_check([data],
out,
x_init=[data_arr],
place=place,
eps=eps)
fluid.set_flags({"FLAGS_retain_grad_for_all_tensor": True})
gradient_checker.triple_grad_check_for_dygraph(self.assign_wrapper,
[data],
out,
x_init=[data_arr],
place=place)
def test_grad(self):
paddle.enable_static()
places = [fluid.CPUPlace()]
if core.is_compiled_with_cuda():
places.append(fluid.CUDAPlace(0))
for p in places:
self.func(p)
if __name__ == '__main__': if __name__ == '__main__':
unittest.main() unittest.main()
...@@ -64,7 +64,7 @@ def get_skipped_kernel_list(): ...@@ -64,7 +64,7 @@ def get_skipped_kernel_list():
for api in infer_meta_data: for api in infer_meta_data:
if "kernel" not in api or "infer_meta" not in api: if "kernel" not in api or "infer_meta" not in api:
continue continue
if api["api"] in skiped_api_list["phi_apis"]: if api["op"] in skiped_api_list["phi_apis"]:
skiped_kernel_list.append(api["kernel"]["func"]) skiped_kernel_list.append(api["kernel"]["func"])
skiped_kernel_list += skiped_api_list["phi_kernels"] skiped_kernel_list += skiped_api_list["phi_kernels"]
return skiped_kernel_list return skiped_kernel_list
......
...@@ -39,7 +39,7 @@ def get_skipped_kernel_list(): ...@@ -39,7 +39,7 @@ def get_skipped_kernel_list():
for api in infer_meta_data: for api in infer_meta_data:
if "kernel" not in api or "infer_meta" not in api: if "kernel" not in api or "infer_meta" not in api:
continue continue
if api["api"] in skiped_api_list["phi_apis"]: if api["op"] in skiped_api_list["phi_apis"]:
skiped_kernel_list.append(api["kernel"]["func"]) skiped_kernel_list.append(api["kernel"]["func"])
skiped_kernel_list += skiped_api_list["phi_kernels"] skiped_kernel_list += skiped_api_list["phi_kernels"]
return skiped_kernel_list return skiped_kernel_list
......
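Both hunks above make the same one-token change: after the normalization every forward entry is keyed "op" (and every backward entry "backward_op"), so the skip-list lookup must read api["op"]. The following self-contained sketch reproduces that lookup under assumed data; the YAML entries and the skip list are invented, and the variable spellings are kept as in the scripts.

import yaml

# Two made-up entries in the post-rename format: forward ops are keyed
# "op", so indexing api["api"] here would now raise KeyError.
infer_meta_data = yaml.safe_load("""
- op : trunc
  kernel :
    func : trunc
  infer_meta :
    func : UnchangedInferMeta
- op : trace
  kernel :
    func : trace
  infer_meta :
    func : TraceInferMeta
""")

skiped_api_list = {"phi_apis": {"trunc"}, "phi_kernels": ["flatten"]}

skiped_kernel_list = []
for api in infer_meta_data:
    if "kernel" not in api or "infer_meta" not in api:
        continue
    if api["op"] in skiped_api_list["phi_apis"]:  # was: api["api"]
        skiped_kernel_list.append(api["kernel"]["func"])
skiped_kernel_list += skiped_api_list["phi_kernels"]
print(skiped_kernel_list)  # ['trunc', 'flatten']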