From 64696b9b9d16b8ca146ed6c853c370e7a1e42360 Mon Sep 17 00:00:00 2001
From: Wang Xin
Date: Tue, 20 Jun 2023 14:30:37 +0800
Subject: [PATCH] static graph autogen code support for exponential_ op (#54734)

* static graph autogen code support for exponential_ op

* set default value
---
 paddle/fluid/operators/exponential_op.cc      | 81 -------------------
 .../fluid/operators/generator/parse_utils.py  |  6 ++
 paddle/phi/api/yaml/op_compat.yaml            |  9 +++
 paddle/phi/api/yaml/static_backward.yaml      |  8 ++
 paddle/phi/api/yaml/static_ops.yaml           | 11 +++
 paddle/phi/ops/compat/exponential_sig.cc      | 26 ------
 6 files changed, 34 insertions(+), 107 deletions(-)
 delete mode 100644 paddle/fluid/operators/exponential_op.cc
 delete mode 100644 paddle/phi/ops/compat/exponential_sig.cc

diff --git a/paddle/fluid/operators/exponential_op.cc b/paddle/fluid/operators/exponential_op.cc
deleted file mode 100644
index 52ddd9ebfa1..00000000000
--- a/paddle/fluid/operators/exponential_op.cc
+++ /dev/null
@@ -1,81 +0,0 @@
-/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License. */
-
-#include "paddle/fluid/framework/infershape_utils.h"
-#include "paddle/fluid/framework/op_registry.h"
-#include "paddle/phi/infermeta/unary.h"
-
-namespace paddle {
-namespace operators {
-
-class ExponentialOp : public framework::OperatorWithKernel {
- public:
-  using framework::OperatorWithKernel::OperatorWithKernel;
-
- protected:
-  phi::KernelKey GetExpectedKernelType(
-      const framework::ExecutionContext &ctx) const override {
-    return phi::KernelKey(OperatorWithKernel::IndicateVarDataType(ctx, "X"),
-                          ctx.GetPlace());
-  }
-};
-
-class ExponentialOpMaker : public framework::OpProtoAndCheckerMaker {
- public:
-  void Make() override {
-    AddComment(R"DOC(
-This operator fills the input tensor with random values sampled from a
-exponential distribution.
-)DOC");
-    AddInput("X", "The input tensor.");
-    AddOutput("Out", "The output tensor of exponential OP.");
-    AddAttr<float>(
-        "lambda", "lambd parameter of exponential distribution. [default 1.0].")
-        .SetDefault(1.0f);
-  }
-};
-
-template <typename T>
-class ExponentialGradOpMaker : public framework::SingleGradOpMaker<T> {
- public:
-  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;
-
- protected:
-  void Apply(GradOpPtr<T> retv) const override {
-    retv->SetType("fill_any_like");
-    retv->SetInput("X", this->OutputGrad("Out"));
-    retv->SetAttr("value", 0.0f);
-    retv->SetOutput("Out", this->InputGrad("X"));
-  }
-};
-
-}  // namespace operators
-}  // namespace paddle
-
-namespace ops = paddle::operators;
-namespace plat = paddle::platform;
-
-DECLARE_INPLACE_OP_INFERER(ExponentialInferer, {"X", "Out"});
-
-DECLARE_INFER_SHAPE_FUNCTOR(exponential,
-                            ExponentialInfershapeFunctor,
-                            PD_INFER_META(phi::UnchangedInferMeta));
-
-REGISTER_OPERATOR(exponential,
-                  ops::ExponentialOp,
-                  ops::ExponentialOpMaker,
-                  ops::ExponentialGradOpMaker<paddle::framework::OpDesc>,
-                  ops::ExponentialGradOpMaker<paddle::imperative::OpBase>,
-                  ExponentialInferer,
-                  ExponentialInfershapeFunctor);
diff --git a/paddle/fluid/operators/generator/parse_utils.py b/paddle/fluid/operators/generator/parse_utils.py
index 7e09706d21a..8f803fa4e70 100644
--- a/paddle/fluid/operators/generator/parse_utils.py
+++ b/paddle/fluid/operators/generator/parse_utils.py
@@ -29,6 +29,12 @@ def to_named_dict(items: List[Dict], is_op=False) -> Dict[str, Dict]:
             item["name"] = (
                 item["name"] if item["name"][-1] != '_' else item["name"][:-1]
             )
+            if "forward" in item:
+                item["forward"]["name"] = (
+                    item["forward"]["name"]
+                    if item["forward"]["name"][-1] != '_'
+                    else item["forward"]["name"][:-1]
+                )
             name = item["name"]
             named_dict[name] = item
         else:
diff --git a/paddle/phi/api/yaml/op_compat.yaml b/paddle/phi/api/yaml/op_compat.yaml
index 4d1d29f5f68..28ffb681293 100755
--- a/paddle/phi/api/yaml/op_compat.yaml
+++ b/paddle/phi/api/yaml/op_compat.yaml
@@ -932,6 +932,15 @@
   extra :
     attrs : [bool use_mkldnn = false, bool use_cudnn = false]
 
+- op : exponential_
+  backward : exponential__grad
+  inputs :
+    x : X
+  outputs :
+    out : Out
+  attrs :
+    lam : lambda
+
 - op : eye
   outputs :
     out : Out
diff --git a/paddle/phi/api/yaml/static_backward.yaml b/paddle/phi/api/yaml/static_backward.yaml
index 9f725070e5b..0c1d0414b11 100755
--- a/paddle/phi/api/yaml/static_backward.yaml
+++ b/paddle/phi/api/yaml/static_backward.yaml
@@ -89,6 +89,14 @@
     data_type : out_grad
   no_need_buffer : weight
 
+- backward_op : exponential__grad
+  forward : exponential_ (Tensor x, float lam=1.0f) -> Tensor(out)
+  args : (Tensor out_grad)
+  output : Tensor(x_grad)
+  infer_meta :
+    func : UnchangedInferMeta
+  invoke : full_like(out_grad, 0.0f)
+
 - backward_op : frobenius_norm_grad
   forward: frobenius_norm (Tensor x, IntArray axis={0}, bool keepdim=false, bool reduce_all=false, int in_dtype=-1, int out_dtype=-1) -> Tensor(out)
   args : (Tensor x, Tensor out, Tensor out_grad, IntArray axis={0}, bool keepdim=false, bool reduce_all=false, int in_dtype=-1, int out_dtype=-1)
diff --git a/paddle/phi/api/yaml/static_ops.yaml b/paddle/phi/api/yaml/static_ops.yaml
index abacbe9f716..513af8a40e6 100755
--- a/paddle/phi/api/yaml/static_ops.yaml
+++ b/paddle/phi/api/yaml/static_ops.yaml
@@ -182,6 +182,17 @@
     backend : x
     force_backend : force_cpu
 
+- op : exponential_
+  args : (Tensor x, float lam = 1.0f)
+  output : Tensor(out)
+  infer_meta :
+    func : UnchangedInferMeta
+    param : [x]
+  kernel :
+    func : exponential
+  inplace : (x -> out)
+  backward : exponential__grad
+
 - op : eye
   args : (Scalar(int64_t) num_rows, Scalar(int64_t) num_columns = -1, DataType dtype = DataType::FLOAT32)
   output : Tensor(out)
diff --git a/paddle/phi/ops/compat/exponential_sig.cc b/paddle/phi/ops/compat/exponential_sig.cc
deleted file mode 100644
index 79bad591842..00000000000
--- a/paddle/phi/ops/compat/exponential_sig.cc
+++ /dev/null
@@ -1,26 +0,0 @@
-/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License. */
-
-#include "paddle/phi/core/compat/op_utils.h"
-
-namespace phi {
-
-KernelSignature ExponentialOpArgumentMapping(
-    const ArgumentMappingContext& ctx UNUSED) {
-  return KernelSignature("exponential", {"X"}, {"lambda"}, {"Out"});
-}
-
-}  // namespace phi
-
-PD_REGISTER_ARG_MAPPING_FN(exponential, phi::ExponentialOpArgumentMapping);
--
GitLab