From 36eb5cde31401032d1282f47fbebc654a0db450d Mon Sep 17 00:00:00 2001
From: RedContritio
Date: Fri, 14 Jul 2023 17:18:20 +0800
Subject: [PATCH] support auto generate for static op elementwise_min (#55008)

---
 .../elementwise/elementwise_min_op.cc         | 168 ------------------
 .../elementwise/unity_build_rule.cmake        |   5 +-
 paddle/phi/api/yaml/backward.yaml             |  11 ++
 paddle/phi/api/yaml/legacy_backward.yaml      |  10 --
 paddle/phi/api/yaml/legacy_ops.yaml           |  10 --
 paddle/phi/api/yaml/op_compat.yaml            |  12 ++
 paddle/phi/api/yaml/op_version.yaml           |   8 +
 paddle/phi/api/yaml/ops.yaml                  |  10 ++
 paddle/phi/api/yaml/static_backward.yaml      |  12 ++
 paddle/phi/api/yaml/static_ops.yaml           |   9 +
 paddle/phi/ops/compat/elementwise_sig.cc      |  21 +--
 11 files changed, 67 insertions(+), 209 deletions(-)
 delete mode 100644 paddle/fluid/operators/elementwise/elementwise_min_op.cc

diff --git a/paddle/fluid/operators/elementwise/elementwise_min_op.cc b/paddle/fluid/operators/elementwise/elementwise_min_op.cc
deleted file mode 100644
index fad0e3008ec..00000000000
--- a/paddle/fluid/operators/elementwise/elementwise_min_op.cc
+++ /dev/null
@@ -1,168 +0,0 @@
-/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License. */
-
-#include <string>
-
-#include "paddle/fluid/operators/elementwise/elementwise_op.h"
-#include "paddle/fluid/prim/api/composite_backward/composite_backward_api.h"
-#include "paddle/fluid/prim/utils/static/composite_grad_desc_maker.h"
-#include "paddle/fluid/prim/utils/static/desc_tensor.h"
-
-namespace paddle {
-namespace framework {
-class OpDesc;
-}  // namespace framework
-namespace imperative {
-class OpBase;
-}  // namespace imperative
-}  // namespace paddle
-
-namespace paddle {
-namespace operators {
-
-class ElementwiseMinOpMaker : public ElementwiseOpMaker {
- protected:
-  std::string GetName() const override { return "Min"; }
-  std::string GetEquation() const override { return "Out = min(X, Y)"; }
-
-  void AddInputX() override {
-    AddInput("X", "The first tensor holding the elements to be compared.");
-  }
-
-  void AddInputY() override {
-    AddInput("Y", "The second tensor holding the elements to be compared.");
-  }
-
-  std::string GetOpFunctionality() const override {
-    return "Compare two tensors and returns a new tensor containing the "
-           "element-wise minima.";
-  }
-};
-
-class ElementwiseFMinOpMaker : public ElementwiseOpMaker {
- protected:
-  std::string GetName() const override { return "FMin"; }
-  std::string GetEquation() const override { return "Out = fmin(X, Y)"; }
-
-  void AddInputX() override {
-    AddInput("X", "The first tensor holding the elements to be compared.");
-  }
-
-  void AddInputY() override {
-    AddInput("Y", "The second tensor holding the elements to be compared.");
-  }
-
-  std::string GetOpFunctionality() const override {
-    return "Compare two tensors and returns a new tensor containing the "
-           "element-wise minima. If the element of one tensor is nan, "
-           "return the element value of the other tensor, if both are nan, "
-           "return the first nan";
-  }
-};
-
-class ElementwiseMinCompositeGradOpMaker
-    : public prim::CompositeGradOpMakerBase {
-  using prim::CompositeGradOpMakerBase::CompositeGradOpMakerBase;
-
- public:
-  void Apply() override {
-    paddle::Tensor x = this->GetSingleForwardInput("X");
-    paddle::Tensor y = this->GetSingleForwardInput("Y");
-    paddle::Tensor out_grad = this->GetSingleOutputGrad("Out");
-    paddle::Tensor dx = this->GetSingleInputGrad("X");
-    auto* dx_ptr = this->GetOutputPtr(&dx);
-    std::string dx_name = this->GetOutputName(dx);
-    paddle::Tensor dy = this->GetSingleInputGrad("Y");
-    auto* dy_ptr = this->GetOutputPtr(&dy);
-    std::string dy_name = this->GetOutputName(dy);
-    VLOG(6) << "Runing minimum_grad composite func";
-    int axis = static_cast<int>(this->Attr<int>("axis"));
-    PADDLE_ENFORCE_EQ(
-        axis,
-        -1,
-        phi::errors::InvalidArgument(
-            "We only support axis = -1 in composite minimum_grad but we got: ",
-            axis));
-    prim::minimum_grad<prim::DescTensor>(x, y, out_grad, dx_ptr, dy_ptr);
-    this->RecoverOutputName(dx, dx_name);
-    this->RecoverOutputName(dy, dy_name);
-  }
-};
-
-template <typename T>
-class ElementwiseMinGradOpMaker : public framework::SingleGradOpMaker<T> {
- public:
-  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;
-
- protected:
-  void Apply(GradOpPtr<T> op) const override {
-    op->SetType("elementwise_min_grad");
-    op->SetInput("X", this->Input("X"));
-    op->SetInput("Y", this->Input("Y"));
-    op->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out"));
-    op->SetOutput(framework::GradVarName("X"), this->InputGrad("X"));
-    op->SetOutput(framework::GradVarName("Y"), this->InputGrad("Y"));
-    op->SetAttrMap(this->Attrs());
-  }
-};
-
-template <typename T>
-class ElementwiseFMinGradOpMaker : public framework::SingleGradOpMaker<T> {
- public:
-  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;
-
- protected:
-  void Apply(GradOpPtr<T> op) const override {
-    op->SetType("elementwise_fmin_grad");
-    op->SetInput("X", this->Input("X"));
-    op->SetInput("Y", this->Input("Y"));
-    op->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out"));
-    op->SetOutput(framework::GradVarName("X"), this->InputGrad("X"));
-    op->SetOutput(framework::GradVarName("Y"), this->InputGrad("Y"));
-    op->SetAttrMap(this->Attrs());
-  }
-};
-
-}  // namespace operators
-}  // namespace paddle
-
-namespace ops = paddle::operators;
-
-REGISTER_OPERATOR(elementwise_min,
-                  ops::ElementwiseOp,
-                  ops::ElementwiseMinOpMaker,
-                  ops::ElementwiseOpInferVarType,
-                  ops::ElementwiseMinGradOpMaker<paddle::framework::OpDesc>,
-                  ops::ElementwiseMinGradOpMaker<paddle::imperative::OpBase>,
-                  ops::ElementwiseMinCompositeGradOpMaker);
-
-REGISTER_OPERATOR(elementwise_min_grad, ops::ElementwiseOpGrad);
-
-REGISTER_OP_VERSION(elementwise_min)
-    .AddCheckpoint(
-        R"ROC(Register elementwise_min for adding the attribute of Scale_y)ROC",
-        paddle::framework::compatible::OpVersionDesc().NewAttr(
-            "Scale_y",
-            "In order to support the function of scaling the input Y when "
-            "using the operator of elementwise_min.",
-            1.0f));
-
-REGISTER_OPERATOR(elementwise_fmin,
-                  ops::ElementwiseOp,
-                  ops::ElementwiseFMinOpMaker,
-                  ops::ElementwiseOpInferVarType,
-                  ops::ElementwiseFMinGradOpMaker<paddle::framework::OpDesc>,
-                  ops::ElementwiseFMinGradOpMaker<paddle::imperative::OpBase>);
-
-REGISTER_OPERATOR(elementwise_fmin_grad, ops::ElementwiseOpGrad);
diff --git a/paddle/fluid/operators/elementwise/unity_build_rule.cmake b/paddle/fluid/operators/elementwise/unity_build_rule.cmake
index c7087165ed0..dda257b505d 100644
--- a/paddle/fluid/operators/elementwise/unity_build_rule.cmake
+++ b/paddle/fluid/operators/elementwise/unity_build_rule.cmake
@@ -4,9 +4,8 @@
 # Generally, the combination rules in this file do not need to be modified.
 # If there are some redefined error in compiling with the source file which
 # in combination rule, you can remove the source file from the following rules.
-register_unity_group(
-  cc elementwise_add_op.cc elementwise_div_op.cc elementwise_min_op.cc
-  elementwise_mul_op.cc elementwise_sub_op.cc)
+register_unity_group(cc elementwise_add_op.cc elementwise_div_op.cc
+                     elementwise_mul_op.cc elementwise_sub_op.cc)
 register_unity_group(
   cu
   elementwise_add_op.cu
diff --git a/paddle/phi/api/yaml/backward.yaml b/paddle/phi/api/yaml/backward.yaml
index 31742d87d22..b6347e35fcc 100644
--- a/paddle/phi/api/yaml/backward.yaml
+++ b/paddle/phi/api/yaml/backward.yaml
@@ -870,6 +870,17 @@
     func : fmax_grad
     data_type : out_grad
 
+- backward_op : fmin_grad
+  forward : fmin(Tensor x, Tensor y) -> Tensor(out)
+  args : (Tensor x, Tensor y, Tensor out_grad)
+  output : Tensor(x_grad), Tensor(y_grad)
+  infer_meta :
+    func : GeneralBinaryGradInferMeta
+    param: [x, y]
+  kernel :
+    func : fmin_grad
+    data_type : out_grad
+
 - backward_op : fold_grad
   forward: fold (Tensor x, int[] output_sizes, int[] kernel_sizes, int[] strides, int[] paddings, int[] dilations) -> Tensor(out)
   args: (Tensor x, Tensor out_grad, int[] output_sizes, int[] kernel_sizes, int[] strides, int[] paddings, int[] dilations)
diff --git a/paddle/phi/api/yaml/legacy_backward.yaml b/paddle/phi/api/yaml/legacy_backward.yaml
index c9d2f994621..6651b1bcabd 100755
--- a/paddle/phi/api/yaml/legacy_backward.yaml
+++ b/paddle/phi/api/yaml/legacy_backward.yaml
@@ -232,16 +232,6 @@
     func : UnchangedInferMeta
   invoke : zeros_like(out_grad)
 
-- backward_op : fmin_grad
-  forward : fmin(Tensor x, Tensor y) -> Tensor(out)
-  args : (Tensor x, Tensor y, Tensor out_grad)
-  output : Tensor(x_grad), Tensor(y_grad)
-  infer_meta :
-    func : GeneralBinaryGradInferMeta
-    param: [x, y]
-  kernel :
-    func : fmin_grad
-
 - backward_op : frobenius_norm_grad
   forward : frobenius_norm(Tensor x, int64_t[] axis, bool keep_dim, bool reduce_all) -> Tensor(out)
   args : (Tensor x, Tensor out, Tensor out_grad, int64_t[] axis, bool keep_dim, bool reduce_all)
diff --git a/paddle/phi/api/yaml/legacy_ops.yaml b/paddle/phi/api/yaml/legacy_ops.yaml
index e0247133dab..12f9953687f 100755
--- a/paddle/phi/api/yaml/legacy_ops.yaml
+++ b/paddle/phi/api/yaml/legacy_ops.yaml
@@ -326,16 +326,6 @@
   kernel :
     func : floor_divide
 
-- op : fmin
-  args : (Tensor x, Tensor y)
-  output : Tensor(out)
-  infer_meta :
-    param: [x, y]
-    func : ElementwiseInferMeta
-  kernel :
-    func : fmin
-  backward : fmin_grad
-
 - op : frobenius_norm
   args : (Tensor x, int64_t[] axis, bool keep_dim, bool reduce_all)
   output : Tensor(out)
diff --git a/paddle/phi/api/yaml/op_compat.yaml b/paddle/phi/api/yaml/op_compat.yaml
index aea107f2e4c..b06fa919d15 100755
--- a/paddle/phi/api/yaml/op_compat.yaml
+++ b/paddle/phi/api/yaml/op_compat.yaml
@@ -1107,9 +1107,15 @@
 
 - op : fmin (elementwise_fmin)
   backward : fmin_grad (elementwise_fmin_grad)
+  inputs :
+    {x : X, y : Y}
+  outputs :
+    {out : Out}
   extra :
     attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32", bool use_quantizer = false,
             float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]
+  complex_promote : [X, Y]
+  manual_signature : [fmin]
 
 - op : fold
   inputs :
@@ -1839,9 +1845,15 @@
 
 - op : minimum (elementwise_min)
   backward : minimum_grad (elementwise_min_grad)
+  inputs :
+    {x : X, y : Y}
+  outputs :
+    {out : Out}
   extra :
     attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]
+  complex_promote : [X, Y]
+  manual_signature : [minimum]
 
 - op : mish
   backward : mish_grad
diff --git a/paddle/phi/api/yaml/op_version.yaml b/paddle/phi/api/yaml/op_version.yaml
index 73f580d6a86..bd296a6191d 100644
--- a/paddle/phi/api/yaml/op_version.yaml
+++ b/paddle/phi/api/yaml/op_version.yaml
@@ -181,6 +181,14 @@
           comment : In order to support the function of scaling the input Y when using the operator of elementwise_max.
           default : 1.0
 
+- op : elementwise_min
+  version :
+    - checkpoint : Register elementwise_min for adding the attribute of Scale_y.
+      action :
+        - add_attr : Scale_y
+          comment : In order to support the function of scaling the input Y when using the operator of elementwise_min.
+          default : 1.0
+
 - op : elementwise_mod
   version :
     - checkpoint : Register elementwise_mod for adding the attribute of Scale_y
diff --git a/paddle/phi/api/yaml/ops.yaml b/paddle/phi/api/yaml/ops.yaml
index b87794af529..28e52692d3a 100644
--- a/paddle/phi/api/yaml/ops.yaml
+++ b/paddle/phi/api/yaml/ops.yaml
@@ -941,6 +941,16 @@
     func : fmax
   backward : fmax_grad
 
+- op : fmin
+  args : (Tensor x, Tensor y)
+  output : Tensor(out)
+  infer_meta :
+    func : ElementwiseInferMeta
+    param: [x, y]
+  kernel :
+    func : fmin
+  backward : fmin_grad
+
 - op : fold
   args: (Tensor x, int[] output_sizes, int[] kernel_sizes, int[] strides, int[] paddings, int[] dilations)
   output: Tensor(out)
diff --git a/paddle/phi/api/yaml/static_backward.yaml b/paddle/phi/api/yaml/static_backward.yaml
index 831b240b0af..ebbca84b67d 100755
--- a/paddle/phi/api/yaml/static_backward.yaml
+++ b/paddle/phi/api/yaml/static_backward.yaml
@@ -211,6 +211,18 @@
   kernel :
     func : min_grad
 
+- backward_op : minimum_grad
+  forward : minimum(Tensor x, Tensor y, int axis = -1) -> Tensor(out)
+  args : (Tensor x, Tensor y, Tensor out_grad)
+  output : Tensor(x_grad), Tensor(y_grad)
+  infer_meta :
+    func : GeneralBinaryGradInferMeta
+    param: [x, y]
+  kernel :
+    func : minimum_grad
+    data_type : out_grad
+  composite : minimum_grad(x, y, out_grad, x_grad, y_grad)
+
 - backward_op : norm_grad
   forward : norm (Tensor x, int axis, float epsilon=1.0e-10f, bool is_test=false) -> Tensor(out), Tensor(norm)
   args : (Tensor x, Tensor norm, Tensor out_grad, int axis, float epsilon, bool is_test)
diff --git a/paddle/phi/api/yaml/static_ops.yaml b/paddle/phi/api/yaml/static_ops.yaml
index 92514691d03..069fdd7289f 100755
--- a/paddle/phi/api/yaml/static_ops.yaml
+++ b/paddle/phi/api/yaml/static_ops.yaml
@@ -407,6 +407,15 @@
     param : [x, axis, keepdim, reduce_all]
   backward : min_grad
 
+- op : minimum
+  args : (Tensor x, Tensor y, int axis = -1)
+  output : Tensor(out)
+  infer_meta :
+    func : ElementwiseRawInferMeta
+  kernel :
+    func : minimum
+  backward : minimum_grad
+
 - op : norm
   args : (Tensor x, int axis, float epsilon=1.0e-10f, bool is_test=false)
   output : Tensor(out), Tensor(norm)
diff --git a/paddle/phi/ops/compat/elementwise_sig.cc b/paddle/phi/ops/compat/elementwise_sig.cc
index 4a74cf334f6..b1150268fba 100644
--- a/paddle/phi/ops/compat/elementwise_sig.cc
+++ b/paddle/phi/ops/compat/elementwise_sig.cc
@@ -78,6 +78,9 @@ KernelSignature ElementwiseMaxOpArgumentMapping(
 
 KernelSignature ElementwiseMinOpArgumentMapping(
     const ArgumentMappingContext& ctx) {
+  if (ctx.IsForInferShape()) {
+    return KernelSignature("minimum_raw", {"X", "Y"}, {"axis"}, {"Out"});
+  }
   int axis = paddle::any_cast<int>(ctx.Attr("axis"));
   if (axis == -1) {
     return KernelSignature("minimum", {"X", "Y"}, {}, {"Out"});
@@ -162,12 +165,6 @@ KernelSignature ElementwiseDivGradOpArgumentMapping(
                          {"X@GRAD", "Y@GRAD"});
 }
 
-KernelSignature ElementwiseFMinGradOpArgumentMapping(
-    const ArgumentMappingContext& ctx UNUSED) {
-  return KernelSignature(
-      "fmin_grad", {"X", "Y", "Out@GRAD"}, {}, {"X@GRAD", "Y@GRAD"});
-}
-
 KernelSignature ElementwiseDivDoubleGradOpArgumentMapping(
     const ArgumentMappingContext& ctx UNUSED) {
   return KernelSignature("divide_double_grad",
@@ -209,12 +206,6 @@ KernelSignature ElementwiseMulTripleGradOpArgumentMapping(
                          {"D_X", "D_Y", "D_DOut", "D_DDX", "D_DDY"});
 }
 
-KernelSignature ElementwiseMinGradOpArgumentMapping(
-    const ArgumentMappingContext& ctx UNUSED) {
-  return KernelSignature(
-      "minimum_grad", {"X", "Y", "Out@GRAD"}, {}, {"X@GRAD", "Y@GRAD"});
-}
-
 }  // namespace phi
 
 PD_REGISTER_BASE_KERNEL_NAME(elementwise_add, add);
@@ -237,8 +228,6 @@ PD_REGISTER_BASE_KERNEL_NAME(elementwise_mul_grad_grad, multiply_double_grad);
 PD_REGISTER_BASE_KERNEL_NAME(elementwise_mul_triple_grad, multiply_triple_grad);
 PD_REGISTER_BASE_KERNEL_NAME(elementwise_fmax, fmax);
 PD_REGISTER_BASE_KERNEL_NAME(elementwise_fmin, fmin);
-PD_REGISTER_BASE_KERNEL_NAME(elementwise_fmin_grad, fmin_grad);
-PD_REGISTER_BASE_KERNEL_NAME(elementwise_min_grad, minimum_grad);
 
 PD_REGISTER_ARG_MAPPING_FN(elementwise_add,
                            phi::ElementwiseAddOpArgumentMapping);
@@ -282,8 +271,4 @@ PD_REGISTER_ARG_MAPPING_FN(elementwise_fmax,
                            phi::ElementwiseFMaxOpArgumentMapping);
 PD_REGISTER_ARG_MAPPING_FN(elementwise_fmin,
                            phi::ElementwiseFMinOpArgumentMapping);
-PD_REGISTER_ARG_MAPPING_FN(elementwise_fmin_grad,
-                           phi::ElementwiseFMinGradOpArgumentMapping);
-PD_REGISTER_ARG_MAPPING_FN(elementwise_min_grad,
-                           phi::ElementwiseMinGradOpArgumentMapping);
 PD_REGISTER_ARG_MAPPING_FN(grad_add, phi::ElementwiseGradAddOpArgumentMapping);
-- 
GitLab
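Reviewer note (not part of the patch): the migration to YAML-generated op definitions is meant to be behavior-preserving, so the Python-level ops should lower to the generated static `minimum`/`fmin` kernels with unchanged results. Below is a minimal smoke-test sketch in static-graph mode; it assumes a Paddle build with this patch applied and uses only public APIs (paddle.minimum, paddle.fmin, paddle.static).

    # Smoke test sketch: exercise the static ops this patch migrates.
    import numpy as np
    import paddle

    paddle.enable_static()  # static graph mode, so the static_ops.yaml path is used

    main_prog = paddle.static.Program()
    with paddle.static.program_guard(main_prog):
        x = paddle.static.data(name="x", shape=[3], dtype="float32")
        y = paddle.static.data(name="y", shape=[3], dtype="float32")
        out_min = paddle.minimum(x, y)  # should lower to the generated minimum op
        out_fmin = paddle.fmin(x, y)    # fmin prefers the non-NaN operand

    exe = paddle.static.Executor(paddle.CPUPlace())
    res_min, res_fmin = exe.run(
        main_prog,
        feed={
            "x": np.array([1.0, float("nan"), 5.0], dtype="float32"),
            "y": np.array([2.0, 3.0, float("nan")], dtype="float32"),
        },
        fetch_list=[out_min, out_fmin],
    )
    print(res_min)   # [ 1. nan nan] -- minimum propagates NaN
    print(res_fmin)  # [1. 3. 5.]    -- fmin returns the non-NaN element

Running the same script before and after the patch is a quick way to confirm that dropping elementwise_min_op.cc and the hand-written argument mappings does not change forward results; the composite minimum_grad entry in static_backward.yaml can be checked the same way by fetching gradients.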