From 90c3bddfda207c9b5197a7db99cf98707fdb7a7c Mon Sep 17 00:00:00 2001 From: gouzil <66515297+gouzil@users.noreply.github.com> Date: Mon, 10 Apr 2023 14:39:02 +0800 Subject: [PATCH] Autogen code bilinear_tensor_product (#52690) * add autogen code bilinear_tensor_product * [phi] rm cc file --- .../operators/bilinear_tensor_product_op.cc | 110 ------------------ paddle/phi/api/yaml/backward.yaml | 9 ++ paddle/phi/api/yaml/legacy_backward.yaml | 9 -- paddle/phi/api/yaml/legacy_ops.yaml | 10 -- paddle/phi/api/yaml/op_compat.yaml | 6 + paddle/phi/api/yaml/ops.yaml | 10 ++ .../ops/compat/bilinear_tensor_product_sig.cc | 37 ------ 7 files changed, 25 insertions(+), 166 deletions(-) delete mode 100644 paddle/fluid/operators/bilinear_tensor_product_op.cc delete mode 100644 paddle/phi/ops/compat/bilinear_tensor_product_sig.cc diff --git a/paddle/fluid/operators/bilinear_tensor_product_op.cc b/paddle/fluid/operators/bilinear_tensor_product_op.cc deleted file mode 100644 index 00586c4e1e4..00000000000 --- a/paddle/fluid/operators/bilinear_tensor_product_op.cc +++ /dev/null @@ -1,110 +0,0 @@ -/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
*/ - -#include "paddle/fluid/framework/infershape_utils.h" -#include "paddle/fluid/framework/op_registry.h" -#include "paddle/phi/core/infermeta_utils.h" -#include "paddle/phi/infermeta/backward.h" -#include "paddle/phi/infermeta/multiary.h" - -namespace paddle { -namespace operators { - -class BilinearTensorProductOp : public framework::OperatorWithKernel { - public: - using framework::OperatorWithKernel::OperatorWithKernel; -}; - -class BilinearTensorProductOpMaker : public framework::OpProtoAndCheckerMaker { - public: - void Make() override { - AddInput("X", "The first input of bilinear_tensor_product operator."); - AddInput("Y", "The second input of bilinear_tensor_product operator."); - AddInput("Weight", - "The learnable parameters of bilinear_tensor_product operator."); - AddInput("Bias", "The learnable bias of bilinear_tensor_product operator.") - .AsDispensable(); - AddOutput("Out", "The output of bilinear_tensor_product operator."); - AddComment(R"DOC( -Bilinear Tensor Product operator. -Given input X and Y, a 3D tensor Weight and a Bias. Each column of the -Output is computed by one slice $i = 1, . . . 
, k$ of the tensor: - -$$ -M = (X W_i) * Y \\ -Out_i = \sum_j {M_j} + Bias_i -$$ - -Where $W_i$ is the $i$-th slice of Input(Weight); - $M_j$ is the $j$-th column of $M$; - $Out_i$ is the $i$-th column of Output(Out); - $Bias_i$ is a column vector, each element of it is equal to - the $i$-th element of $Bias$; - -)DOC"); - } -}; - -class BilinearTensorProductOpGrad : public framework::OperatorWithKernel { - public: - using framework::OperatorWithKernel::OperatorWithKernel; -}; - -template -class BilinearTensorProductGradOpMaker - : public framework::SingleGradOpMaker { - public: - using framework::SingleGradOpMaker::SingleGradOpMaker; - - protected: - void Apply(GradOpPtr op) const override { - op->SetType("bilinear_tensor_product_grad"); - op->SetAttrMap(this->Attrs()); - op->SetInput("X", this->Input("X")); - op->SetInput("Y", this->Input("Y")); - op->SetInput("Weight", this->Input("Weight")); - if (this->HasInput("Bias")) { - op->SetOutput(framework::GradVarName("Bias"), this->InputGrad("Bias")); - } - - op->SetOutput(framework::GradVarName("X"), this->InputGrad("X")); - op->SetOutput(framework::GradVarName("Y"), this->InputGrad("Y")); - op->SetOutput(framework::GradVarName("Weight"), this->InputGrad("Weight")); - op->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out")); - } -}; - -} // namespace operators -} // namespace paddle - -namespace ops = paddle::operators; - -DECLARE_INFER_SHAPE_FUNCTOR(bilinear_tensor_product, - BilinearTensorProductInferShapeFunctor, - PD_INFER_META(phi::BilinearInferMeta)); -DECLARE_INFER_SHAPE_FUNCTOR( - bilinear_tensor_product_grad, - BilinearTensorProductGradInferShapeFunctor, - PD_INFER_META(phi::BilinearTensorProductGradInferMeta)); - -REGISTER_OPERATOR( - bilinear_tensor_product, - ops::BilinearTensorProductOp, - ops::BilinearTensorProductOpMaker, - ops::BilinearTensorProductGradOpMaker, - ops::BilinearTensorProductGradOpMaker, - BilinearTensorProductInferShapeFunctor); 
-REGISTER_OPERATOR(bilinear_tensor_product_grad, - ops::BilinearTensorProductOpGrad, - BilinearTensorProductGradInferShapeFunctor); diff --git a/paddle/phi/api/yaml/backward.yaml b/paddle/phi/api/yaml/backward.yaml index 2b49b6950f6..7116b2be70d 100644 --- a/paddle/phi/api/yaml/backward.yaml +++ b/paddle/phi/api/yaml/backward.yaml @@ -173,6 +173,15 @@ data_transform : skip_transform : out_size, size_tensor, scale_tensor +- backward_op : bilinear_tensor_product_grad + forward : bilinear_tensor_product (Tensor x, Tensor y, Tensor weight, Tensor bias) -> Tensor(out) + args : (Tensor x, Tensor y, Tensor weight, Tensor out_grad) + output : Tensor(x_grad), Tensor(y_grad), Tensor(weight_grad), Tensor(bias_grad) + infer_meta : + func : BilinearTensorProductGradInferMeta + kernel : + func : bilinear_grad + - backward_op : bmm_grad forward : bmm (Tensor x, Tensor y) -> Tensor(out) args : (Tensor x, Tensor y, Tensor out_grad) diff --git a/paddle/phi/api/yaml/legacy_backward.yaml b/paddle/phi/api/yaml/legacy_backward.yaml index 25582edca0c..6ba507312b3 100755 --- a/paddle/phi/api/yaml/legacy_backward.yaml +++ b/paddle/phi/api/yaml/legacy_backward.yaml @@ -122,15 +122,6 @@ composite: batch_norm_grad(x, scale, bias, mean_out, variance_out, saved_mean, saved_variance, reserve_space, out_grad, momentum, epsilon, data_layout, is_test, use_global_stats, trainable_statistics) backward : batch_norm_double_grad -- backward_op : bilinear_tensor_product_grad - forward : bilinear_tensor_product (Tensor x, Tensor y, Tensor weight, Tensor bias) -> Tensor(out) - args : (Tensor x, Tensor y, Tensor weight, Tensor out_grad) - output : Tensor(x_grad), Tensor(y_grad), Tensor(weight_grad), Tensor(bias_grad) - infer_meta : - func : BilinearTensorProductGradInferMeta - kernel : - func : bilinear_grad - - backward_op : cast_grad forward : cast (Tensor x, DataType dtype) -> Tensor(out) args : (Tensor x, Tensor out_grad) diff --git a/paddle/phi/api/yaml/legacy_ops.yaml 
b/paddle/phi/api/yaml/legacy_ops.yaml index 972f85070ba..32966d54e09 100755 --- a/paddle/phi/api/yaml/legacy_ops.yaml +++ b/paddle/phi/api/yaml/legacy_ops.yaml @@ -186,16 +186,6 @@ view : (mean -> mean_out), (variance -> variance_out) backward : batch_norm_grad -- op : bilinear_tensor_product - args : (Tensor x, Tensor y, Tensor weight, Tensor bias) - output : Tensor - infer_meta : - func : BilinearInferMeta - kernel : - func : bilinear - optional : bias - backward : bilinear_tensor_product_grad - - op : bincount args: (Tensor x, Tensor weights, Scalar(int) minlength = 0) output: Tensor(out) diff --git a/paddle/phi/api/yaml/op_compat.yaml b/paddle/phi/api/yaml/op_compat.yaml index e9a790e9128..f905b04c92d 100644 --- a/paddle/phi/api/yaml/op_compat.yaml +++ b/paddle/phi/api/yaml/op_compat.yaml @@ -262,6 +262,12 @@ extra : attrs : [bool use_mkldnn = false] +- op : bilinear_tensor_product + inputs : + {x : X, y : Y, weight : Weight, bias : Bias} + outputs : + {out : Out} + - op : bitwise_and inputs : {x : X, y : Y} diff --git a/paddle/phi/api/yaml/ops.yaml b/paddle/phi/api/yaml/ops.yaml index 5d5eb8d9fc3..e0598f15b58 100644 --- a/paddle/phi/api/yaml/ops.yaml +++ b/paddle/phi/api/yaml/ops.yaml @@ -218,6 +218,16 @@ data_transform : skip_transform : out_size, size_tensor, scale_tensor +- op : bilinear_tensor_product + args : (Tensor x, Tensor y, Tensor weight, Tensor bias) + output : Tensor + infer_meta : + func : BilinearInferMeta + kernel : + func : bilinear + optional : bias + backward : bilinear_tensor_product_grad + - op : bitwise_and args : (Tensor x, Tensor y) output : Tensor(out) diff --git a/paddle/phi/ops/compat/bilinear_tensor_product_sig.cc b/paddle/phi/ops/compat/bilinear_tensor_product_sig.cc deleted file mode 100644 index 54509e4b2de..00000000000 --- a/paddle/phi/ops/compat/bilinear_tensor_product_sig.cc +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -#include "paddle/phi/core/compat/op_utils.h" - -namespace phi { - -KernelSignature BilinearTensorProductOpArgumentMapping( - const ArgumentMappingContext& ctx) { - return KernelSignature("bilinear", {"X", "Y", "Weight", "Bias"}, {}, {"Out"}); -} - -KernelSignature BilinearTensorProductGradOpArgumentMapping( - const ArgumentMappingContext& ctx) { - return KernelSignature("bilinear_grad", - {"X", "Y", "Weight", "Out@GRAD"}, - {}, - {"X@GRAD", "Y@GRAD", "Weight@GRAD", "Bias@GRAD"}); -} - -} // namespace phi - -PD_REGISTER_ARG_MAPPING_FN(bilinear_tensor_product, - phi::BilinearTensorProductOpArgumentMapping); -PD_REGISTER_ARG_MAPPING_FN(bilinear_tensor_product_grad, - phi::BilinearTensorProductGradOpArgumentMapping); -- GitLab