未验证 提交 90c3bddf 编写于 作者: G gouzil 提交者: GitHub

Autogen code bilinear_tensor_product (#52690)

* add autogen code bilinear_tensor_product

* [phi] rm cc file
上级 3ee2b237
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/framework/infershape_utils.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/phi/core/infermeta_utils.h"
#include "paddle/phi/infermeta/backward.h"
#include "paddle/phi/infermeta/multiary.h"
namespace paddle {
namespace operators {
// Forward operator shell for bilinear_tensor_product. The body is empty on
// purpose: shape inference is supplied at registration time through
// BilinearTensorProductInferShapeFunctor (phi::BilinearInferMeta), so only
// the base-class constructors need to be inherited here.
class BilinearTensorProductOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
};
// Declares the proto for bilinear_tensor_product: inputs X, Y, Weight,
// optional Bias; output Out. The AddInput/AddOutput call order fixes the
// field order in the generated op proto, so it must not be changed.
class BilinearTensorProductOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() override {
AddInput("X", "The first input of bilinear_tensor_product operator.");
AddInput("Y", "The second input of bilinear_tensor_product operator.");
AddInput("Weight",
"The learnable parameters of bilinear_tensor_product operator.");
// Bias is dispensable: callers may omit it (matches `optional : bias`
// in the autogen yaml definition of this op).
AddInput("Bias", "The learnable bias of bilinear_tensor_product operator.")
.AsDispensable();
AddOutput("Out", "The output of bilinear_tensor_product operator.");
AddComment(R"DOC(
Bilinear Tensor Product operator.
Given input X and Y, a 3D tensor Weight and a Bias. Each column of the
Output is computed by one slice $i = 1, . . . , k$ of the tensor:
$$
M = (X W_i) * Y \\
Out_i = \sum_j {M_j} + Bias_i
$$
Where $W_i$ is the $i$-th slice of Input(Weight);
$M_j$ is the $j$-th column of $M$;
$Out_i$ is the $i$-th column of Output(Out);
$Bias_i$ is a column vector, each element of it is equal to
the $i$-th element of $Bias$;
)DOC");
}
};
// Gradient operator shell for bilinear_tensor_product_grad. Like the forward
// op, it is empty: shape inference comes from the registered
// BilinearTensorProductGradInferShapeFunctor
// (phi::BilinearTensorProductGradInferMeta).
class BilinearTensorProductOpGrad : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
};
// Builds the bilinear_tensor_product_grad op description from the forward op:
// forwards X, Y, Weight and Out@GRAD as inputs, and wires up the gradients of
// X, Y, Weight and (when present) Bias as outputs.
template <typename T>
class BilinearTensorProductGradOpMaker
: public framework::SingleGradOpMaker<T> {
public:
using framework::SingleGradOpMaker<T>::SingleGradOpMaker;
protected:
void Apply(GradOpPtr<T> grad_op) const override {
grad_op->SetType("bilinear_tensor_product_grad");
// Inputs: the forward tensors the grad kernel reads, plus dOut.
grad_op->SetInput("X", this->Input("X"));
grad_op->SetInput("Y", this->Input("Y"));
grad_op->SetInput("Weight", this->Input("Weight"));
grad_op->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out"));
// Outputs: one gradient per learnable forward input.
grad_op->SetOutput(framework::GradVarName("X"), this->InputGrad("X"));
grad_op->SetOutput(framework::GradVarName("Y"), this->InputGrad("Y"));
grad_op->SetOutput(framework::GradVarName("Weight"),
this->InputGrad("Weight"));
// Bias is dispensable in the forward op, so its gradient is only
// attached when the forward op actually received a Bias input.
if (this->HasInput("Bias")) {
grad_op->SetOutput(framework::GradVarName("Bias"),
this->InputGrad("Bias"));
}
grad_op->SetAttrMap(this->Attrs());
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
// Bind the phi infer-meta functions as the shape-inference functors for the
// forward and backward ops; these replace a hand-written InferShape on the
// operator classes above.
DECLARE_INFER_SHAPE_FUNCTOR(bilinear_tensor_product,
BilinearTensorProductInferShapeFunctor,
PD_INFER_META(phi::BilinearInferMeta));
DECLARE_INFER_SHAPE_FUNCTOR(
bilinear_tensor_product_grad,
BilinearTensorProductGradInferShapeFunctor,
PD_INFER_META(phi::BilinearTensorProductGradInferMeta));
// Register the forward op together with its proto maker, the grad-op makers
// for both static graph (OpDesc) and dygraph (OpBase) modes, and the shape
// functor declared above.
REGISTER_OPERATOR(
bilinear_tensor_product,
ops::BilinearTensorProductOp,
ops::BilinearTensorProductOpMaker,
ops::BilinearTensorProductGradOpMaker<paddle::framework::OpDesc>,
ops::BilinearTensorProductGradOpMaker<paddle::imperative::OpBase>,
BilinearTensorProductInferShapeFunctor);
// The grad op needs no maker of its own; it is only created via the grad-op
// makers registered on the forward op.
REGISTER_OPERATOR(bilinear_tensor_product_grad,
ops::BilinearTensorProductOpGrad,
BilinearTensorProductGradInferShapeFunctor);
......@@ -173,6 +173,15 @@
data_transform :
skip_transform : out_size, size_tensor, scale_tensor
- backward_op : bilinear_tensor_product_grad
forward : bilinear_tensor_product (Tensor x, Tensor y, Tensor weight, Tensor bias) -> Tensor(out)
args : (Tensor x, Tensor y, Tensor weight, Tensor out_grad)
output : Tensor(x_grad), Tensor(y_grad), Tensor(weight_grad), Tensor(bias_grad)
infer_meta :
func : BilinearTensorProductGradInferMeta
kernel :
func : bilinear_grad
- backward_op : bmm_grad
forward : bmm (Tensor x, Tensor y) -> Tensor(out)
args : (Tensor x, Tensor y, Tensor out_grad)
......
......@@ -122,15 +122,6 @@
composite: batch_norm_grad(x, scale, bias, mean_out, variance_out, saved_mean, saved_variance, reserve_space, out_grad, momentum, epsilon, data_layout, is_test, use_global_stats, trainable_statistics)
backward : batch_norm_double_grad
- backward_op : bilinear_tensor_product_grad
forward : bilinear_tensor_product (Tensor x, Tensor y, Tensor weight, Tensor bias) -> Tensor(out)
args : (Tensor x, Tensor y, Tensor weight, Tensor out_grad)
output : Tensor(x_grad), Tensor(y_grad), Tensor(weight_grad), Tensor(bias_grad)
infer_meta :
func : BilinearTensorProductGradInferMeta
kernel :
func : bilinear_grad
- backward_op : cast_grad
forward : cast (Tensor x, DataType dtype) -> Tensor(out)
args : (Tensor x, Tensor out_grad)
......
......@@ -186,16 +186,6 @@
view : (mean -> mean_out), (variance -> variance_out)
backward : batch_norm_grad
- op : bilinear_tensor_product
args : (Tensor x, Tensor y, Tensor weight, Tensor bias)
output : Tensor
infer_meta :
func : BilinearInferMeta
kernel :
func : bilinear
optional : bias
backward : bilinear_tensor_product_grad
- op : bincount
args: (Tensor x, Tensor weights, Scalar(int) minlength = 0)
output: Tensor(out)
......
......@@ -262,6 +262,12 @@
extra :
attrs : [bool use_mkldnn = false]
- op : bilinear_tensor_product
inputs :
{x : X, y : Y, weight : Weight, bias : Bias}
outputs :
{out : Out}
- op : bitwise_and
inputs :
{x : X, y : Y}
......
......@@ -218,6 +218,16 @@
data_transform :
skip_transform : out_size, size_tensor, scale_tensor
- op : bilinear_tensor_product
args : (Tensor x, Tensor y, Tensor weight, Tensor bias)
output : Tensor
infer_meta :
func : BilinearInferMeta
kernel :
func : bilinear
optional : bias
backward : bilinear_tensor_product_grad
- op : bitwise_and
args : (Tensor x, Tensor y)
output : Tensor(out)
......
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/core/compat/op_utils.h"
namespace phi {
// Maps the legacy fluid op bilinear_tensor_product onto the phi "bilinear"
// kernel. The op carries no attributes, hence the empty attribute list.
KernelSignature BilinearTensorProductOpArgumentMapping(
const ArgumentMappingContext& ctx) {
KernelSignature sig("bilinear", {"X", "Y", "Weight", "Bias"}, {}, {"Out"});
return sig;
}
// Maps bilinear_tensor_product_grad onto the phi "bilinear_grad" kernel.
// Inputs are the saved forward tensors plus dOut; outputs are the gradients
// of X, Y, Weight and Bias. No attributes are forwarded.
KernelSignature BilinearTensorProductGradOpArgumentMapping(
const ArgumentMappingContext& ctx) {
KernelSignature sig("bilinear_grad",
{"X", "Y", "Weight", "Out@GRAD"},
{},
{"X@GRAD", "Y@GRAD", "Weight@GRAD", "Bias@GRAD"});
return sig;
}
} // namespace phi
PD_REGISTER_ARG_MAPPING_FN(bilinear_tensor_product,
phi::BilinearTensorProductOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(bilinear_tensor_product_grad,
phi::BilinearTensorProductGradOpArgumentMapping);
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册