未验证 提交 48ef3d78 编写于 作者: L lzydev 提交者: GitHub

support auto-gen norm (#54943)

上级 150ca413
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <memory>
#include <string>
#include <vector>
#include "paddle/fluid/framework/infershape_utils.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/phi/infermeta/unary.h"
namespace paddle {
namespace operators {
// Proto maker for the `norm` operator: declares the inputs, outputs and
// attributes of the L2-normalization op  y = x / sqrt(sum(x^2) + epsilon).
class NormOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X", "(Tensor) A tensor of rank >= axis.");
    AddAttr<int>("axis",
                 "The axis on which to apply normalization. If axis < 0, "
                 "the dimension to normalization is rank(X) + axis. -1 is "
                 "the last dimension.");
    AddAttr<float>("epsilon",
                   "(float, default 1e-10) The epsilon value is used "
                   "to avoid division by zero.")
        .SetDefault(1.0e-10f);
    // Intermediate output consumed by the backward kernel. Fixed the doc
    // string: the saved quantity is sqrt(sum(x^2) + epsilon) (matching the
    // DOC formula below), and "epsion" was a typo for "epsilon".
    AddOutput("Norm",
              "(Tensor) A tensor saved the `sqrt(sum(x^2) + epsilon)` will "
              "be used in backward kernel.")
        .AsIntermediate()
        .AsExtra();
    AddAttr<bool>("is_test",
                  "(bool, default false) Set to true for inference only, false "
                  "for training.")
        .SetDefault(false);
    AddOutput("Out", "(Tensor) A tensor of the same shape as X.");
    // Typo fix in the rendered docs: "epsion" -> "epsilon".
    AddComment(R"DOC(
Given a tensor, apply 2-normalization along the provided axis.

$$
y = \frac{x}{ \sqrt{\sum {x^2} + epsilon }}
$$

where, $\sum {x^2}$ is calculated along the `axis` dimension.
)DOC");
  }
};
class NormOp : public framework::OperatorWithKernel {
using framework::OperatorWithKernel::OperatorWithKernel;
};
// Backward operator for `norm`.
class NormOpGrad : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  // dL/dX has exactly the shape of the forward input X.
  void InferShape(framework::InferShapeContext* ctx) const override {
    OP_INOUT_CHECK(ctx->HasInput("X"), "Input", "X", "NormOpGrad");
    // Fix: X@GRAD is an *output* of the grad op (checked via HasOutput),
    // so the variable-kind tag in the check message is "Output".
    OP_INOUT_CHECK(ctx->HasOutput(framework::GradVarName("X")),
                   "Output",
                   "X@GRAD",
                   "NormOpGrad");
    ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X"));
  }
};
// Builds the `norm_grad` op description from the forward `norm` op.
template <typename T>
class NormOpGradOpMaker : public framework::SingleGradOpMaker<T> {
 public:
  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;

 protected:
  void Apply(GradOpPtr<T> grad_op) const override {
    grad_op->SetType("norm_grad");
    // Inputs: forward X, the saved Norm intermediate, and dL/dOut.
    grad_op->SetInput("X", this->Input("X"));
    grad_op->SetInput("Norm", this->Output("Norm"));
    grad_op->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out"));
    // Output: dL/dX.
    grad_op->SetOutput(framework::GradVarName("X"), this->InputGrad("X"));
    // Forward all attributes (axis, epsilon, is_test) unchanged.
    grad_op->SetAttrMap(this->Attrs());
  }
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
// NOTE(review): this alias appears unused in the visible part of the file
// (the CPU kernel registrations were moved to phi); confirm before removing.
using CPU = phi::CPUContext;
// Bind phi::NormInferMeta as the shape-inference functor for `norm`.
DECLARE_INFER_SHAPE_FUNCTOR(norm,
                            NormInferShapeFunctor,
                            PD_INFER_META(phi::NormInferMeta));
// Register the forward op together with its proto maker, grad-op makers for
// both static-graph (OpDesc) and dygraph (OpBase) paths, and the
// infer-shape functor declared above.
REGISTER_OPERATOR(norm,
                  ops::NormOp,
                  ops::NormOpMaker,
                  ops::NormOpGradOpMaker<paddle::framework::OpDesc>,
                  ops::NormOpGradOpMaker<paddle::imperative::OpBase>,
                  NormInferShapeFunctor);
// The grad op keeps its hand-written InferShape, so no functor is attached.
REGISTER_OPERATOR(norm_grad, ops::NormOpGrad);
......@@ -1888,6 +1888,15 @@
outputs :
out : Out
# Compatibility mapping between the legacy fluid `norm` op (uppercase
# variable names X/Out/Norm) and the generated phi op (lowercase names).
- op : norm
  backward : norm_grad
  inputs :
    x : X
  outputs :
    {out : Out, norm : Norm}
  extra :
    # `norm` is only an intermediate saved for the backward pass.
    outputs : [norm]
- op : not_equal
inputs :
{x : X, y : Y}
......
......@@ -188,6 +188,16 @@
kernel :
func : min_grad
# Auto-generated backward definition for `norm`; replaces the hand-written
# C++ grad-op registration path.
- backward_op : norm_grad
  forward : norm (Tensor x, int axis, float epsilon=1.0e-10f, bool is_test=false) -> Tensor(out), Tensor(norm)
  args : (Tensor x, Tensor norm, Tensor out_grad, int axis, float epsilon, bool is_test)
  output : Tensor(x_grad)
  infer_meta :
    # x_grad has the same shape/dtype as x.
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : norm_grad
- backward_op : pool2d_double_grad
forward : pool2d_grad(Tensor x, Tensor out, Tensor grad_out, IntArray kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm) -> Tensor(grad_x)
args : (Tensor grad_x_grad, IntArray kernel_size, int[] strides, int[] paddings, bool ceil_mode, bool exclusive, str data_format, str pooling_type, bool global_pooling, bool adaptive, str padding_algorithm)
......
......@@ -369,6 +369,16 @@
param : [x, axis, keepdim, reduce_all]
backward : min_grad
# Forward definition of `norm`. The second output `norm` is an intermediate
# kept only so the backward kernel can reuse it.
- op : norm
  args : (Tensor x, int axis, float epsilon=1.0e-10f, bool is_test=false)
  output : Tensor(out), Tensor(norm)
  infer_meta :
    func : NormInferMeta
  kernel :
    func : norm
  backward : norm_grad
  intermediate : norm
- op : not_equal
args : (Tensor x, Tensor y, int axis = -1, bool force_cpu=false)
output : Tensor(out)
......
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/core/compat/op_utils.h"
namespace phi {
// Maps the legacy fluid `norm` op description (X / Out / Norm variable
// names) onto the phi `norm` kernel signature.
KernelSignature NormOpArgumentMapping(
    const ArgumentMappingContext& ctx UNUSED) {
  KernelSignature forward_sig(
      "norm", {"X"}, {"axis", "epsilon", "is_test"}, {"Out", "Norm"});
  return forward_sig;
}
// Maps the legacy fluid `norm_grad` op onto the phi `norm_grad` kernel:
// inputs are the forward X, the saved Norm intermediate, and dL/dOut;
// the single output is dL/dX.
KernelSignature NormGradOpArgumentMapping(
    const ArgumentMappingContext& ctx UNUSED) {
  const char* grad_kernel_name = "norm_grad";
  return KernelSignature(grad_kernel_name,
                         {"X", "Norm", "Out@GRAD"},
                         {"axis", "epsilon", "is_test"},
                         {"X@GRAD"});
}
} // namespace phi
// Register the fluid->phi argument mappings so the executor can dispatch the
// legacy `norm` / `norm_grad` op descriptions to the phi kernels.
PD_REGISTER_ARG_MAPPING_FN(norm, phi::NormOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(norm_grad, phi::NormGradOpArgumentMapping);
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册