Unverified commit b86bbe85, authored by cyberslack_lee and committed by GitHub

support auto generation V2 abs (#53341)

Parent 7b81092b
// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
#include "paddle/fluid/framework/infershape_utils.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/prim/api/composite_backward/composite_backward_api.h"
#include "paddle/fluid/prim/utils/static/composite_grad_desc_maker.h"
#include "paddle/fluid/prim/utils/static/desc_tensor.h"
#include "paddle/phi/core/infermeta_utils.h"
#include "paddle/phi/infermeta/unary.h"
namespace paddle {
namespace operators {
class AbsOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
phi::KernelKey GetExpectedKernelType(
const framework::ExecutionContext& ctx) const override {
auto input_data_type =
framework::OperatorWithKernel::IndicateVarDataType(ctx, "X");
return phi::KernelKey(input_data_type, ctx.GetPlace());
}
};
class AbsOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() override {
AddInput("X", "(Tensor), The input tensor of abs op.");
AddOutput("Out", "(Tensor), The output tensor of abs op.");
AddComment(R"DOC(
Abs Operator.
This operator is used to perform elementwise abs for input $X$.
$$out = |x|$$
)DOC");
}
};
class AbsGradOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext* ctx) const override {
OP_INOUT_CHECK(ctx->HasInput(framework::GradVarName("Out")),
"Input",
"Out@Grad",
"AbsGrad");
OP_INOUT_CHECK(ctx->HasOutput(framework::GradVarName("X")),
"Output",
"X@Grad",
"AbsGrad");
auto dout_dims = ctx->GetInputDim(framework::GradVarName("Out"));
ctx->SetOutputDim(framework::GradVarName("X"), dout_dims);
}
protected:
phi::KernelKey GetExpectedKernelType(
const framework::ExecutionContext& ctx) const override {
auto input_data_type =
framework::OperatorWithKernel::IndicateVarDataType(ctx, "X");
return phi::KernelKey(input_data_type, ctx.GetPlace());
}
};
template <typename T>
class AbsGradMaker : public framework::SingleGradOpMaker<T> {
public:
using framework::SingleGradOpMaker<T>::SingleGradOpMaker;
void Apply(GradOpPtr<T> retv) const override {
retv->SetType("abs_grad");
retv->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out"));
retv->SetInput("X", this->Input("X"));
retv->SetAttrMap(this->Attrs());
retv->SetOutput(framework::GradVarName("X"), this->InputGrad("X"));
}
};
class AbsCompositeGradOpMaker : public prim::CompositeGradOpMakerBase {
using prim::CompositeGradOpMakerBase::CompositeGradOpMakerBase;
public:
void Apply() override {
paddle::Tensor input = this->GetSingleForwardInput("X");
paddle::Tensor out_grad = this->GetSingleOutputGrad("Out");
paddle::Tensor input_grad = this->GetSingleInputGrad("X");
auto dx_ptr = this->GetOutputPtr(&input_grad);
std::string dx_name = this->GetOutputName(input_grad);
VLOG(6) << "Running abs_grad composite func";
prim::abs_grad<prim::DescTensor>(input, out_grad, dx_ptr);
this->RecoverOutputName(input_grad, dx_name);
}
};
// AbsGrad: dx = dy if x >= 0 else -dy
// AbsDoubleGrad: ddy = ddx if x >= 0 else -ddx
template <typename T>
class AbsDoubleGradMaker : public framework::SingleGradOpMaker<T> {
public:
using ::paddle::framework::SingleGradOpMaker<T>::SingleGradOpMaker;
protected:
void Apply(GradOpPtr<T> op) const override {
op->SetType("abs_double_grad");
// input1: x
op->SetInput("X", this->Input("X"));
// input2: ddx
op->SetInput("DDX", this->OutputGrad(framework::GradVarName("X")));
op->SetAttrMap(this->Attrs());
// output: ddy
op->SetOutput("DDOut", this->InputGrad(framework::GradVarName("Out")));
}
};
class AbsDoubleGradOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext* ctx) const override {
if (ctx->HasOutput("DDOut")) {
ctx->ShareDim("X", "DDOut");
ctx->ShareLoD("X", "DDOut");
}
}
protected:
phi::KernelKey GetExpectedKernelType(
const framework::ExecutionContext& ctx) const override {
auto dtype = OperatorWithKernel::IndicateVarDataType(ctx, "DDX");
return phi::KernelKey(dtype, ctx.GetPlace());
}
phi::KernelKey GetKernelTypeForVar(
const std::string& var_name,
const phi::DenseTensor& tensor,
const phi::KernelKey& expected_kernel_type) const override {
return phi::KernelKey(tensor.place(), tensor.layout(), tensor.dtype());
}
};
} // namespace operators
} // namespace paddle
DECLARE_INFER_SHAPE_FUNCTOR(abs,
AbsInferShapeFunctor,
PD_INFER_META(phi::RealAndImagInferMeta));
namespace ops = paddle::operators;
REGISTER_OPERATOR(abs,
ops::AbsOp,
ops::AbsOpMaker,
ops::AbsCompositeGradOpMaker,
ops::AbsGradMaker<paddle::framework::OpDesc>,
ops::AbsGradMaker<paddle::imperative::OpBase>,
AbsInferShapeFunctor);
REGISTER_OPERATOR(abs_grad,
ops::AbsGradOp,
ops::AbsDoubleGradMaker<paddle::framework::OpDesc>,
ops::AbsDoubleGradMaker<paddle::imperative::OpBase>);
REGISTER_OPERATOR(abs_double_grad, ops::AbsDoubleGradOp);
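For reference, the gradient rules noted in the comments above (AbsGrad: dx = dy if x >= 0 else -dy; AbsDoubleGrad: ddy = ddx if x >= 0 else -ddx) can be checked with a short NumPy sketch; this only illustrates the math, it is not Paddle's kernel code.

import numpy as np

def abs_grad(x, dout):
    # dx = dout where x >= 0, otherwise -dout
    return np.where(x >= 0, dout, -dout)

def abs_double_grad(x, ddx):
    # ddout = ddx where x >= 0, otherwise -ddx
    return np.where(x >= 0, ddx, -ddx)

x = np.array([-2.0, 0.5, 3.0])
print(abs_grad(x, np.ones_like(x)))         # [-1.  1.  1.]
print(abs_double_grad(x, np.ones_like(x)))  # [-1.  1.  1.]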
# This file is designed for backward C++ operators associated with
# the operators in ops.yaml.
- backward_op : abs_double_grad
  forward : abs_grad (Tensor x, Tensor grad_out) -> Tensor(grad_x)
  args : (Tensor x, Tensor grad_x_grad)
  output : Tensor(grad_out_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  data_transform :
    support_trans_dtype : x, grad_x_grad
  kernel :
    func : abs_double_grad
    data_type : grad_x_grad

- backward_op : abs_grad
  forward : abs (Tensor x) -> Tensor(out)
  args : (Tensor x, Tensor out_grad)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : abs_grad
    data_type : x
  composite : abs_grad(x, out_grad, x_grad)
  backward : abs_double_grad
- backward_op : acos_grad
  forward : acos (Tensor x) -> Tensor(out)
  args : (Tensor x, Tensor out_grad)
......
- backward_op : abs_double_grad
  forward : abs_grad (Tensor x, Tensor grad_out) -> Tensor(grad_x)
  args : (Tensor x, Tensor grad_x_grad)
  output : Tensor(grad_out_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : abs_double_grad

- backward_op : abs_grad
  forward : abs (Tensor x) -> Tensor(out)
  args : (Tensor x, Tensor out_grad)
  output : Tensor(x_grad)
  infer_meta :
    func : UnchangedInferMeta
    param : [x]
  kernel :
    func : abs_grad
  composite : abs_grad(x, out_grad, x_grad)
  backward : abs_double_grad
- backward_op : add_double_grad
  forward : add_grad (Tensor x, Tensor y, Tensor grad_out, int axis = -1) -> Tensor(grad_x), Tensor(grad_y)
  args : (Tensor y, Tensor grad_out, Tensor grad_x_grad, Tensor grad_y_grad, int axis = -1)
......
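The `composite : abs_grad(x, out_grad, x_grad)` entries above route the backward pass through the prim composite rule, which decomposes abs_grad into primitive ops; conceptually the decomposition is x_grad = out_grad * sign(x). A minimal Python sketch of that decomposition (an illustration, not the C++ prim::abs_grad implementation):

import paddle

def composite_abs_grad(x, out_grad):
    # Decomposition into primitives: x_grad = out_grad * sign(x)
    return out_grad * paddle.sign(x)

x = paddle.to_tensor([-2.0, 0.0, 3.0])
print(composite_abs_grad(x, paddle.ones_like(x)))  # [-1., 0., 1.]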
# The APIs in this file are unstandardized, which may be caused by a variety of reasons;
# we are trying to fix these APIs and will move the standardized APIs into ops.yaml.
- op : abs
  args : (Tensor x)
  output : Tensor
  infer_meta :
    func : RealAndImagInferMeta
  kernel :
    func : abs
  backward : abs_grad
- op : adadelta_
  args : (Tensor param, Tensor grad, Tensor avg_squared_grad, Tensor avg_squared_update, Tensor learning_rate, Tensor master_param, float rho, float epsilon, bool multi_precision)
  output : Tensor(param_out), Tensor(moment_out), Tensor(inf_norm_out), Tensor(master_param_out)
......
@@ -5,6 +5,16 @@
# are consistent and correspond one-to-one. It's forbidden that an
# operator configured in this yaml file does not have a Python API.
- op : abs
  args : (Tensor x)
  output : Tensor(out)
  infer_meta :
    func : RealAndImagInferMeta
  kernel :
    func : abs
    data_type : x
  backward : abs_grad
- op : accuracy
  args : (Tensor x, Tensor indices, Tensor label)
  output : Tensor(accuracy), Tensor(correct), Tensor(total)
......
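Once the `abs` entry above is picked up by the op generator, the operator is exposed through the regular `paddle.abs` API; a quick dynamic-graph smoke test (assuming a standard Paddle install):

import paddle

x = paddle.to_tensor([-1.5, 0.7, 2.0], stop_gradient=False)
y = paddle.abs(x)
y.sum().backward()
print(y)       # [1.5, 0.7, 2.0]
print(x.grad)  # [-1., 1., 1.], matching dx = dy if x >= 0 else -dy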
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/core/compat/op_utils.h"
namespace phi {
KernelSignature AbsOpArgumentMapping(const ArgumentMappingContext& ctx) {
return KernelSignature("abs", {"X"}, {}, {"Out"});
}
KernelSignature AbsGradOpArgumentMapping(const ArgumentMappingContext& ctx) {
return KernelSignature("abs_grad", {"X", "Out@GRAD"}, {}, {"X@GRAD"});
}
KernelSignature AbsDoubleGradOpArgumentMapping(
const ArgumentMappingContext& ctx) {
return KernelSignature("abs_double_grad", {"X", "DDX"}, {}, {"DDOut"});
}
} // namespace phi
PD_REGISTER_ARG_MAPPING_FN(abs, phi::AbsOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(abs_grad, phi::AbsGradOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(abs_double_grad,
phi::AbsDoubleGradOpArgumentMapping);
@@ -308,12 +308,28 @@ def generate_activation_fn(op_type):
        return output

    func.__name__ = op_type
    if op_type == 'abs':
        func.__doc__ = r"""

Abs Operator.
Perform elementwise abs for input `X`.

.. math::
    out = |x|

Args:
    x (Tensor): The input tensor of abs op.
    out (Tensor): The output tensor of abs op.
    name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

"""
    else:
        func.__doc__ = _generate_doc_string_(
            op_proto,
            additional_args_lines=[
                "name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`."
            ],
        )
    return func
......
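The branch added above gives the generated `abs` a hand-written docstring while every other activation keeps the `_generate_doc_string_` output. The same stamping pattern in isolation (the `make_fn` factory below is a hypothetical stand-in, not Paddle code):

def make_fn(op_type):
    def func(x):
        return abs(x)  # placeholder body standing in for the real op dispatch
    func.__name__ = op_type
    if op_type == 'abs':
        func.__doc__ = "Abs Operator.\n\nPerform elementwise abs for input `X`."
    else:
        func.__doc__ = f"Auto-generated docstring for {op_type}."
    return func

print(make_fn('abs').__doc__)   # the hand-written text
print(make_fn('acos').__doc__)  # the generated fallback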