Unverified commit 2530b05e authored by RedContritio, committed by GitHub

support auto generate for static op elementwise_pow (#55023)

* configure elementwise_pow op_version

* support auto generate for static op elementwise_pow

* pre-commit run
Parent 993c163e
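
Since this change only moves elementwise_pow from hand-written C++ registration to YAML-driven code generation, the operator's user-visible behavior should be unchanged. A minimal dygraph sanity check (hypothetical usage, not part of the commit; assumes a Paddle build containing this change):

import paddle

# Out = X ^ Y, element-wise
x = paddle.to_tensor([2.0, 3.0, 4.0])
y = paddle.to_tensor([3.0, 2.0, 0.5])
out = paddle.pow(x, y)  # dispatches to the elementwise_pow kernel
print(out.numpy())      # [8. 9. 2.]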
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <string>
#include "paddle/fluid/operators/elementwise/elementwise_op.h"
#include "paddle/fluid/prim/api/composite_backward/composite_backward_api.h"
#include "paddle/fluid/prim/utils/static/composite_grad_desc_maker.h"
#include "paddle/fluid/prim/utils/static/desc_tensor.h"
namespace paddle {
namespace framework {
class OpDesc;
} // namespace framework
namespace imperative {
class OpBase;
} // namespace imperative
} // namespace paddle
namespace paddle {
namespace operators {
template <typename T>
class ElementwisePowOpGradMaker : public framework::SingleGradOpMaker<T> {
public:
using framework::SingleGradOpMaker<T>::SingleGradOpMaker;
protected:
void Apply(GradOpPtr<T> op) const override {
op->SetType("elementwise_pow_grad");
op->SetInput("X", this->Input("X"));
op->SetInput("Y", this->Input("Y"));
op->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out"));
op->SetAttrMap(this->Attrs());
op->SetOutput(framework::GradVarName("X"), this->InputGrad("X"));
op->SetOutput(framework::GradVarName("Y"), this->InputGrad("Y"));
}
};
class ElementwisePowCompositeGradOpMaker
: public prim::CompositeGradOpMakerBase {
using prim::CompositeGradOpMakerBase::CompositeGradOpMakerBase;
public:
void Apply() override {
paddle::Tensor x = this->GetSingleForwardInput("X");
paddle::Tensor y = this->GetSingleForwardInput("Y");
paddle::Tensor out_grad = this->GetSingleOutputGrad("Out");
paddle::Tensor dx = this->GetSingleInputGrad("X");
auto dx_ptr = this->GetOutputPtr(&dx);
std::string dx_name = this->GetOutputName(dx);
paddle::Tensor dy = this->GetSingleInputGrad("Y");
auto dy_ptr = this->GetOutputPtr(&dy);
std::string dy_name = this->GetOutputName(dy);
prim::elementwise_pow_grad<prim::DescTensor>(
x, y, out_grad, dx_ptr, dy_ptr);
this->RecoverOutputName(dx, dx_name);
this->RecoverOutputName(dy, dy_name);
}
};
class ElementwisePowOpMaker : public ElementwiseOpMaker {
protected:
std::string GetName() const override { return "Pow"; }
std::string GetEquation() const override { return "Out = X ^ Y"; }
void AddInputX() override { AddInput("X", "(Variable), The Base."); }
void AddInputY() override { AddInput("Y", "(Variable), The exponents."); }
std::string GetOpFunctionality() const override {
return "First tensor elements raised to powers from the second tensor, "
"element-wise.";
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
REGISTER_OPERATOR(elementwise_pow,
ops::ElementwiseOp,
ops::ElementwisePowOpMaker,
ops::ElementwiseOpInferVarType,
ops::ElementwisePowOpGradMaker<paddle::framework::OpDesc>,
ops::ElementwisePowOpGradMaker<paddle::imperative::OpBase>,
ops::ElementwisePowCompositeGradOpMaker);
REGISTER_OPERATOR(elementwise_pow_grad, ops::ElementwiseOpGrad);
REGISTER_OP_VERSION(elementwise_pow)
.AddCheckpoint(
R"ROC(Register elementwise_pow for adding the attribute of Scale_y)ROC",
paddle::framework::compatible::OpVersionDesc().NewAttr(
"Scale_y",
"In order to support the function of scaling the input Y when "
"using the operator of elementwise_pow.",
1.0f));
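
The composite gradient maker above hands off to prim::elementwise_pow_grad, which decomposes the backward pass into primitive ops. For reference, the gradients it must produce are the standard power-rule derivatives; a NumPy re-derivation (an illustrative sketch, not Paddle's kernel code, and ignoring broadcasting/reduction):

import numpy as np

def elementwise_pow_grads(x, y, dout):
    # out = x ** y
    dx = dout * y * np.power(x, y - 1)      # d(x^y)/dx = y * x^(y-1)
    dy = dout * np.power(x, y) * np.log(x)  # d(x^y)/dy = x^y * ln(x)
    return dx, dy

dx, dy = elementwise_pow_grads(
    np.array([2.0, 3.0, 4.0]), np.array([3.0, 2.0, 0.5]), np.ones(3))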
@@ -5,13 +5,8 @@
 # If there are some redefined error in compiling with the source file which
 # in combination rule, you can remove the source file from the following rules.
 register_unity_group(
-  cc
-  elementwise_add_op.cc
-  elementwise_div_op.cc
-  elementwise_min_op.cc
-  elementwise_mul_op.cc
-  elementwise_pow_op.cc
-  elementwise_sub_op.cc)
+  cc elementwise_add_op.cc elementwise_div_op.cc elementwise_min_op.cc
+  elementwise_mul_op.cc elementwise_sub_op.cc)
 register_unity_group(
   cu
   elementwise_add_op.cu
@@ -843,9 +843,15 @@
 - op : elementwise_pow
   backward : elementwise_pow_grad
+  inputs :
+    {x : X, y : Y}
+  outputs :
+    {out : Out}
   extra :
     attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32",
       bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]
+  complex_promote : [X, Y]
+  manual_signature : [elementwise_pow]
 
 - op : elu
   backward : elu_grad, elu_double_grad (elu_grad_grad)
@@ -189,6 +189,14 @@
           comment : In order to support the function of scaling the input Y when using the operator of elementwise_mod.
           default : "false"
 
+- op : elementwise_pow
+  version :
+    - checkpoint : Register elementwise_pow for adding the attribute of Scale_y
+      action :
+        - add_attr : Scale_y
+          comment : In order to support the function of scaling the input Y when using the operator of elementwise_pow.
+          default : 1.0
+
 - op : embedding
   version :
     - checkpoint : Upgrade flip, add new attr [axis] and delete attr [dims]
@@ -85,6 +85,18 @@
     func : einsum_grad
     data_type : out_grad
 
+- backward_op : elementwise_pow_grad
+  forward : elementwise_pow(Tensor x, Tensor y, int axis = -1) -> Tensor(out)
+  args : (Tensor x, Tensor y, Tensor out_grad)
+  output : Tensor(x_grad), Tensor(y_grad)
+  infer_meta :
+    func : GeneralBinaryGradInferMeta
+    param: [x, y]
+  kernel :
+    func : elementwise_pow_grad
+    data_type : out_grad
+  composite : elementwise_pow_grad(x, y, out_grad, x_grad, y_grad)
+
 - backward_op : embedding_grad
   forward : embedding (Tensor x, Tensor weight, int64_t padding_idx=-1) -> Tensor(out)
   args : (Tensor x, Tensor weight, Tensor out_grad, int64_t padding_idx=-1)
@@ -167,6 +167,15 @@
   backward : einsum_grad
   intermediate : inner_cache, xshape
 
+- op : elementwise_pow
+  args : (Tensor x, Tensor y, int axis = -1)
+  output : Tensor(out)
+  infer_meta :
+    func : ElementwiseRawInferMeta
+  kernel :
+    func : elementwise_pow
+  backward : elementwise_pow_grad
+
 - op : embedding
   args : (Tensor x, Tensor weight, int64_t padding_idx=-1)
   output : Tensor
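The static_ops.yaml entry above is the declaration the generator consumes to emit the registration that elementwise_pow_op.cc previously provided by hand. A quick static-graph check that the op still assembles and runs (hypothetical usage, assuming a build with this commit):

import numpy as np
import paddle

paddle.enable_static()
main = paddle.static.Program()
with paddle.static.program_guard(main):
    x = paddle.static.data("x", [3], "float32")
    y = paddle.static.data("y", [3], "float32")
    out = paddle.pow(x, y)  # inserts an elementwise_pow op into the program

exe = paddle.static.Executor(paddle.CPUPlace())
(res,) = exe.run(
    main,
    feed={
        "x": np.array([2, 3, 4], dtype="float32"),
        "y": np.array([3, 2, 0.5], dtype="float32"),
    },
    fetch_list=[out],
)
print(res)  # [8. 9. 2.]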
@@ -111,6 +111,10 @@ KernelSignature ElementwiseFloorDivOpArgumentMapping(
 KernelSignature ElementwisePowOpArgumentMapping(
     const ArgumentMappingContext& ctx UNUSED) {
+  if (ctx.IsForInferShape()) {
+    return KernelSignature(
+        "elementwise_pow_raw", {"X", "Y"}, {"axis"}, {"Out"});
+  }
   int axis = paddle::any_cast<int>(ctx.Attr("axis"));
   if (axis == -1) {
     return KernelSignature("elementwise_pow", {"X", "Y"}, {}, {"Out"});
@@ -211,11 +215,6 @@ KernelSignature ElementwiseMinGradOpArgumentMapping(
       "minimum_grad", {"X", "Y", "Out@GRAD"}, {}, {"X@GRAD", "Y@GRAD"});
 }
 
-KernelSignature ElementwisePowGradOpArgumentMapping(
-    const ArgumentMappingContext& ctx UNUSED) {
-  return KernelSignature(
-      "elementwise_pow_grad", {"X", "Y", "Out@GRAD"}, {}, {"X@GRAD", "Y@GRAD"});
-}
 
 }  // namespace phi
 
 PD_REGISTER_BASE_KERNEL_NAME(elementwise_add, add);
@@ -287,6 +286,4 @@ PD_REGISTER_ARG_MAPPING_FN(elementwise_fmin_grad,
                            phi::ElementwiseFMinGradOpArgumentMapping);
 PD_REGISTER_ARG_MAPPING_FN(elementwise_min_grad,
                            phi::ElementwiseMinGradOpArgumentMapping);
-PD_REGISTER_ARG_MAPPING_FN(elementwise_pow_grad,
-                           phi::ElementwisePowGradOpArgumentMapping);
 PD_REGISTER_ARG_MAPPING_FN(grad_add, phi::ElementwiseGradAddOpArgumentMapping);
@@ -30,7 +30,6 @@ cc_test_old(
   operator
   elementwise_mul_op
   elementwise_sub_op
-  elementwise_pow_op
   fill_constant_op
   activation_op
   phi