未验证 提交 772d9481 编写于 作者: R RedContritio 提交者: GitHub

support auto generate for static op remainder (elementwise_mod) (#55024)

* configure elementwise_mod op_version

* support auto generate for static op elementwise_mod
上级 8d7a3f1c
/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <string>
#include "paddle/fluid/operators/elementwise/elementwise_op.h"
namespace paddle {
namespace framework {
// Forward declarations only: these names are referenced by the operator
// registration machinery below, and forward-declaring them avoids pulling
// in the heavy framework headers here.
class OpDesc;
template <typename T>
class EmptyGradOpMaker;
} // namespace framework
namespace imperative {
// Forward declaration for the dynamic-graph (imperative) op base type.
class OpBase;
} // namespace imperative
} // namespace paddle
namespace paddle {
namespace operators {
// Proto maker for the elementwise_mod (remainder) operator.
// Reuses the generic ElementwiseOpMaker scaffolding and only customizes the
// operator name, the documented equation, the one-line summary, and the
// descriptions of the two tensor inputs.
class ElementwiseModOpMaker : public ElementwiseOpMaker {
 protected:
  // Human-readable operator name used by the generated documentation.
  std::string GetName() const override { return "Mod"; }

  // Equation string rendered in the docs (escaped for the doc generator).
  std::string GetEquation() const override { return "Out = X \\\\% Y"; }

  // One-line description of what the operator computes.
  std::string GetOpFunctionality() const override {
    return "Mod two tensors element-wise";
  }

  void AddInputX() override {
    AddInput("X",
             "(Tensor), Tensor of any dimensions. Its dtype should be int32, "
             "int64, float32 or float64.");
  }

  void AddInputY() override {
    AddInput("Y",
             "(Tensor), Tensor of any dimensions. Its dtype should be int32, "
             "int64, float32 or float64.");
  }
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;

// elementwise_mod has no gradient kernel in the static graph, so it is
// registered without a gradient op. ElementwiseOpInplaceInferer presumably
// maps X -> Out so the output may reuse the input buffer in-place — defined
// in elementwise_op.h; confirm there.
REGISTER_OP_WITHOUT_GRADIENT(elementwise_mod,
                             ops::ElementwiseOp,
                             ops::ElementwiseModOpMaker,
                             ops::ElementwiseOpInplaceInferer);

// Version checkpoint: records that the attribute Scale_y (float, default
// 1.0f) was added to elementwise_mod, so old serialized programs can be
// upgraded when loaded.
REGISTER_OP_VERSION(elementwise_mod)
    .AddCheckpoint(
        R"ROC(Register elementwise_mod for adding the attribute of Scale_y)ROC",
        paddle::framework::compatible::OpVersionDesc().NewAttr(
            "Scale_y",
            "In order to support the function of scaling the input Y when "
            "using the operator of elementwise_mod.",
            1.0f));
......@@ -9,7 +9,6 @@ register_unity_group(
elementwise_add_op.cc
elementwise_div_op.cc
elementwise_min_op.cc
elementwise_mod_op.cc
elementwise_mul_op.cc
elementwise_pow_op.cc
elementwise_sub_op.cc)
......
......@@ -2150,9 +2150,15 @@
attrs : [bool use_mkldnn = false]
# Compatibility mapping for the static op elementwise_mod -> phi op remainder.
# (Scrape had flattened the YAML block indentation; restored valid nesting so
# inputs/outputs/extra are keys of this sequence entry, not siblings of it.)
- op : remainder (elementwise_mod)
  inputs :
    {x : X, y : Y}
  outputs :
    {out : Out}
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]
  complex_promote : [X, Y]
  manual_signature : [remainder]
- op : renorm
backward : renorm_grad
......
......@@ -181,6 +181,14 @@
comment : In order to support the function of scaling the input Y when using the operator of elementwise_max.
default : 1.0
# Version checkpoint for elementwise_mod: the Scale_y attribute was added.
# Fixed two defects: restored the YAML block nesting (flattened in the
# scrape), and corrected `default` from the string "false" to 1.0 — Scale_y
# is a float attr registered with default 1.0f in the C++ it replaces, and
# the neighbouring elementwise_max entry uses `default : 1.0`.
- op : elementwise_mod
  version :
    - checkpoint : Register elementwise_mod for adding the attribute of Scale_y
      action :
        - add_attr : Scale_y
          comment : In order to support the function of scaling the input Y when using the operator of elementwise_mod.
          default : 1.0
- op : embedding
version :
- checkpoint : Upgrade flip, add new attr [axis] and delete attr [dims]
......
......@@ -526,6 +526,15 @@
func : relu6_raw
backward : relu6_grad
# Static-graph op definition for remainder (elementwise_mod).
# (Restored the YAML block indentation lost in the scrape: args/output/
# infer_meta/kernel/inplace are keys of this entry, and func lines are
# nested under infer_meta / kernel respectively.)
- op : remainder
  args : (Tensor x, Tensor y, int axis = -1)
  output : Tensor (out)
  infer_meta :
    func : ElementwiseRawInferMeta
  kernel :
    func : remainder
  inplace : (x -> out)
- op : rnn
args: (Tensor x, Tensor[] pre_state, Tensor[] weight_list, Tensor sequence_length, float dropout_prob=0.0, bool is_bidirec=false, int input_size=10, int hidden_size=100, int num_layers=1, str mode="RNN_TANH", int seed=0, bool is_test=false)
output: Tensor(out), Tensor(dropout_state_out), Tensor[](state){pre_state.size()}, Tensor(reserve)
......
......@@ -87,6 +87,9 @@ KernelSignature ElementwiseMinOpArgumentMapping(
KernelSignature ElementwiseModOpArgumentMapping(
const ArgumentMappingContext& ctx) {
if (ctx.IsForInferShape()) {
return KernelSignature("remainder_raw", {"X", "Y"}, {"axis"}, {"Out"});
}
int axis = paddle::any_cast<int>(ctx.Attr("axis"));
if (axis == -1) {
return KernelSignature("remainder", {"X", "Y"}, {}, {"Out"});
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册