Unverified commit 259b18a7, authored by: RedContritio, committed by: GitHub

support auto generate for op elementwise_heaviside (#55029)

Parent 4a143fe0
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <string>

#include "paddle/fluid/operators/elementwise/elementwise_op.h"

namespace paddle {
namespace operators {

class ElementwiseHeavisideOpMaker : public ElementwiseOpMaker {
 protected:
  std::string GetName() const override { return "Heaviside"; }
  std::string GetEquation() const override { return "Out = Heaviside(X, Y)"; }

  void AddInputX() override {
    AddInput("X",
             "(Tensor), The input tensor of Heaviside step function. "
             "Its dtype can be int32, int64, float32 and float64");
  }

  void AddInputY() override {
    AddInput("Y",
             "(Tensor), The tensor determining a Heaviside step function, "
             "which is the value when X = 0. Its dtype should be same as X.");
  }

  std::string GetOpFunctionality() const override {
    return "Computes the Heaviside step function determined by Y "
           "for each element in X.";
  }
};

template <typename T>
class ElementwiseHeavisideGradOpMaker : public framework::SingleGradOpMaker<T> {
 public:
  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;

 protected:
  void Apply(GradOpPtr<T> op) const override {
    op->SetType("elementwise_heaviside_grad");
    op->SetInput("X", this->Input("X"));
    op->SetInput("Y", this->Input("Y"));
    op->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out"));
    op->SetOutput(framework::GradVarName("X"), this->InputGrad("X"));
    op->SetOutput(framework::GradVarName("Y"), this->InputGrad("Y"));
    op->SetAttrMap(this->Attrs());
  }
};

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;

REGISTER_OPERATOR(
    elementwise_heaviside,
    ops::ElementwiseOp,
    ops::ElementwiseHeavisideOpMaker,
    ops::ElementwiseHeavisideGradOpMaker<paddle::framework::OpDesc>,
    ops::ElementwiseHeavisideGradOpMaker<paddle::imperative::OpBase>);

REGISTER_OPERATOR(elementwise_heaviside_grad, ops::ElementwiseOpGrad);
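For context, the deleted file above is the hand-written registration of the elementwise Heaviside step op, which this commit replaces with auto-generated code driven by the YAML entries below. The op's documented semantics: the output is 0 where x < 0, y where x == 0, and 1 where x > 0. A minimal usage sketch through the public Python API (`paddle.heaviside`, which this operator backs; the expected values follow those documented semantics):

```python
import paddle

x = paddle.to_tensor([-1.5, 0.0, 3.0])
y = paddle.to_tensor([0.5, 0.5, 0.5])

# heaviside(x, y): 0 where x < 0, y where x == 0, 1 where x > 0
out = paddle.heaviside(x, y)  # expected: [0.0, 0.5, 1.0]
```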
......@@ -994,6 +994,17 @@
    func : hardtanh_grad
  inplace : (out_grad -> x_grad)

- backward_op : heaviside_grad
  forward : heaviside (Tensor x, Tensor y) -> Tensor(out)
  args : (Tensor x, Tensor y, Tensor out_grad)
  output : Tensor(x_grad), Tensor(y_grad)
  infer_meta :
    func : GeneralBinaryGradInferMeta
    param : [x, y]
  kernel :
    func : heaviside_grad
    data_type : out_grad

- backward_op : huber_loss_grad
  forward : huber_loss (Tensor input, Tensor label, float delta) -> Tensor(out), Tensor(residual)
  args : (Tensor residual, Tensor out_grad, float delta)
......
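The new backward entry maps directly onto the existing `heaviside_grad` kernel. For reference, the gradient rule it encodes: the step function is flat in x almost everywhere, so x receives no gradient, while y only influences the output where x == 0. A NumPy sketch of this rule, assuming same-shape inputs (the actual kernel additionally reduces gradients over broadcast dimensions):

```python
import numpy as np

def heaviside_grad(x, y, out_grad):
    # The step is constant wherever x != 0, and the kernel treats the
    # non-differentiable point x == 0 as having zero x-gradient as well.
    x_grad = np.zeros_like(x)
    # out == y exactly where x == 0, so out_grad passes through to y
    # at those positions only.
    y_grad = out_grad * (x == 0).astype(out_grad.dtype)
    return x_grad, y_grad
```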
......@@ -263,16 +263,6 @@
    func : hardswish_grad
  inplace : (out_grad -> x_grad)

- backward_op : heaviside_grad
  forward : heaviside (Tensor x, Tensor y) -> Tensor(out)
  args : (Tensor x, Tensor y, Tensor out_grad)
  output : Tensor(x_grad), Tensor(y_grad)
  infer_meta :
    func : GeneralBinaryGradInferMeta
    param : [x, y]
  kernel :
    func : heaviside_grad

- backward_op : hsigmoid_loss_grad
  forward : hsigmoid_loss (Tensor x, Tensor label, Tensor w, Tensor bias, Tensor path, Tensor code, int num_classes, bool is_sparse) -> Tensor(out), Tensor(pre_out), Tensor(w_out)
  args : (Tensor x, Tensor w, Tensor label, Tensor path, Tensor code, Tensor bias, Tensor pre_out, Tensor out_grad, int num_classes, bool is_sparse)
......
......@@ -435,15 +435,6 @@
    func : hardswish
  backward : hardswish_grad

- op : heaviside
  args : (Tensor x, Tensor y)
  output : Tensor
  infer_meta :
    func : ElementwiseInferMeta
  kernel :
    func : heaviside
  backward : heaviside_grad

- op : hsigmoid_loss
  args : (Tensor x, Tensor label, Tensor w, Tensor bias, Tensor path, Tensor code, int num_classes, bool is_sparse)
  output : Tensor(out), Tensor(pre_out), Tensor(w_out)
......
......@@ -1284,9 +1284,14 @@
- op : heaviside (elementwise_heaviside)
  backward : heaviside_grad (elementwise_heaviside_grad)
  inputs :
    {x : X, y : Y}
  outputs :
    {out : Out}
  extra :
    attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32",
             bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]
  complex_promote : [X, Y]

- op : histogram
  inputs :
......
......@@ -1081,6 +1081,15 @@
    func : hardtanh
  backward : hardtanh_grad

- op : heaviside
  args : (Tensor x, Tensor y)
  output : Tensor(out)
  infer_meta :
    func : ElementwiseInferMeta
  kernel :
    func : heaviside
  backward : heaviside_grad

- op : histogram
  args : (Tensor input, int64_t bins = 100, int min = 0, int max = 0)
  output : Tensor(out)
......
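The forward definition above reuses `ElementwiseInferMeta`, so x and y broadcast like any other binary elementwise op. A small sketch of that behavior (expected values assume standard broadcasting plus the step semantics shown earlier):

```python
import paddle

x = paddle.to_tensor([[-1.0, 0.0],
                      [ 2.0, 0.0]])
y = paddle.to_tensor([0.3, 0.7])  # broadcast across rows

out = paddle.heaviside(x, y)      # expected: [[0.0, 0.7], [1.0, 0.7]]
```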
......@@ -109,11 +109,6 @@ KernelSignature ElementwiseFloorDivOpArgumentMapping(
  return KernelSignature("floor_divide_raw", {"X", "Y"}, {"axis"}, {"Out"});
}

KernelSignature ElementwiseHeavisideOpArgumentMapping(
    const ArgumentMappingContext& ctx UNUSED) {
  return KernelSignature("heaviside", {"X", "Y"}, {}, {"Out"});
}

KernelSignature ElementwisePowOpArgumentMapping(
    const ArgumentMappingContext& ctx UNUSED) {
  int axis = paddle::any_cast<int>(ctx.Attr("axis"));
......@@ -216,12 +211,6 @@ KernelSignature ElementwiseMinGradOpArgumentMapping(
"minimum_grad", {"X", "Y", "Out@GRAD"}, {}, {"X@GRAD", "Y@GRAD"});
}
KernelSignature ElementwiseHeavisideGradOpArgumentMapping(
const ArgumentMappingContext& ctx UNUSED) {
return KernelSignature(
"heaviside_grad", {"X", "Y", "Out@GRAD"}, {}, {"X@GRAD", "Y@GRAD"});
}
KernelSignature ElementwisePowGradOpArgumentMapping(
const ArgumentMappingContext& ctx UNUSED) {
return KernelSignature(
......@@ -237,7 +226,6 @@ PD_REGISTER_BASE_KERNEL_NAME(elementwise_max, maximum);
PD_REGISTER_BASE_KERNEL_NAME(elementwise_min, minimum);
PD_REGISTER_BASE_KERNEL_NAME(elementwise_mod, remainder);
PD_REGISTER_BASE_KERNEL_NAME(elementwise_floordiv, floor_divide);
PD_REGISTER_BASE_KERNEL_NAME(elementwise_heaviside, heaviside);
PD_REGISTER_BASE_KERNEL_NAME(elementwise_add_grad, add_grad);
PD_REGISTER_BASE_KERNEL_NAME(elementwise_add_grad_grad, add_double_grad);
PD_REGISTER_BASE_KERNEL_NAME(elementwise_add_triple_grad, add_triple_grad);
......@@ -252,7 +240,6 @@ PD_REGISTER_BASE_KERNEL_NAME(elementwise_fmax, fmax);
PD_REGISTER_BASE_KERNEL_NAME(elementwise_fmin, fmin);
PD_REGISTER_BASE_KERNEL_NAME(elementwise_fmin_grad, fmin_grad);
PD_REGISTER_BASE_KERNEL_NAME(elementwise_min_grad, minimum_grad);
PD_REGISTER_BASE_KERNEL_NAME(elementwise_heaviside_grad, heaviside_grad);
PD_REGISTER_ARG_MAPPING_FN(elementwise_add,
                           phi::ElementwiseAddOpArgumentMapping);
......@@ -270,8 +257,6 @@ PD_REGISTER_ARG_MAPPING_FN(elementwise_mod,
                           phi::ElementwiseModOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(elementwise_floordiv,
                           phi::ElementwiseFloorDivOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(elementwise_heaviside,
                           phi::ElementwiseHeavisideOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(elementwise_pow,
                           phi::ElementwisePowOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(elementwise_add_grad,
......@@ -302,8 +287,6 @@ PD_REGISTER_ARG_MAPPING_FN(elementwise_fmin_grad,
                           phi::ElementwiseFMinGradOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(elementwise_min_grad,
                           phi::ElementwiseMinGradOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(elementwise_heaviside_grad,
                           phi::ElementwiseHeavisideGradOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(elementwise_pow_grad,
                           phi::ElementwisePowGradOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(grad_add, phi::ElementwiseGradAddOpArgumentMapping);