Unverified · Commit 85490f70 authored by HappyHeavyRain, committed by GitHub

Generate some static graph ops (#49906)

* generate some static graph ops

* fix the bug of pow

* add REGISTER_ACTIVATION_OP in operators.cmake

* modify the file operators.cmake
Parent 7e8ef328
......@@ -411,6 +411,17 @@ function(op_library TARGET)
set(pybind_flag 1)
endif()
# pybind USE_OP_ITSELF
set(op_name "")
# Add PHI Kernel Registry Message
find_register(${cc_src} "REGISTER_ACTIVATION_OP" op_name)
if(NOT ${op_name} EQUAL "")
file(APPEND ${pybind_file} "USE_OP_ITSELF(${op_name});\n")
# hack: for example, the target in conv_transpose_op.cc is conv2d_transpose, used in mkldnn
set(TARGET ${op_name})
set(pybind_flag 1)
endif()
set(op_name "")
find_register(${cc_src} "REGISTER_OP_WITHOUT_GRADIENT" op_name)
if(NOT ${op_name} EQUAL "")
......
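A minimal sketch of what the new branch above does, taking `swish` (registered via the macro later in this diff) as the example op:

// in an operator source file, e.g. activation_op.cc
REGISTER_ACTIVATION_OP(swish, Swish, SwishFunctor, SwishGradFunctor);

// line appended to ${pybind_file} by the new find_register branch in op_library()
USE_OP_ITSELF(swish);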
......@@ -177,26 +177,6 @@ $$out = \min(\max(0, x), threshold)$$
}
};
class PowOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() override {
AddInput("X", "Input of Pow operator");
AddInput("FactorTensor",
"(Tensor<float>, optional). If provided, pow will use this"
"The shape of FactorTensor MUST BE [1]."
"it has higher priority than attr(factor).")
.AsDispensable();
AddOutput("Out", "Output of Pow operator");
AddAttr<float>("factor", "The exponential factor of Pow").SetDefault(1.0f);
AddComment(R"DOC(
Pow Activation Operator.
$$out = x^{factor}$$
)DOC");
}
};
class STanhOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() override {
......@@ -403,138 +383,6 @@ DECLARE_INPLACE_OP_INFERER(ActivationDoubleGradOpInplaceInferer,
DECLARE_INPLACE_OP_INFERER(ActivationTripleGradOpInplaceInferer,
{"DDX", "D_DOut"});
template <typename T>
class PowGradOpMaker : public framework::SingleGradOpMaker<T> {
public:
using framework::SingleGradOpMaker<T>::SingleGradOpMaker;
protected:
void Apply(GradOpPtr<T> op) const override {
op->SetType("pow_grad");
op->SetInput("X", this->Input("X"));
op->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out"));
op->SetOutput(framework ::GradVarName("X"), this->InputGrad("X"));
op->SetInput("FactorTensor", this->Input("FactorTensor"));
op->SetAttrMap(this->Attrs());
}
};
template <typename T>
class PowDoubleGradOpMaker : public framework::SingleGradOpMaker<T> {
public:
using framework::SingleGradOpMaker<T>::SingleGradOpMaker;
protected:
void Apply(GradOpPtr<T> op) const override {
op->SetType("pow_double_grad");
op->SetInput("X", this->Input("X"));
op->SetInput("DOut", this->Input(framework::GradVarName("Out")));
op->SetInput("DDX", this->OutputGrad(framework ::GradVarName("X")));
op->SetOutput("DX", this->InputGrad("X"));
op->SetOutput("DDOut", this->InputGrad(framework::GradVarName("Out")));
op->SetInput("FactorTensor", this->Input("FactorTensor"));
op->SetAttrMap(this->Attrs());
}
};
template <typename T>
class PowTripleGradOpMaker : public framework::SingleGradOpMaker<T> {
public:
using framework::SingleGradOpMaker<T>::SingleGradOpMaker;
protected:
void Apply(GradOpPtr<T> op) const override {
op->SetType("pow_triple_grad");
op->SetInput("X", this->Input("X"));
op->SetInput("DOut", this->Input("DOut"));
op->SetInput("DDX", this->Input("DDX"));
op->SetInput("D_DX", this->OutputGrad("DX"));
op->SetInput("D_DDOut", this->OutputGrad("DDOut"));
op->SetOutput("D_X", this->InputGrad("X"));
op->SetOutput("D_DOut", this->InputGrad("DOut"));
op->SetOutput("D_DDX", this->InputGrad("DDX"));
op->SetInput("FactorTensor", this->Input("FactorTensor"));
op->SetAttrMap(this->Attrs());
}
};
class PowOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext* ctx) const override {
ctx->ShareDim("X", /*->*/ "Out");
ctx->ShareLoD("X", /*->*/ "Out");
}
protected:
phi::KernelKey GetExpectedKernelType(
const framework::ExecutionContext& ctx) const override {
return GetKernelType(ctx, *this, "X");
}
phi::KernelKey GetKernelTypeForVar(
const std::string& var_name,
const phi::DenseTensor& tensor,
const phi::KernelKey& expected_kernel_type) const override {
if (var_name == "FactorTensor") {
return phi::KernelKey(phi::Backend::ALL_BACKEND,
expected_kernel_type.layout(),
expected_kernel_type.dtype());
}
return phi::KernelKey(
tensor.place(), tensor.layout(), expected_kernel_type.dtype());
}
};
class PowOpGrad : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext* ctx) const override {
auto out_grad_name = framework::GradVarName("Out");
ctx->ShareDim(out_grad_name, framework::GradVarName("X"));
ctx->ShareLoD(out_grad_name, framework::GradVarName("X"));
}
protected:
phi::KernelKey GetExpectedKernelType(
const framework::ExecutionContext& ctx) const override {
return GetKernelType(ctx, *this, framework::GradVarName("Out"));
}
phi::KernelKey GetKernelTypeForVar(
const std::string& var_name,
const phi::DenseTensor& tensor,
const phi::KernelKey& expected_kernel_type) const override {
if (var_name == "FactorTensor") {
return phi::KernelKey(phi::Backend::ALL_BACKEND,
expected_kernel_type.layout(),
expected_kernel_type.dtype());
}
return phi::KernelKey(
tensor.place(), tensor.layout(), expected_kernel_type.dtype());
}
};
class PowOpDoubleGrad : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected:
phi::KernelKey GetExpectedKernelType(
const framework::ExecutionContext& ctx) const override {
return GetKernelType(ctx, *this, "X");
}
};
class PowOpTripleGrad : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected:
phi::KernelKey GetExpectedKernelType(
const framework::ExecutionContext& ctx) const override {
return GetKernelType(ctx, *this, "X");
}
};
DECLARE_INPLACE_OP_INFERER(ActFwdInplaceInferer, {"X", "Out"});
} // namespace operators
} // namespace paddle
......@@ -582,40 +430,6 @@ REGISTER_ACTIVATION_OP(hard_swish,
HardSwishGradFunctor);
REGISTER_ACTIVATION_OP(swish, Swish, SwishFunctor, SwishGradFunctor);
/* ========================== pow register ============================ */
DECLARE_INFER_SHAPE_FUNCTOR(pow_double_grad,
PowDoubleGradInferShapeFunctor,
PD_INFER_META(phi::GeneralBinaryGradInferMeta));
DECLARE_INFER_SHAPE_FUNCTOR(pow_triple_grad,
PowTripleGradInferShapeFunctor,
PD_INFER_META(phi::GeneralTernaryGradInferMeta));
REGISTER_OPERATOR(
pow,
ops::PowOp,
ops::PowOpMaker,
ops::ActivationOpInferVarType,
ops::PowGradOpMaker<paddle::framework::OpDesc>,
ops::PowGradOpMaker<paddle::imperative::OpBase>,
std::conditional<ops::CanInplaceAct<ops::PowGradFunctor<float>>(),
ops::ActFwdInplaceInferer,
void>::type);
REGISTER_OPERATOR(pow_grad,
ops::PowOpGrad,
ops::ActivationGradOpInplaceInferer,
ops::PowDoubleGradOpMaker<paddle::framework::OpDesc>,
ops::PowDoubleGradOpMaker<paddle::imperative::OpBase>);
REGISTER_OPERATOR(pow_double_grad,
ops::PowOpDoubleGrad,
ops::ActivationDoubleGradOpInplaceInferer,
ops::PowTripleGradOpMaker<paddle::framework::OpDesc>,
ops::PowTripleGradOpMaker<paddle::imperative::OpBase>,
PowDoubleGradInferShapeFunctor);
REGISTER_OPERATOR(pow_triple_grad,
ops::PowOpTripleGrad,
PowTripleGradInferShapeFunctor);
/* ========================================================================== */
/* ========================== register checkpoint ===========================*/
REGISTER_OP_VERSION(leaky_relu)
.AddCheckpoint(
......
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/framework/infershape_utils.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/var_type_inference.h"
#include "paddle/phi/core/infermeta_utils.h"
#include "paddle/phi/infermeta/multiary.h"
namespace paddle {
namespace operators {
using framework::DDim;
class BroadcastTensorsOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected:
phi::KernelKey GetExpectedKernelType(
const framework::ExecutionContext& ctx) const override {
// Broadcast semantics enforces all input variables having the same
// DataType/VarType
// This condition is also checked during VarType Inference
// Here we simply copy input type to output
return phi::KernelKey(OperatorWithKernel::IndicateVarDataType(ctx, "X"),
ctx.GetPlace());
}
};
class BroadcastTensorsOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() override {
AddInput(
"X",
"A Varaible list. The shape and data type of the list elements"
"should be consistent. Variable can be multi-dimensional Tensor"
"or phi::DenseTensor, and data types can be: bool, float16, float32, "
"float64, int32, "
"int64.")
.AsDuplicable();
AddOutput("Out",
"the sum of input :code:`x`. its shape and data types are "
"consistent with :code:`x`.")
.AsDuplicable();
AddComment(
R"DOC(This OP is used to broadcast a vector of inputs
with phi::DenseTensor type, following broadcast semantics.)DOC");
}
};
class BroadcastTensorsOpVarTypeInference : public framework::VarTypeInference {
public:
void operator()(framework::InferVarTypeContext* ctx) const override {
// We need at least two tensors to satisfy broadcast semantics
size_t input_size = ctx->InputSize("X");
PADDLE_ENFORCE_GT(
input_size,
0,
platform::errors::InvalidArgument(
"BroadcastTensorsOp should have at least one input variables,"
"but only received %d ",
input_size));
// BroadcastTensorsOp takes a vector of variables named "X"
// Here we loop through input variables,
// and check if their DataType/VarType are the same
auto var_type = ctx->GetInputType("X", 0);
auto data_type = ctx->GetInputDataType("X", 0);
for (size_t ind = 1; ind < input_size; ind++) {
auto cur_var_type = ctx->GetInputType("X", ind);
PADDLE_ENFORCE_EQ(
var_type,
cur_var_type,
platform::errors::InvalidArgument(
"inputs to BroadcastTensorsOp should have the same variable type,"
"but detected %d v.s %d ",
framework::ToTypeName(var_type),
framework::ToTypeName(cur_var_type)));
auto cur_data_type = ctx->GetInputDataType("X", ind);
PADDLE_ENFORCE_EQ(
data_type,
cur_data_type,
platform::errors::InvalidArgument(
"inputs to BroadcastTensorsOp should have the same data type,"
"but detected %d v.s %d ",
framework::ToTypeName(var_type),
framework::ToTypeName(cur_var_type)));
}
// Outputs having the same DataType/VarType as inputs
ctx->SetOutputType("Out", var_type, framework::ALL_ELEMENTS);
ctx->SetOutputDataType("Out", data_type, framework::ALL_ELEMENTS);
}
};
/* ------ BroadcastTensorsGradOp ------ */
class BroadcastTensorsGradOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext* ctx) const override {
OP_INOUT_CHECK(ctx->HasOutputs(framework::GradVarName("X")),
"Output",
"X@grad",
"broadcast_tensors");
OP_INOUT_CHECK(ctx->HasInputs("X"), "Input", "X", "broadcast_tensors");
OP_INOUT_CHECK(ctx->HasInputs(framework::GradVarName("Out")),
"Input",
"Out@grad",
"broadcast_tensors");
const auto& forward_input_dims = ctx->GetInputsDim("X");
ctx->SetOutputsDim(framework::GradVarName("X"), forward_input_dims);
ctx->ShareAllLoD("X", /*->*/ framework::GradVarName("X"));
}
protected:
phi::KernelKey GetExpectedKernelType(
const framework::ExecutionContext& ctx) const override {
return phi::KernelKey(OperatorWithKernel::IndicateVarDataType(
ctx, framework::GradVarName("Out")),
ctx.device_context().GetPlace());
}
};
template <typename T>
class BroadcastTensorsGradOpMaker : public framework::SingleGradOpMaker<T> {
public:
using framework::SingleGradOpMaker<T>::SingleGradOpMaker;
void Apply(GradOpPtr<T> grad_op) const override {
grad_op->SetType("broadcast_tensors_grad");
// We need "X" only for backward shape inference
grad_op->SetInput("X", this->Input("X"));
grad_op->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out"));
grad_op->SetOutput(framework::GradVarName("X"),
this->InputGrad("X", /* drop_empty_grad */ false));
grad_op->SetAttrMap(this->Attrs());
}
};
class BroadcastTensorsGradOpVarTypeInference
: public framework::VarTypeInference {
public:
void operator()(framework::InferVarTypeContext* ctx) const override {
auto var_type = ctx->GetInputType("X", 0);
auto data_type = ctx->GetInputDataType("X", 0);
ctx->SetOutputType(
framework::GradVarName("X"), var_type, framework::ALL_ELEMENTS);
ctx->SetOutputDataType(
framework::GradVarName("X"), data_type, framework::ALL_ELEMENTS);
}
};
DECLARE_NO_NEED_BUFFER_VARS_INFERER(BroadcastTensorsGradNoNeedBufVarsInferer,
"X");
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
namespace plat = paddle::platform;
DECLARE_INFER_SHAPE_FUNCTOR(broadcast_tensors,
BroadcastTensorsInferShapeFunctor,
PD_INFER_META(phi::BroadcastTensorsInferMeta));
REGISTER_OPERATOR(broadcast_tensors,
ops::BroadcastTensorsOp,
ops::BroadcastTensorsOpMaker,
ops::BroadcastTensorsGradOpMaker<paddle::framework::OpDesc>,
ops::BroadcastTensorsGradOpMaker<paddle::imperative::OpBase>,
ops::BroadcastTensorsOpVarTypeInference,
BroadcastTensorsInferShapeFunctor);
REGISTER_OPERATOR(broadcast_tensors_grad,
ops::BroadcastTensorsGradOp,
ops::BroadcastTensorsGradOpVarTypeInference,
ops::BroadcastTensorsGradNoNeedBufVarsInferer);
......@@ -128,6 +128,19 @@
func : bmm_grad
data_type : out_grad
- backward_op : broadcast_tensors_grad
forward : broadcast_tensors (Tensor[] input) -> Tensor[](out)
args : (Tensor[] input, Tensor[] out_grad)
output : Tensor[](input_grad)
infer_meta :
func : UnchangedMultiInferMeta
param : [input]
kernel :
func : broadcast_tensors_grad
param : [input, out_grad]
data_type : out_grad
no_need_buffer : input
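# Note: "input" is passed to the grad kernel only so its signature stays aligned
# with infershape; the kernel itself ignores it (see the BroadcastTensorsGradKernel
# changes later in this diff).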
- backward_op : ceil_grad
forward : ceil(Tensor x) -> Tensor(out)
args : (Tensor out_grad)
......@@ -933,6 +946,43 @@
kernel :
func : poisson_grad
- backward_op : pow_double_grad
forward : pow_grad(Tensor x, Tensor grad_out, Scalar y) -> Tensor(grad_x)
args : (Tensor x, Tensor grad_out, Tensor grad_x_grad, Scalar y)
output : Tensor(x_grad), Tensor(grad_out_grad)
infer_meta :
func : GeneralBinaryGradInferMeta
param: [x, grad_out]
kernel :
func : pow_double_grad
data_type : x
backward : pow_triple_grad
inplace : (grad_x_grad -> x_grad)
- backward_op : pow_grad
forward : pow(Tensor x, Scalar y=1.0f) -> Tensor(out)
args : (Tensor x, Tensor out_grad, Scalar y=-1)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param: [x]
kernel :
func : pow_grad
data_type : out_grad
backward: pow_double_grad
inplace : (out_grad -> x_grad)
- backward_op : pow_triple_grad
forward : pow_double_grad(Tensor x, Tensor grad_out, Tensor grad_grad_x, Scalar y) -> Tensor(grad_x), Tensor(grad_grad_out)
args : (Tensor x, Tensor grad_out, Tensor grad_grad_x, Tensor grad_x_grad, Tensor grad_grad_out_grad, Scalar y)
output : Tensor(x_grad), Tensor(grad_out_grad), Tensor(grad_grad_x_grad)
infer_meta :
func : GeneralTernaryGradInferMeta
param: [x, grad_out, grad_grad_x]
kernel :
func : pow_triple_grad
data_type : x
- backward_op : put_along_axis_grad
forward : put_along_axis (Tensor arr, Tensor indices, Tensor value, int axis, str reduce = "assign") -> Tensor(out)
args : (Tensor arr, Tensor indices, Tensor out_grad, int axis, str reduce)
......
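For reference, the kernels wired up by these entries compute the standard power-rule derivatives (not spelled out in the YAML itself): with the forward definition $$out = x^{y}$$, pow_grad produces $$\frac{\partial L}{\partial x} = y \, x^{y-1} \, \frac{\partial L}{\partial out}$$, and pow_double_grad additionally builds on the second derivative $$\frac{\partial^{2} out}{\partial x^{2}} = y \, (y-1) \, x^{y-2}$$.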
......@@ -175,18 +175,6 @@
kernel :
func : bilinear_tensor_product_grad
- backward_op : broadcast_tensors_grad
forward : broadcast_tensors (Tensor[] input) -> Tensor[](out)
args : (Tensor[] input, Tensor[] out_grad)
output : Tensor[](input_grad)
infer_meta :
func : UnchangedMultiInferMeta
param : [input]
kernel :
func : broadcast_tensors_grad
param : [out_grad]
no_need_buffer : input
- backward_op : cast_grad
forward : cast (Tensor x, DataType dtype) -> Tensor(out)
args : (Tensor x, Tensor out_grad)
......@@ -1010,40 +998,6 @@
func : pool3d_grad
param : [x, out, out_grad, kernel_size, strides, paddings, ceil_mode, exclusive, data_format, pooling_type, global_pooling, adaptive, padding_algorithm]
- backward_op : pow_double_grad
forward : pow_grad(Tensor x, Tensor grad_out, Scalar y) -> Tensor(grad_x)
args : (Tensor x, Tensor grad_out, Tensor grad_x_grad, Scalar y)
output : Tensor(x_grad), Tensor(grad_out_grad)
infer_meta :
func : GeneralBinaryGradInferMeta
param: [x, grad_out]
kernel :
func : pow_double_grad
backward : pow_triple_grad
inplace : (grad_x_grad -> x_grad)
- backward_op : pow_grad
forward : pow(Tensor x, Scalar y) -> Tensor(out)
args : (Tensor x, Tensor out_grad, Scalar y=-1)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param: [x]
kernel :
func : pow_grad
backward: pow_double_grad
inplace : (out_grad -> x_grad)
- backward_op : pow_triple_grad
forward : pow_double_grad(Tensor x, Tensor grad_out, Tensor grad_grad_x, Scalar y) -> Tensor(grad_x), Tensor(grad_grad_out)
args : (Tensor x, Tensor grad_out, Tensor grad_grad_x, Tensor grad_x_grad, Tensor grad_grad_out_grad, Scalar y)
output : Tensor(x_grad), Tensor(grad_out_grad), Tensor(grad_grad_x_grad)
infer_meta :
func : GeneralTernaryGradInferMeta
param: [x, grad_out, grad_grad_x]
kernel :
func : pow_triple_grad
- backward_op : prelu_grad
forward : prelu(Tensor x, Tensor alpha, str data_format, str mode) -> Tensor(out)
args : (Tensor x, Tensor alpha, Tensor out_grad, str data_format, str mode)
......
......@@ -318,15 +318,6 @@
func : box_coder
optional : prior_box_var
- op : broadcast_tensors
args: (Tensor[] input)
output: Tensor[]{input.size()}
infer_meta:
func: BroadcastTensorsInferMeta
kernel:
func: broadcast_tensors
backward: broadcast_tensors_grad
- op : cast
args : (Tensor x, DataType dtype)
output : Tensor
......@@ -1365,17 +1356,6 @@
param : [x, kernel_size, strides, paddings, ceil_mode, exclusive, data_format, pooling_type, global_pooling, adaptive, padding_algorithm]
backward : pool3d_grad
- op : pow
args : (Tensor x, Scalar y)
output : Tensor(out)
infer_meta :
func : UnchangedInferMeta
param: [x]
kernel :
func : pow
data_type : x
backward : pow_grad
- op : prelu
args : (Tensor x, Tensor alpha, str data_format, str mode)
output : Tensor(out)
......
......@@ -164,6 +164,14 @@
outputs :
out : Out
- op : broadcast_tensors
backward : broadcast_tensors_grad
inputs :
input : X
outputs :
out : Out
drop_empty_grad : [input_grad]
- op : ceil
backward : ceil_grad
inputs :
......@@ -1045,6 +1053,19 @@
extra :
attrs : [bool use_mkldnn = false]
- op : pow
backward : pow_grad, pow_double_grad, pow_triple_grad
inputs :
x : X
outputs :
out : Out
attrs :
y : factor
scalar :
y :
data_type : float
tensor_name : FactorTensor
- op : prelu
backward : prelu_grad
extra :
......
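The scalar block above is what lets the generated op keep accepting either the legacy attribute `factor` or the optional input `FactorTensor`; it expresses declaratively the dispatch that the hand-written argument mapping (removed further down in this diff) performed:

// previous PowOpArgumentMapping behaviour, now encoded by op_compat.yaml
if (ctx.HasInput("FactorTensor")) {
  return KernelSignature("pow", {"X"}, {"FactorTensor"}, {"Out"});
} else {
  return KernelSignature("pow", {"X"}, {"factor"}, {"Out"});
}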
......@@ -134,6 +134,16 @@
func : bmm
backward : bmm_grad
- op : broadcast_tensors
args: (Tensor[] input)
output: Tensor[]{input.size()}
infer_meta:
func: BroadcastTensorsInferMeta
kernel:
func: broadcast_tensors
data_type : input
backward: broadcast_tensors_grad
- op : ceil
args : (Tensor x)
output : Tensor(out)
......@@ -911,6 +921,17 @@
func : poisson
backward : poisson_grad
- op : pow
args : (Tensor x, Scalar y=1.0f)
output : Tensor(out)
infer_meta :
func : UnchangedInferMeta
param: [x]
kernel :
func : pow
data_type : x
backward : pow_grad
- op : put_along_axis
args : (Tensor arr, Tensor indices, Tensor values, int axis, str reduce = "assign")
output : Tensor(out)
......
......@@ -22,6 +22,8 @@ namespace phi {
template <typename T, typename Context>
void BroadcastTensorsGradKernel(const Context& ctx,
const std::vector<const DenseTensor*>&
inputs, // just for aligning to infershape
const std::vector<const DenseTensor*>& dout,
std::vector<DenseTensor*> dx);
......
......@@ -60,8 +60,10 @@ namespace phi {
template <typename T, typename Context>
void BroadcastTensorsGradKernel(const Context& ctx,
const std::vector<const DenseTensor*>& inputs,
const std::vector<const DenseTensor*>& dout,
std::vector<DenseTensor*> dx) {
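// "inputs" is only needed to keep this signature aligned with infershape
// (see the kernel header above); it is unused in the computation below.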
(void)inputs;
// Find reduce dimensions
const auto& in_tensors = dout;
auto& out_tensors = dx;
......
......@@ -28,8 +28,10 @@ namespace phi {
template <typename T, typename Context>
void BroadcastTensorsGradKernel(const Context& ctx,
const std::vector<const DenseTensor*>& inputs,
const std::vector<const DenseTensor*>& dout,
std::vector<DenseTensor*> dx) {
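// As in the other implementation, "inputs" only aligns the signature with
// infershape and is unused here.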
(void)inputs;
// Find reduce dimensions
const auto& in_tensors = dout;
auto& out_tensors = dx;
......
......@@ -66,51 +66,6 @@ KernelSignature Relu6OpArgumentMapping(const ArgumentMappingContext& ctx) {
return KernelSignature("relu6_raw", {"X"}, {"threshold"}, {"Out"});
}
KernelSignature PowOpArgumentMapping(const ArgumentMappingContext& ctx) {
if (ctx.HasInput("FactorTensor")) {
return KernelSignature("pow", {"X"}, {"FactorTensor"}, {"Out"});
} else {
return KernelSignature("pow", {"X"}, {"factor"}, {"Out"});
}
}
KernelSignature PowGradOpArgumentMapping(const ArgumentMappingContext& ctx) {
if (ctx.HasInput("FactorTensor")) {
return KernelSignature(
"pow_grad", {"X", "Out@GRAD"}, {"FactorTensor"}, {"X@GRAD"});
} else {
return KernelSignature(
"pow_grad", {"X", "Out@GRAD"}, {"factor"}, {"X@GRAD"});
}
}
KernelSignature PowDoubleGradOpArgumentMapping(
const ArgumentMappingContext& ctx) {
if (ctx.HasInput("FactorTensor")) {
return KernelSignature("pow_double_grad",
{"X", "DOut", "DDX"},
{"FactorTensor"},
{"DX", "DDOut"});
} else {
return KernelSignature(
"pow_double_grad", {"X", "DOut", "DDX"}, {"factor"}, {"DX", "DDOut"});
}
}
KernelSignature PowTripleGradOpArgumentMapping(
const ArgumentMappingContext& ctx) {
if (ctx.HasInput("FactorTensor")) {
return KernelSignature("pow_triple_grad",
{"X", "DOut", "DDX", "D_DX", "D_DDOut"},
{"FactorTensor"},
{"D_X", "D_DOut", "D_DDX"});
} else {
return KernelSignature("pow_triple_grad",
{"X", "DOut", "DDX", "D_DX", "D_DDOut"},
{"factor"},
{"D_X", "D_DOut", "D_DDX"});
}
}
} // namespace phi
PD_REGISTER_BASE_KERNEL_NAME(hard_swish, hardswish);
......@@ -126,9 +81,3 @@ PD_REGISTER_ARG_MAPPING_FN(hard_swish_grad,
PD_REGISTER_ARG_MAPPING_FN(hard_swish, phi::HardSwishOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(swish_grad, phi::SwishGradOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(swish, phi::SwishOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(pow_grad, phi::PowGradOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(pow_double_grad,
phi::PowDoubleGradOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(pow_triple_grad,
phi::PowTripleGradOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(pow, phi::PowOpArgumentMapping);
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/core/compat/op_utils.h"
namespace phi {
KernelSignature BroadcastTensorsGradOpArgumentMapping(
const ArgumentMappingContext& ctx) {
return KernelSignature(
"broadcast_tensors_grad", {"Out@GRAD"}, {}, {"X@GRAD"});
}
} // namespace phi
PD_REGISTER_ARG_MAPPING_FN(broadcast_tensors_grad,
phi::BroadcastTensorsGradOpArgumentMapping);