Unverified commit 31f3f643 authored by zyfncg, committed by GitHub

Generate static graph code for some ops by yaml (part3) (#47803)

* generate static graph code for some ops by yaml

* remove deleted files

* update cmake

* update cmake

* update cmake
Parent 95e494e0
......@@ -153,7 +153,7 @@ if(WITH_MKLDNN)
mkldnn_placement_pass
op_registry
elementwise_add_op
gelu_op
generated_op
activation_op
softmax_op
softmax
......@@ -418,7 +418,7 @@ if(WITH_MKLDNN)
im2col
vol2col
batch_norm_op
gelu_op
generated_op
activation_op
elementwise_add_op
concat_and_split
......
......@@ -248,10 +248,6 @@ if(WITH_UNITY_BUILD)
target_link_libraries(paddle_operators_unity ${OP_HEADER_DEPS} ${COMMON_OP_DEPS})
endif()
if(WITH_ASCEND_CL)
cc_test(gelu_op_npu_test SRCS gelu_op_npu_test.cc DEPS op_registry gelu_op scope device_context enforce executor)
endif()
if (WITH_GPU OR WITH_ASCEND_CL)
cc_test(copy_cross_scope_test SRCS copy_cross_scope_test.cc DEPS op_registry copy_cross_scope_op scope device_context enforce executor)
endif()
......
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/framework/infershape_utils.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/phi/infermeta/backward.h"
#include "paddle/phi/infermeta/binary.h"
namespace paddle {
namespace operators {
class FillDiagonalTensorOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() override {
AddComment(R"DOC(Fill replace operator
Fill the diagonal of an tensor with `Y` Tensor.
)DOC");
AddInput("X", "(Tensor) The input tensor.");
AddInput("Y", "(Tensor) The input tensor to fill in.");
AddOutput("Out",
"Tensor, the output tensor, with the same shape and data type "
"as input(x)");
AddAttr<int>("dim1", "the first dim to figure out the diagonal")
.SetDefault(0);
AddAttr<int>("dim2", "the second dim to figure out the diagonal")
.SetDefault(1);
AddAttr<int64_t>("offset",
"offset of diagonal, zero means no offset, positive means "
"offset to up-right corner; negtive means offset to "
"bottom-left corner")
.SetDefault(0);
}
};
class FillDiagonalTensorOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected:
framework::OpKernelType GetExpectedKernelType(
const framework::ExecutionContext &ctx) const override {
return framework::OpKernelType(
OperatorWithKernel::IndicateVarDataType(ctx, "X"), ctx.GetPlace());
}
};
class FillDiagonalTensorOpVarTypeInference
: public framework::VarTypeInference {
public:
void operator()(framework::InferVarTypeContext *ctx) const override {
auto var_type = ctx->GetInputType("X", 0);
auto data_type = ctx->GetInputDataType("X", 0);
ctx->SetOutputType("Out", var_type, framework::ALL_ELEMENTS);
ctx->SetOutputDataType("Out", data_type, framework::ALL_ELEMENTS);
}
};
class FillDiagonalTensorGradOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
framework::OpKernelType GetExpectedKernelType(
const framework::ExecutionContext &ctx) const override {
// Note: don't get data type from ctx.Input<phi::DenseTensor>("Input");
auto dtype =
ctx.Input<phi::DenseTensor>(framework::GradVarName("Out"))->type();
return framework::OpKernelType(framework::TransToProtoVarType(dtype),
ctx.GetPlace());
}
};
template <typename T>
class FillDiagonalTensorGradOpMaker : public framework::SingleGradOpMaker<T> {
public:
using framework::SingleGradOpMaker<T>::SingleGradOpMaker;
protected:
void Apply(GradOpPtr<T> retv) const override {
retv->SetType("fill_diagonal_tensor_grad");
retv->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out"));
retv->SetOutput(framework::GradVarName("X"), this->InputGrad("X"));
retv->SetAttrMap(this->Attrs());
}
};
DECLARE_INPLACE_OP_INFERER(FillDiagonalTensorOpInplaceInferer, {"X", "Out"});
DECLARE_INPLACE_OP_INFERER(FillDiagonalTensorGradOpInplaceInferer,
{framework::GradVarName("Out"),
framework::GradVarName("X")});
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
DECLARE_INFER_SHAPE_FUNCTOR(fill_diagonal_tensor,
FillDiagonalTensorInferShapeFunctor,
PD_INFER_META(phi::FillDiagonalTensorInferMeta));
DECLARE_INFER_SHAPE_FUNCTOR(
fill_diagonal_tensor_grad,
FillDiagonalTensorGradInferShapeFunctor,
PD_INFER_META(phi::FillDiagonalTensorGradInferMeta));
REGISTER_OPERATOR(
fill_diagonal_tensor,
ops::FillDiagonalTensorOp,
ops::FillDiagonalTensorGradOpMaker<paddle::framework::OpDesc>,
ops::FillDiagonalTensorGradOpMaker<paddle::imperative::OpBase>,
ops::FillDiagonalTensorOpMaker,
ops::FillDiagonalTensorOpInplaceInferer,
ops::FillDiagonalTensorOpVarTypeInference,
FillDiagonalTensorInferShapeFunctor);
REGISTER_OPERATOR(fill_diagonal_tensor_grad,
ops::FillDiagonalTensorGradOp,
ops::FillDiagonalTensorGradOpInplaceInferer,
FillDiagonalTensorGradInferShapeFunctor);
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License. */
#include <memory>
#include <vector>
#include "paddle/fluid/framework/infershape_utils.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/phi/infermeta/backward.h"
#include "paddle/phi/infermeta/unary.h"
namespace paddle {
namespace operators {
class FoldOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected:
framework::OpKernelType GetExpectedKernelType(
const framework::ExecutionContext& ctx) const override {
return framework::OpKernelType(
OperatorWithKernel::IndicateVarDataType(ctx, "X"),
ctx.device_context());
}
};
class FoldOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() override {
AddInput("X",
"Tensor, "
"the input of fold op. "
"The format of X is [N, C_in, L], "
"where N is the batch size, C_in is the input channels, "
"L is the length");
AddOutput("Y",
"Tensor, "
"the output of unfold op. "
"The format of Y is [N, C_out, output_height, output_width], "
"where N is the batch size, "
"C_in is the output channels of Y, output_height and "
"output_width "
"is the calculated height and width of output feature map.");
AddAttr<std::vector<int>>(
"output_sizes",
"vector<int>, the output sizes of the convolution operator.");
AddAttr<std::vector<int>>(
"kernel_sizes",
"vector<int>, the kernel sizes of the convolution operator.");
AddAttr<std::vector<int>>(
"strides", "vector<int>, the strides of the convolution operator.");
AddAttr<std::vector<int>>(
"paddings",
"vector<int>, the paddings applied to pad the feature map.");
AddAttr<std::vector<int>>(
"dilations", "vector<int>, the dilations of the convolution operator.");
AddComment(R"DOC(
**Fold Operator**
This operator is used to combine an array of sliding local blocks into a large containing
tensor, also known as col2im when operated on a batched 2D image tensor. Fold calculates each
combined value in the resulting large tensor by summing all values from all containing blocks.
Unfold extracts the values in the local blocks by copying from the large tensor. So, if the
blocks overlap, they are not inverses of each other.
)DOC");
}
};
class FoldGradOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected:
framework::OpKernelType GetExpectedKernelType(
const framework::ExecutionContext& ctx) const override {
return framework::OpKernelType(OperatorWithKernel::IndicateVarDataType(
ctx, framework::GradVarName("Y")),
ctx.device_context());
}
};
template <typename T>
class FoldGradMaker : public framework::SingleGradOpMaker<T> {
public:
using framework::SingleGradOpMaker<T>::SingleGradOpMaker;
protected:
void Apply(GradOpPtr<T> op) const override {
op->SetType("fold_grad");
op->SetInput(framework::GradVarName("Y"), this->OutputGrad("Y"));
op->SetInput("X", this->Input("X"));
op->SetOutput(framework::GradVarName("X"), this->InputGrad("X"));
op->SetAttrMap(this->Attrs());
}
};
DECLARE_NO_NEED_BUFFER_VARS_INFERER(FoldGradOpNoNeedBufferVarsInferer, "X");
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
DECLARE_INFER_SHAPE_FUNCTOR(fold,
FoldInferShapeFunctor,
PD_INFER_META(phi::FoldInferMeta));
REGISTER_OPERATOR(fold,
ops::FoldOp,
ops::FoldOpMaker,
ops::FoldGradMaker<paddle::framework::OpDesc>,
ops::FoldGradMaker<paddle::imperative::OpBase>,
FoldInferShapeFunctor);
DECLARE_INFER_SHAPE_FUNCTOR(fold_grad,
FoldGradInferShapeFunctor,
PD_INFER_META(phi::UnchangedInferMeta));
REGISTER_OPERATOR(fold_grad,
ops::FoldGradOp,
ops::FoldGradOpNoNeedBufferVarsInferer,
FoldGradInferShapeFunctor);
/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/framework/infershape_utils.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/phi/core/infermeta_utils.h"
#include "paddle/phi/infermeta/binary.h"
namespace paddle {
namespace operators {
class GatherTreeOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected:
framework::OpKernelType GetExpectedKernelType(
const framework::ExecutionContext& ctx) const override {
return framework::OpKernelType(
OperatorWithKernel::IndicateVarDataType(ctx, "Ids"),
ctx.device_context());
}
};
class GatherTreeOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() override {
AddInput("Ids",
"The Tensor with shape [length, batch_size, beam_size] containing "
"the selected ids of all time steps.");
AddInput("Parents",
"The Tensor has the same shape as Ids and contains the parents "
"corresponding to selected ids when searching among beams.");
AddOutput(
"Out",
"A Tensor with shape [length, batch_size, beam_size] containing the "
"full sequences. The sequences is collected by backtracing from the "
"last time step of Ids.");
AddComment(R"DOC(
GatherTree Operator.
Backtrack from the last time step and generate the full sequences by collecting the ids
selected by beam search.
)DOC");
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
DECLARE_INFER_SHAPE_FUNCTOR(gather_tree,
GatherTreeInferShapeFunctor,
PD_INFER_META(phi::GatherTreeMeta));
REGISTER_OPERATOR(gather_tree,
ops::GatherTreeOp,
ops::GatherTreeOpMaker,
GatherTreeInferShapeFunctor);
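A minimal sketch of the backtracking that the GatherTree comment describes, for a single batch entry with shape [length, beam_size]; the ids/parents values below are hypothetical toy data:

#include <cstdio>
#include <vector>

int main() {
  // length = 3 time steps, beam_size = 2 (toy values).
  std::vector<std::vector<int>> ids     = {{1, 2}, {3, 4}, {5, 6}};
  std::vector<std::vector<int>> parents = {{0, 0}, {1, 0}, {0, 1}};
  const int length = static_cast<int>(ids.size());
  const int beam = static_cast<int>(ids[0].size());
  std::vector<std::vector<int>> out(length, std::vector<int>(beam));
  for (int b = 0; b < beam; ++b) {
    int k = b;  // start from beam slot b at the last time step
    for (int t = length - 1; t >= 0; --t) {
      out[t][b] = ids[t][k];
      k = parents[t][k];  // follow the parent pointer to the previous step
    }
  }
  for (int t = 0; t < length; ++t)
    std::printf("%d %d\n", out[t][0], out[t][1]);
  return 0;
}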
/* Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <memory>
#include <string>
#include "paddle/fluid/framework/infershape_utils.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/framework/operator.h"
#include "paddle/phi/core/infermeta_utils.h"
#include "paddle/phi/infermeta/unary.h"
namespace paddle {
namespace operators {
class GeluOp : public framework::OperatorWithKernel {
public:
GeluOp(const std::string &type,
const framework::VariableNameMap &inputs,
const framework::VariableNameMap &outputs,
const framework::AttributeMap &attrs)
: OperatorWithKernel(type, inputs, outputs, attrs) {}
protected:
framework::OpKernelType GetExpectedKernelType(
const framework::ExecutionContext &ctx) const override {
auto data_type = OperatorWithKernel::IndicateVarDataType(ctx, "X");
return framework::OpKernelType(data_type, ctx.GetPlace());
}
};
class GeluGradOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext *ctx) const override {
PADDLE_ENFORCE_EQ(
ctx->HasInput(framework::GradVarName("Out")),
true,
platform::errors::InvalidArgument(
"Input(%s) of GeluGradOp should not be null.", "DOut"));
PADDLE_ENFORCE_EQ(ctx->HasInput("X"),
true,
platform::errors::InvalidArgument(
"Input(%s) of GeluGradOp should not be null.", "X"));
PADDLE_ENFORCE_EQ(
ctx->HasOutput(framework::GradVarName("X")),
true,
platform::errors::InvalidArgument(
"Output(%s) of GeluGradOp should not be null.", "DX"));
auto x_grad_name = framework::GradVarName("X");
ctx->SetOutputDim(x_grad_name, ctx->GetInputDim("X"));
ctx->ShareLoD("X", /*->*/ x_grad_name);
}
protected:
framework::OpKernelType GetExpectedKernelType(
const framework::ExecutionContext &ctx) const override {
auto data_type = OperatorWithKernel::IndicateVarDataType(ctx, "X");
return framework::OpKernelType(data_type, ctx.GetPlace());
}
};
class GeluOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() override {
AddInput("X", "Input of Gelu operator");
AddOutput("Out", "Output of Gelu operator");
AddAttr<bool>("approximate",
"(bool, default false) use approximation of gelu")
.SetDefault(false);
AddComment(R"DOC(
Gelu Activation Operator.
For more details, please refer to [Gaussian Error Linear Units](https://arxiv.org/pdf/1606.08415.pdf).
When using the tanh approximation:
$out = \\frac{1}{2}x(1+tanh(\\sqrt{\\frac{2}{\\pi}}(x+0.044715x^{3})))$
otherwise:
$out = \\frac{1 + erf(\\frac{x}{\\sqrt{2}})}{2} x$
)DOC");
}
};
template <typename T>
class GeluGradOpMaker : public framework::SingleGradOpMaker<T> {
public:
using framework::SingleGradOpMaker<T>::SingleGradOpMaker;
protected:
void Apply(GradOpPtr<T> grad_op) const override {
grad_op->SetType("gelu_grad");
grad_op->SetInput("X", this->Input("X"));
grad_op->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out"));
grad_op->SetOutput(framework::GradVarName("X"), this->InputGrad("X"));
grad_op->SetAttrMap(this->Attrs());
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
DECLARE_INFER_SHAPE_FUNCTOR(gelu,
GeluInferShapeFunctor,
PD_INFER_META(phi::UnchangedInferMeta));
REGISTER_OPERATOR(gelu,
ops::GeluOp,
ops::GeluOpMaker,
ops::GeluGradOpMaker<paddle::framework::OpDesc>,
ops::GeluGradOpMaker<paddle::imperative::OpBase>,
GeluInferShapeFunctor);
REGISTER_OPERATOR(gelu_grad, ops::GeluGradOp);
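As a numeric cross-check of the two formulas in the Gelu comment, a small standalone program comparing the exact erf form against the tanh approximation; the sample inputs are arbitrary:

#include <cmath>
#include <cstdio>

// Exact form: out = x * (1 + erf(x / sqrt(2))) / 2
double gelu_exact(double x) {
  return 0.5 * x * (1.0 + std::erf(x / std::sqrt(2.0)));
}

// Tanh approximation: out = x / 2 * (1 + tanh(sqrt(2/pi) * (x + 0.044715 x^3)))
double gelu_tanh(double x) {
  const double pi = std::acos(-1.0);
  return 0.5 * x *
         (1.0 + std::tanh(std::sqrt(2.0 / pi) * (x + 0.044715 * x * x * x)));
}

int main() {
  for (double x : {-2.0, -0.5, 0.0, 0.5, 2.0})
    std::printf("x=% .1f  exact=% .6f  approx=% .6f\n",
                x, gelu_exact(x), gelu_tanh(x));
  return 0;
}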
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/framework/infershape_utils.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/phi/core/infermeta_utils.h"
#include "paddle/phi/infermeta/backward.h"
#include "paddle/phi/infermeta/unary.h"
namespace paddle {
namespace operators {
class GumbelSoftmaxOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected:
framework::OpKernelType GetExpectedKernelType(
const framework::ExecutionContext& ctx) const override {
return framework::OpKernelType(
OperatorWithKernel::IndicateVarDataType(ctx, "X"),
ctx.device_context());
}
};
class GumbelSoftmaxOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() override {
AddInput("X",
"(Tensor) An N-D Tensor, N >= 1,"
"The first N - 1 dimensions index into a batch of independent "
"distributions "
"and the last dimension represents a vector of probabilities for "
"each class.");
AddOutput("Out", "The sampled tensor with the same shape as X.");
AddAttr<float>("temperature",
"(float, default 1.0) non-negative scalar temperature.")
.SetDefault(1.0);
AddAttr<bool>(
"hard",
"(bool, default false) "
"if True, the returned samples will be discretized as one-hot vectors, "
"but will be differentiated as if it is the soft sample in autograd.")
.SetDefault(false);
AddAttr<int>("axis",
"(int, default -1)"
"The dimension index of Input(x) to perform gumbel_softmax.")
.SetDefault(-1);
AddComment(R"DOC(
GumbelSoftmax Operator.
Samples from the Gumbel-Softmax distribution and optionally discretizes.
)DOC");
}
};
class GumbelSoftmaxGradOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
};
template <typename T>
class GumbelSoftmaxGradOpMaker : public framework::SingleGradOpMaker<T> {
public:
using framework::SingleGradOpMaker<T>::SingleGradOpMaker;
protected:
void Apply(GradOpPtr<T> op) const override {
op->SetType("gumbel_softmax_grad");
op->SetInput("Out", this->Output("Out"));
op->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out"));
op->SetOutput(framework::GradVarName("X"), this->InputGrad("X"));
op->SetAttrMap(this->Attrs());
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
DECLARE_INFER_SHAPE_FUNCTOR(gumbel_softmax,
GumbelSoftmaxInferShapeFunctor,
PD_INFER_META(phi::GumbelSoftmaxInferMeta));
DECLARE_INFER_SHAPE_FUNCTOR(gumbel_softmax_grad,
GumbelSoftmaxGradInferShapeFunctor,
PD_INFER_META(phi::GumbelSoftmaxGradInferMeta));
REGISTER_OPERATOR(gumbel_softmax,
ops::GumbelSoftmaxOp,
ops::GumbelSoftmaxOpMaker,
ops::GumbelSoftmaxGradOpMaker<paddle::framework::OpDesc>,
ops::GumbelSoftmaxGradOpMaker<paddle::imperative::OpBase>,
GumbelSoftmaxInferShapeFunctor);
REGISTER_OPERATOR(gumbel_softmax_grad,
ops::GumbelSoftmaxGradOp,
GumbelSoftmaxGradInferShapeFunctor);
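A rough standalone sketch of the sampling the GumbelSoftmax comment describes: add Gumbel(0, 1) noise to the logits, divide by the temperature, and apply softmax; with hard = true the result would additionally be discretized to the one-hot of the argmax while gradients flow through the soft sample. The logits and seed below are arbitrary, and the plain softmax skips the usual max-subtraction stabilization:

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <cstdio>
#include <random>
#include <vector>

int main() {
  std::vector<double> logits = {1.0, 2.0, 0.5, 0.1};  // toy values
  const double temperature = 1.0;
  std::mt19937 gen(0);
  std::uniform_real_distribution<double> uniform(1e-10, 1.0);
  // g_i = -log(-log(U_i)) is a Gumbel(0, 1) sample; y = softmax((x + g) / t).
  std::vector<double> y(logits.size());
  double denom = 0.0;
  for (std::size_t i = 0; i < logits.size(); ++i) {
    const double g = -std::log(-std::log(uniform(gen)));
    y[i] = std::exp((logits[i] + g) / temperature);
    denom += y[i];
  }
  for (double& v : y) v /= denom;
  for (std::size_t i = 0; i < y.size(); ++i) std::printf("%.4f ", y[i]);
  const std::size_t argmax =
      std::max_element(y.begin(), y.end()) - y.begin();
  std::printf("\nhard one-hot index: %zu\n", argmax);
  return 0;
}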
/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License. */
#include "paddle/fluid/framework/infershape_utils.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/phi/infermeta/unary.h"
namespace paddle {
namespace operators {
class UnfoldOpMaker : public framework::OpProtoAndCheckerMaker {
public:
void Make() override {
AddInput("X",
"Tensor, "
"the input of unfold op. "
"The format of X is [N, C_in, H, W], "
"where N is the batch size, C_in is the input channels, "
"H is the height and W is the width");
AddOutput(
"Y",
"Tensor, "
"the output of unfold op. "
"The format of Y is [N, C_in*filter_height*filter_width, "
"output_height*output_width], where N is the batch size, "
"C_in is the input channels of X, filter_height and filter_width is "
"height and width of the filtering kernel, output_height and "
"output_width "
"is the calculated height and width of output feature map.");
AddAttr<std::vector<int>>(
"kernel_sizes",
"vector<int>, the kernel sizes of the convolution operator.");
AddAttr<std::vector<int>>(
"strides", "vector<int>, the strides of the convolution operator.");
AddAttr<std::vector<int>>(
"paddings",
"vector<int>, the paddings applied to pad the feature map.");
AddAttr<std::vector<int>>(
"dilations", "vector<int>, the dilations of the convolution operator.");
AddComment(R"DOC(
**Unfold Operator**
This operator is used to extract sliding local blocks from a batched input tensor, also known
as im2col when operated on a batched 2D image tensor. For each block under the convolution filter,
all elements are rearranged into a column. As the convolution filter slides over the input
feature map, a series of such columns is formed.
)DOC");
}
};
class UnfoldOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
protected:
framework::OpKernelType GetExpectedKernelType(
const framework::ExecutionContext& ctx) const override {
return framework::OpKernelType(
OperatorWithKernel::IndicateVarDataType(ctx, "X"),
ctx.device_context());
}
};
class UnfoldGradOp : public framework::OperatorWithKernel {
public:
using framework::OperatorWithKernel::OperatorWithKernel;
void InferShape(framework::InferShapeContext* ctx) const override {
PADDLE_ENFORCE_EQ(
ctx->HasInput(framework::GradVarName("Y")),
true,
platform::errors::NotFound("The gradient of Y should not be null"));
PADDLE_ENFORCE_EQ(
ctx->HasInput("X"),
true,
platform::errors::NotFound("The input X should not be null"));
PADDLE_ENFORCE_EQ(
ctx->HasOutput(framework::GradVarName("X")),
true,
platform::errors::NotFound("The gradient of X should not be null"));
ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X"));
}
protected:
framework::OpKernelType GetExpectedKernelType(
const framework::ExecutionContext& ctx) const override {
return framework::OpKernelType(OperatorWithKernel::IndicateVarDataType(
ctx, framework::GradVarName("Y")),
ctx.device_context());
}
};
template <typename T>
class UnfoldGradMaker : public framework::SingleGradOpMaker<T> {
public:
using framework::SingleGradOpMaker<T>::SingleGradOpMaker;
protected:
void Apply(GradOpPtr<T> op) const override {
op->SetType("unfold_grad");
op->SetInput(framework::GradVarName("Y"), this->OutputGrad("Y"));
op->SetInput("X", this->Input("X"));
op->SetOutput(framework::GradVarName("X"), this->InputGrad("X"));
op->SetAttrMap(this->Attrs());
}
};
DECLARE_NO_NEED_BUFFER_VARS_INFERER(UnfoldGradOpNoNeedBufferVarsInferer, "X");
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
DECLARE_INFER_SHAPE_FUNCTOR(unfold,
UnfoldInferShapeFunctor,
PD_INFER_META(phi::UnfoldInferMeta));
REGISTER_OPERATOR(unfold,
ops::UnfoldOp,
ops::UnfoldOpMaker,
ops::UnfoldGradMaker<paddle::framework::OpDesc>,
ops::UnfoldGradMaker<paddle::imperative::OpBase>,
UnfoldInferShapeFunctor);
REGISTER_OPERATOR(unfold_grad,
ops::UnfoldGradOp,
ops::UnfoldGradOpNoNeedBufferVarsInferer);
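The output_height and output_width mentioned in UnfoldOpMaker follow the usual convolution output-size arithmetic; a tiny helper illustrating the formula, assuming symmetric padding on both sides (the concrete parameter values are arbitrary):

#include <cstdio>

// out = (in + 2 * padding - dilation * (kernel - 1) - 1) / stride + 1
int conv_out_size(int in, int kernel, int stride, int padding, int dilation) {
  return (in + 2 * padding - dilation * (kernel - 1) - 1) / stride + 1;
}

int main() {
  const int H = 32, W = 32, K = 3, stride = 2, padding = 1, dilation = 1;
  std::printf("output_height=%d output_width=%d\n",
              conv_out_size(H, K, stride, padding, dilation),
              conv_out_size(W, K, stride, padding, dilation));
  return 0;
}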
......@@ -390,6 +390,16 @@
data_type: out_grad
no_need_buffer: x
- backward_op : fill_diagonal_tensor_grad
forward : fill_diagonal_tensor (Tensor x, Tensor y, int64_t offset, int dim1, int dim2) -> Tensor(out)
args : (Tensor out_grad, int64_t offset, int dim1, int dim2)
output : Tensor(x_grad)
infer_meta :
func : FillDiagonalTensorGradInferMeta
kernel :
func : fill_diagonal_tensor_grad
inplace : (out_grad -> x_grad)
- backward_op : flip_grad
forward : flip (Tensor x, int[] axis) -> Tensor(out)
args : (Tensor out_grad, int[] axis)
......@@ -407,6 +417,25 @@
func : floor_grad
inplace : (out_grad -> x_grad)
- backward_op : gelu_grad
forward : gelu(Tensor x, bool approximate) -> Tensor(out)
args : (Tensor x, Tensor out_grad, bool approximate)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param: [x]
kernel :
func : gelu_grad
- backward_op : gumbel_softmax_grad
forward : gumbel_softmax (Tensor x, float temperature, bool hard, int axis) -> Tensor(out)
args : (Tensor out, Tensor out_grad, int axis)
output : Tensor(x_grad)
infer_meta :
func : GumbelSoftmaxGradInferMeta
kernel :
func : gumbel_softmax_grad
- backward_op : hardshrink_grad
forward : hardshrink (Tensor x, float threshold) -> Tensor(out)
args : (Tensor x, Tensor out_grad, float threshold)
......@@ -909,3 +938,27 @@
param : [out_grad]
kernel :
func : trunc_grad
- backward_op : unfold_grad
forward : unfold (Tensor x, int[] kernel_sizes, int[] strides, int[] paddings, int[] dilations) -> Tensor(out)
args : (Tensor x, Tensor out_grad, int[] kernel_sizes, int[] strides, int[] paddings, int[] dilations)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param : [x]
kernel :
func : unfold_grad
data_type : out_grad
no_need_buffer : x
- backward_op: fold_grad
forward: fold (Tensor x, int[] output_sizes, int[] kernel_sizes, int[] strides, int[] paddings, int[] dilations) -> Tensor(out)
args: (Tensor x, Tensor out_grad, int[] output_sizes, int[] kernel_sizes, int[] strides, int[] paddings, int[] dilations)
output: Tensor(x_grad)
infer_meta:
func: UnchangedInferMeta
param : [x]
kernel:
func: fold_grad
data_type : out_grad
no_need_buffer : x
......@@ -541,16 +541,6 @@
kernel :
func : fill_diagonal_grad
- backward_op : fill_diagonal_tensor_grad
forward : fill_diagonal_tensor (Tensor x, Tensor y, int64_t offset, int dim1, int dim2) -> Tensor(out)
args : (Tensor out_grad, int64_t offset, int dim1, int dim2)
output : Tensor(x_grad)
infer_meta :
func : FillDiagonalTensorGradInferMeta
kernel :
func : fill_diagonal_tensor_grad
inplace : (out_grad -> x_grad)
- backward_op : fill_grad
forward : fill (Tensor x, Scalar value) -> Tensor(out)
args : (Tensor out_grad, Scalar value)
......@@ -639,16 +629,6 @@
func : gather_nd_grad
no_need_buffer : x
- backward_op : gelu_grad
forward : gelu(Tensor x, bool approximate) -> Tensor(out)
args : (Tensor x, Tensor out_grad, bool approximate)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param: [x]
kernel :
func : gelu_grad
- backward_op : grid_sample_grad
forward : grid_sample (Tensor x, Tensor grid, str mode, str padding_mode, bool align_corners) -> Tensor(out)
args : (Tensor x, Tensor grid, Tensor out_grad, str mode, str padding_mode, bool align_corners)
......@@ -673,16 +653,6 @@
optional: scale, bias
inplace : (y_grad -> x_grad)
- backward_op : gumbel_softmax_grad
forward : gumbel_softmax (Tensor x, float temperature, bool hard, int axis) -> Tensor(out)
args : (Tensor out, Tensor out_grad, int axis)
output : Tensor(x_grad)
infer_meta :
func : GumbelSoftmaxGradInferMeta
param : [out, out_grad, axis]
kernel :
func : gumbel_softmax_grad
- backward_op : hardswish_grad
forward : hardswish (Tensor x) -> Tensor(out)
args : (Tensor x, Tensor out_grad, float threshold = 6.0, float scale = 6.0, float offset = 3.0)
......@@ -1935,17 +1905,6 @@
output : Tensor(input_grad)
invoke : stack(out_grad, axis)
- backward_op : unfold_grad
forward : unfold (Tensor x, int[] kernel_sizes, int[] strides, int[] paddings, int[] dilations) -> Tensor(out)
args : (Tensor x, Tensor out_grad, int[] kernel_sizes, int[] strides, int[] paddings, int[] dilations)
output : Tensor(x_grad)
infer_meta :
func : UnchangedInferMeta
param : [x]
kernel :
func : unfold_grad
no_need_buffer : x
- backward_op : uniform_inplace_grad
forward : uniform_inplace(Tensor x, float min, float max, int seed, int diag_num, int diag_step, float diag_val) -> Tensor(out)
args : (Tensor out_grad, float min, float max, int seed, int diag_num, int diag_step, float diag_val)
......@@ -2018,17 +1977,6 @@
func : yolo_loss_grad
optional : gt_score
- backward_op: fold_grad
forward: fold (Tensor x, int[] output_sizes, int[] kernel_sizes, int[] strides, int[] paddings, int[] dilations) -> Tensor(out)
args: (Tensor x, Tensor out_grad, int[] output_sizes, int[] kernel_sizes, int[] strides, int[] paddings, int[] dilations)
output: Tensor(x_grad)
infer_meta:
func: UnchangedInferMeta
param : [x]
kernel:
func: fold_grad
no_need_buffer : x
- backward_op: unpool3d_grad
forward: unpool3d (Tensor x, Tensor indices, int[] ksize, int[] strides, int[] padding, int[] output_size, str data_format) -> Tensor(out)
args: (Tensor x, Tensor indices, Tensor out, Tensor out_grad, int[] ksize, int[] strides, int[] padding, int[] output_size, str data_format)
......
......@@ -287,6 +287,15 @@
optional : bias
backward : bilinear_tensor_product_grad
- op : bincount
args: (Tensor x, Tensor weights, Scalar minlength)
output: Tensor(out)
infer_meta:
func: BincountInferMeta
kernel:
func: bincount
optional: weights
- op : bitwise_and
args : (Tensor x, Tensor y)
output : Tensor(out)
......@@ -328,6 +337,15 @@
func : box_coder
optional : prior_box_var
- op : broadcast_tensors
args: (Tensor[] input)
output: Tensor[]{input.size()}
infer_meta:
func: BroadcastTensorsInferMeta
kernel:
func: broadcast_tensors
backward: broadcast_tensors_grad
- op : cast
args : (Tensor x, DataType dtype)
output : Tensor
......@@ -543,6 +561,14 @@
func : depthwise_conv2d_transpose
backward : depthwise_conv2d_transpose_grad
- op : dirichlet
args: (Tensor alpha)
output: Tensor(out)
infer_meta:
func: DirichletInferMeta
kernel:
func: dirichlet
- op : distribute_fpn_proposals
args : (Tensor fpn_rois, Tensor rois_num, int min_level, int max_level, int refer_level, int refer_scale, bool pixel_offset)
output : Tensor[](multi_fpn_rois){max_level - min_level + 1}, Tensor[](multi_level_rois_num){max_level - min_level + 1}, Tensor(restore_index)
......@@ -719,16 +745,6 @@
inplace : (x -> out)
backward : fill_diagonal_grad
- op : fill_diagonal_tensor
args : (Tensor x, Tensor y, int64_t offset, int dim1, int dim2)
output : Tensor(out)
infer_meta :
func : FillDiagonalTensorInferMeta
kernel :
func : fill_diagonal_tensor
inplace : (x -> out)
backward : fill_diagonal_tensor_grad
- op : flatten
args : (Tensor x, int start_axis, int stop_axis)
output : Tensor(out), Tensor(xshape)
......@@ -859,14 +875,6 @@
data_type : x
backward : gather_nd_grad
- op : gather_tree
args : (Tensor ids, Tensor parents)
output : Tensor(out)
infer_meta :
func : GatherTreeMeta
kernel :
func : gather_tree
- op : gaussian
args : (IntArray shape, float mean, float std, int seed, DataType dtype, Place place={})
output: Tensor(out)
......@@ -879,16 +887,6 @@
data_type : dtype
backend : place
- op : gelu
args : (Tensor x, bool approximate)
output : Tensor(out)
infer_meta :
func : UnchangedInferMeta
param: [x]
kernel :
func : gelu
backward : gelu_grad
- op : generate_proposals
args : (Tensor scores, Tensor bbox_deltas, Tensor im_shape, Tensor anchors, Tensor variances, int pre_nms_top_n, int post_nms_top_n, float nms_thresh, float min_size, float eta, bool pixel_offset=true)
output : Tensor(rpn_rois), Tensor(rpn_roi_probs), Tensor(rpn_rois_num)
......@@ -935,15 +933,6 @@
intermediate : mean, variance
backward : group_norm_grad
- op : gumbel_softmax
args : (Tensor x, float temperature, bool hard, int axis)
output : Tensor
infer_meta :
func : GumbelSoftmaxInferMeta
kernel :
func : gumbel_softmax
backward : gumbel_softmax_grad
- op : hardswish
args : (Tensor x)
output : Tensor
......@@ -1658,6 +1647,15 @@
output : Tensor(out)
invoke : full_like(x, 1, dtype, place)
- op : overlap_add
args: (Tensor x, int hop_length, int axis)
output: Tensor
infer_meta:
func: OverlapAddInferMeta
kernel:
func: overlap_add
backward: overlap_add_grad
- op : p_norm
args : (Tensor x, float porder, int axis, float epsilon, bool keepdim, bool asvector=false)
output : Tensor(out)
......@@ -1899,6 +1897,21 @@
optional : mean_grad
inplace : (param -> param_out), (moment -> moment_out), (mean_square -> mean_square_out), (mean_grad -> mean_grad_out)
- op : rnn
args: (Tensor x, Tensor[] pre_state, Tensor[] weight_list, Tensor sequence_length, Tensor dropout_state_in, float dropout_prob=0.0, bool is_bidirec=false, int input_size=10, int hidden_size=100, int num_layers=1, str mode="RNN_TANH", int seed=0, bool is_test=false)
output: Tensor(out), Tensor(dropout_state_out), Tensor[](state){pre_state.size()}, Tensor(reserve)
infer_meta:
func: RnnInferMeta
param : [x, pre_state, weight_list, sequence_length, dropout_prob, is_bidirec, input_size, hidden_size, num_layers, mode, seed, is_test]
kernel:
func: rnn
param : [x, pre_state, weight_list, sequence_length, dropout_prob, is_bidirec, input_size, hidden_size, num_layers, mode, seed, is_test]
data_type: x
backward: rnn_grad
optional : sequence_length
intermediate : reserve
view : (dropout_state_in -> dropout_state_out)
- op : roi_align
args : (Tensor x, Tensor boxes, Tensor boxes_num, int pooled_height, int pooled_width, float spatial_scale, int sampling_ratio, bool aligned)
output : Tensor
......@@ -2335,15 +2348,6 @@
func : unbind
backward : unbind_grad
- op : unfold
args : (Tensor x, int[] kernel_sizes, int[] strides, int[] paddings, int[] dilations)
output : Tensor
infer_meta :
func : UnfoldInferMeta
kernel :
func : unfold
backward : unfold_grad
- op : uniform
args : (IntArray shape, DataType dtype, Scalar min, Scalar max, int seed, Place place={})
output : Tensor(out)
......@@ -2356,6 +2360,17 @@
data_type : dtype
backend : place
- op : uniform_inplace
args: (Tensor x, float min, float max, int seed, int diag_num, int diag_step, float diag_val)
output: Tensor(out)
infer_meta:
func: UniformRandomInplaceInferMeta
kernel:
func: uniform_inplace
data_type: x
inplace: (x -> out)
backward: uniform_inplace_grad
# The `axis` argument of Python API paddle.unique is not vector
- op : unique
args : (Tensor x, bool return_index, bool return_inverse, bool return_counts, int[] axis, DataType dtype=DataType::INT64)
......@@ -2375,6 +2390,26 @@
func : unique_consecutive
data_type : x
- op : unpool
args: (Tensor x, Tensor indices, int[] ksize, int[] strides, int[] padding, IntArray output_size, str data_format)
output: Tensor(out)
infer_meta:
func: UnpoolInferMeta
kernel:
func: unpool
data_type: x
backward: unpool_grad
- op : unpool3d
args: (Tensor x, Tensor indices, int[] ksize, int[] strides, int[] padding, int[] output_size, str data_format)
output: Tensor(out)
infer_meta:
func: Unpool3dInferMeta
kernel:
func: unpool3d
data_type: x
backward: unpool3d_grad
- op : unsqueeze
args : (Tensor x, IntArray axis)
output : Tensor(out), Tensor(xshape)
......@@ -2466,93 +2501,3 @@
args : (Tensor x, DataType dtype=DataType::UNDEFINED, Place place = {})
output : Tensor(out)
invoke : full_like(x, 0, dtype, place)
- op: bincount
args: (Tensor x, Tensor weights, Scalar minlength)
output: Tensor(out)
infer_meta:
func: BincountInferMeta
kernel:
func: bincount
optional: weights
- op: broadcast_tensors
args: (Tensor[] input)
output: Tensor[]{input.size()}
infer_meta:
func: BroadcastTensorsInferMeta
kernel:
func: broadcast_tensors
backward: broadcast_tensors_grad
- op: dirichlet
args: (Tensor alpha)
output: Tensor(out)
infer_meta:
func: DirichletInferMeta
kernel:
func: dirichlet
- op: fold
args: (Tensor x, int[] output_sizes, int[] kernel_sizes, int[] strides, int[] paddings, int[] dilations)
output: Tensor(out)
infer_meta:
func: FoldInferMeta
kernel:
func: fold
backward: fold_grad
- op: overlap_add
args: (Tensor x, int hop_length, int axis)
output: Tensor
infer_meta:
func: OverlapAddInferMeta
kernel:
func: overlap_add
backward: overlap_add_grad
- op: rnn
args: (Tensor x, Tensor[] pre_state, Tensor[] weight_list, Tensor sequence_length, Tensor dropout_state_in, float dropout_prob=0.0, bool is_bidirec=false, int input_size=10, int hidden_size=100, int num_layers=1, str mode="RNN_TANH", int seed=0, bool is_test=false)
output: Tensor(out), Tensor(dropout_state_out), Tensor[](state){pre_state.size()}, Tensor(reserve)
infer_meta:
func: RnnInferMeta
param : [x, pre_state, weight_list, sequence_length, dropout_prob, is_bidirec, input_size, hidden_size, num_layers, mode, seed, is_test]
kernel:
func: rnn
param : [x, pre_state, weight_list, sequence_length, dropout_prob, is_bidirec, input_size, hidden_size, num_layers, mode, seed, is_test]
data_type: x
backward: rnn_grad
optional : sequence_length
intermediate : reserve
view : (dropout_state_in -> dropout_state_out)
- op: uniform_inplace
args: (Tensor x, float min, float max, int seed, int diag_num, int diag_step, float diag_val)
output: Tensor(out)
infer_meta:
func: UniformRandomInplaceInferMeta
kernel:
func: uniform_inplace
data_type: x
inplace: (x -> out)
backward: uniform_inplace_grad
- op: unpool
args: (Tensor x, Tensor indices, int[] ksize, int[] strides, int[] padding, IntArray output_size, str data_format)
output: Tensor(out)
infer_meta:
func: UnpoolInferMeta
kernel:
func: unpool
data_type: x
backward: unpool_grad
- op: unpool3d
args: (Tensor x, Tensor indices, int[] ksize, int[] strides, int[] padding, int[] output_size, str data_format)
output: Tensor(out)
infer_meta:
func: Unpool3dInferMeta
kernel:
func: unpool3d
data_type: x
backward: unpool3d_grad
......@@ -468,6 +468,12 @@
inputs: {x: X}
outputs: {out: Out}
- op : fill_diagonal_tensor
inputs :
{x : X, y : Y}
outputs :
out : Out
- op : flip
inputs :
x : X
......@@ -500,6 +506,12 @@
attrs : [bool use_mkldnn = false, str x_data_format = "", str y_data_format = "", str mkldnn_data_type = "float32",
bool use_quantizer = false, float Scale_x = 1.0f, float Scale_y = 1.0f, float Scale_out = 1.0f]
- op : fold
inputs :
x : X
outputs :
out : Y
- op : frobenius_norm
backward : frobenius_norm_grad
extra :
......@@ -514,8 +526,18 @@
extra :
attrs : [bool overwrite = true]
- op : gather_tree
inputs :
{ids : Ids, parents : Parents}
outputs :
out : Out
- op : gelu
backward : gelu_grad
inputs :
x : X
outputs :
out : Out
extra :
attrs : [bool use_mkldnn = false, str mkldnn_data_type = "float32"]
......@@ -534,6 +556,12 @@
extra :
attrs : [bool is_test = false]
- op : gumbel_softmax
inputs :
x : X
outputs :
out : Out
- op : hard_swish
backward : hard_swish_grad
extra :
......@@ -1052,6 +1080,12 @@
outputs :
out : Out
- op : unfold
inputs :
x : X
outputs :
out : Y
- op : while
backward : while_grad
extra :
......
......@@ -354,6 +354,16 @@
func : fft_r2c
backward : fft_r2c_grad
- op : fill_diagonal_tensor
args : (Tensor x, Tensor y, int64_t offset = 0, int dim1 = 0, int dim2 = 1)
output : Tensor(out)
infer_meta :
func : FillDiagonalTensorInferMeta
kernel :
func : fill_diagonal_tensor
inplace : (x -> out)
backward : fill_diagonal_tensor_grad
- op : flip
args : (Tensor x, int[] axis)
output : Tensor (out)
......@@ -373,6 +383,43 @@
inplace : (x -> out)
backward : floor_grad
- op : fold
args: (Tensor x, int[] output_sizes, int[] kernel_sizes, int[] strides, int[] paddings, int[] dilations)
output: Tensor(out)
infer_meta:
func: FoldInferMeta
kernel:
func: fold
backward: fold_grad
- op : gather_tree
args : (Tensor ids, Tensor parents)
output : Tensor(out)
infer_meta :
func : GatherTreeMeta
kernel :
func : gather_tree
data_type : ids
- op : gelu
args : (Tensor x, bool approximate = false)
output : Tensor(out)
infer_meta :
func : UnchangedInferMeta
param: [x]
kernel :
func : gelu
backward : gelu_grad
- op : gumbel_softmax
args : (Tensor x, float temperature = 1.0, bool hard = false, int axis = -1)
output : Tensor
infer_meta :
func : GumbelSoftmaxInferMeta
kernel :
func : gumbel_softmax
backward : gumbel_softmax_grad
- op : hardshrink
args : (Tensor x, float threshold = 0.5)
output : Tensor (out)
......@@ -687,3 +734,12 @@
kernel :
func : trunc
backward : trunc_grad
- op : unfold
args : (Tensor x, int[] kernel_sizes, int[] strides, int[] paddings, int[] dilations)
output : Tensor(out)
infer_meta :
func : UnfoldInferMeta
kernel :
func : unfold
backward : unfold_grad
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/core/compat/op_utils.h"
namespace phi {
KernelSignature FillDiagonalTensorOpArgumentMapping(
const ArgumentMappingContext& ctx) {
return KernelSignature(
"fill_diagonal_tensor", {"X", "Y"}, {"offset", "dim1", "dim2"}, {"Out"});
}
KernelSignature FillDiagonalTensorGradOpArgumentMapping(
const ArgumentMappingContext& ctx) {
return KernelSignature("fill_diagonal_tensor_grad",
{"Out@GRAD"},
{"offset", "dim1", "dim2"},
{"X@GRAD"});
}
} // namespace phi
PD_REGISTER_ARG_MAPPING_FN(fill_diagonal_tensor,
phi::FillDiagonalTensorOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(fill_diagonal_tensor_grad,
phi::FillDiagonalTensorGradOpArgumentMapping);
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/phi/core/compat/op_utils.h"
namespace phi {
KernelSignature FoldGradOpArgumentMapping(const ArgumentMappingContext& ctx) {
return KernelSignature(
"fold_grad",
{"X", "Y@GRAD"},
{"output_sizes", "kernel_sizes", "strides", "paddings", "dilations"},
{"X@GRAD"});
}
} // namespace phi
PD_REGISTER_ARG_MAPPING_FN(fold_grad, phi::FoldGradOpArgumentMapping);
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/core/compat/op_utils.h"
namespace phi {
KernelSignature GeluOpArgumentMapping(const ArgumentMappingContext& ctx) {
return KernelSignature("gelu", {"X"}, {"approximate"}, {"Out"});
}
KernelSignature GeluGradOpArgumentMapping(const ArgumentMappingContext& ctx) {
return KernelSignature(
"gelu_grad", {"X", "Out@GRAD"}, {"approximate"}, {"X@GRAD"});
}
} // namespace phi
PD_REGISTER_ARG_MAPPING_FN(gelu_grad, phi::GeluGradOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(gelu, phi::GeluOpArgumentMapping);
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/phi/core/compat/op_utils.h"
namespace phi {
KernelSignature GumbelSoftmaxOpArgumentMapping(
const ArgumentMappingContext& ctx) {
return KernelSignature(
"gumbel_softmax", {"X"}, {"temperature", "hard", "axis"}, {"Out"});
}
KernelSignature GumbelSoftmaxGradOpArgumentMapping(
const ArgumentMappingContext& ctx) {
return KernelSignature(
"gumbel_softmax_grad", {"Out", "Out@GRAD"}, {"axis"}, {"X@GRAD"});
}
} // namespace phi
PD_REGISTER_ARG_MAPPING_FN(gumbel_softmax, phi::GumbelSoftmaxOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(gumbel_softmax_grad,
phi::GumbelSoftmaxGradOpArgumentMapping);
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/phi/core/compat/op_utils.h"
namespace phi {
KernelSignature UnfoldGradOpArgumentMapping(const ArgumentMappingContext& ctx) {
return KernelSignature("unfold_grad",
{"X", "Y@GRAD"},
{"kernel_sizes", "strides", "paddings", "dilations"},
{"X@GRAD"});
}
} // namespace phi
PD_REGISTER_ARG_MAPPING_FN(unfold_grad, phi::UnfoldGradOpArgumentMapping);