未验证 提交 005fee12 编写于 作者: S Sanbu 提交者: GitHub

Support static graph code-gen for unpool (#52947)

上级 896c9315
...@@ -24,64 +24,6 @@ limitations under the License. */ ...@@ -24,64 +24,6 @@ limitations under the License. */
namespace paddle { namespace paddle {
namespace operators { namespace operators {
// Proto maker for the 2-D max-unpool operator: declares the inputs
// (X, Indices), the output (Out), and the attributes consumed by the
// unpool kernel (ksize, strides, paddings, unpooling_type, output_size,
// data_format).
class Unpool2dOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput(
        "X",
        "(Tensor) The input tensor of unpool operator. "
        "The format of input tensor is NCHW. Where N is batch size, C is the "
        "number of channels, H and W is the height and width of feature.");
    AddInput(
        "Indices",
        "(Tensor) The input tensor of the indices given out by MaxPool2d. "
        "The format of input tensor is NCHW. Where N is batch size, C is the "
        "number of channels, H and W is the height and width of feature.");
    AddOutput("Out",
              "(Tensor) The output tensor of unpool operator."
              "The format of output tensor is also NCHW."
              "Where N is batch size, C is "
              "the number of channels, H and W is the height and "
              "width of feature.");
    AddAttr<std::vector<int>>(
        "ksize",
        "(vector), the unpooling window size(height, width) "
        "of unpooling operator.");
    AddAttr<std::vector<int>>("strides",
                              "(vector, default:{1, 1}), "
                              "strides (height, width) of unpooling operator.")
        .SetDefault({1, 1});
    AddAttr<std::vector<int>>("paddings",
                              "(vector default:{0,0}), "
                              "paddings (height, width) of unpooling operator.")
        .SetDefault({0, 0});
    AddAttr<std::string>(
        "unpooling_type",
        "(string), unpooling type, can be \"max\" for max-unpooling ")
        .InEnum({"max"});
    // output_size may also be supplied as a runtime Tensor (SupportTensor),
    // which is why the new-IR yaml maps it to an IntArray.
    AddAttr<std::vector<int>>("output_size",
                              "(vector, optional). The shape of output.")
        .SetDefault({0, 0})
        .SupportTensor();
    // Fixed description: it previously claimed the default was "NHWC" and
    // carried a dangling "Only used in " phrase, contradicting the actual
    // SetDefault("NCHW") below.
    AddAttr<std::string>(
        "data_format",
        "(string, default NCHW) "
        "An optional string from: \"NHWC\", \"NCHW\". "
        "Defaults to \"NCHW\". Specify the data format of the output data, "
        "the input will be transformed automatically. ")
        .SetDefault("NCHW");
    AddComment(R"DOC(
Input shape is: $(N, C_{in}, H_{in}, W_{in})$, Output shape is:
$(N, C_{out}, H_{out}, W_{out})$, where
$$
H_{out} = (H_{in}-1) * strides[0] - 2 * paddings[0] + ksize[0] \\
W_{out} = (W_{in}-1) * strides[1] - 2 * paddings[1] + ksize[1]
$$
Paper: http://www.matthewzeiler.com/wp-content/uploads/2017/07/iccv2011.pdf
)DOC");
  }
};
class Unpool3dOpMaker : public framework::OpProtoAndCheckerMaker { class Unpool3dOpMaker : public framework::OpProtoAndCheckerMaker {
public: public:
void Make() override { void Make() override {
...@@ -200,18 +142,6 @@ class Unpool3dOpGradMaker : public framework::SingleGradOpMaker<T> { ...@@ -200,18 +142,6 @@ class Unpool3dOpGradMaker : public framework::SingleGradOpMaker<T> {
} }
}; };
class UnpoolOpGrad : public framework::OperatorWithKernel {
protected:
phi::KernelKey GetExpectedKernelType(
const framework::ExecutionContext& ctx) const override {
return phi::KernelKey(OperatorWithKernel::IndicateVarDataType(ctx, "X"),
ctx.GetPlace());
}
public:
using framework::OperatorWithKernel::OperatorWithKernel;
};
class Unpool3dOpGrad : public framework::OperatorWithKernel { class Unpool3dOpGrad : public framework::OperatorWithKernel {
protected: protected:
phi::KernelKey GetExpectedKernelType( phi::KernelKey GetExpectedKernelType(
...@@ -228,21 +158,6 @@ class Unpool3dOpGrad : public framework::OperatorWithKernel { ...@@ -228,21 +158,6 @@ class Unpool3dOpGrad : public framework::OperatorWithKernel {
} // namespace paddle } // namespace paddle
namespace ops = paddle::operators; namespace ops = paddle::operators;
// Bind compile-time shape inference for the forward op to phi's
// UnpoolInferMeta.
DECLARE_INFER_SHAPE_FUNCTOR(unpool,
                            UnpoolInferShapeFunctor,
                            PD_INFER_META(phi::UnpoolInferMeta));
// Register the forward op with its proto maker, grad-op makers for both
// static graph (OpDesc) and dygraph (OpBase) modes, and the infer-shape
// functor declared above.
REGISTER_OPERATOR(unpool,
                  ops::UnpoolOp,
                  ops::Unpool2dOpMaker,
                  ops::UnpoolOpGradMaker<paddle::framework::OpDesc>,
                  ops::UnpoolOpGradMaker<paddle::imperative::OpBase>,
                  UnpoolInferShapeFunctor);
// The gradient output X@GRAD shares X's meta, hence UnchangedInferMeta
// (matching `param : [x]` in the yaml backward spec).
DECLARE_INFER_SHAPE_FUNCTOR(unpool_grad,
                            UnpoolGradInferShapeFunctor,
                            PD_INFER_META(phi::UnchangedInferMeta));
REGISTER_OPERATOR(unpool_grad, ops::UnpoolOpGrad, UnpoolGradInferShapeFunctor);
DECLARE_INFER_SHAPE_FUNCTOR(unpool, DECLARE_INFER_SHAPE_FUNCTOR(unpool,
Unpool3dInferShapeFunctor, Unpool3dInferShapeFunctor,
......
...@@ -2064,3 +2064,14 @@ ...@@ -2064,3 +2064,14 @@
kernel : kernel :
func : where_grad func : where_grad
no_need_buffer : x, y no_need_buffer : x, y
- backward_op: unpool_grad
forward: unpool (Tensor x, Tensor indices, int[] ksize, int[] strides = {1,1}, int[] paddings = {0,0}, IntArray output_size = {0,0}, str data_format="NCHW") -> Tensor(out)
args: (Tensor x, Tensor indices, Tensor out, Tensor out_grad, int[] ksize, int[] strides, int[] paddings, IntArray output_size, str data_format)
output: Tensor(x_grad)
infer_meta:
func: UnchangedInferMeta
param : [x]
kernel:
func: unpool_grad
data_type: x
...@@ -1053,14 +1053,3 @@ ...@@ -1053,14 +1053,3 @@
kernel: kernel:
func: unpool3d_grad func: unpool3d_grad
data_type: x data_type: x
- backward_op: unpool_grad
forward: unpool (Tensor x, Tensor indices, int[] ksize, int[] strides, int[] padding, IntArray output_size, str data_format) -> Tensor(out)
args: (Tensor x, Tensor indices, Tensor out, Tensor out_grad, int[] ksize, int[] strides, int[] padding, IntArray output_size, str data_format)
output: Tensor(x_grad)
infer_meta:
func: UnchangedInferMeta
param : [x]
kernel:
func: unpool_grad
data_type: x
...@@ -1217,16 +1217,6 @@ ...@@ -1217,16 +1217,6 @@
func : unique func : unique
data_type : x data_type : x
- op : unpool
args: (Tensor x, Tensor indices, int[] ksize, int[] strides, int[] padding, IntArray output_size, str data_format)
output: Tensor(out)
infer_meta:
func: UnpoolInferMeta
kernel:
func: unpool
data_type: x
backward: unpool_grad
- op : unpool3d - op : unpool3d
args: (Tensor x, Tensor indices, int[] ksize, int[] strides, int[] padding, int[] output_size, str data_format) args: (Tensor x, Tensor indices, int[] ksize, int[] strides, int[] padding, int[] output_size, str data_format)
output: Tensor(out) output: Tensor(out)
......
...@@ -2411,6 +2411,16 @@ ...@@ -2411,6 +2411,16 @@
outputs : outputs :
{out : Out, index : Index, counts : Counts} {out : Out, index : Index, counts : Counts}
- op : unpool
inputs :
{x : X, indices: Indices}
outputs :
out : Out
int_array :
output_size:
data_type : int
support_tensor : true
- op : unsqueeze (unsqueeze2) - op : unsqueeze (unsqueeze2)
backward : unsqueeze_grad (unsqueeze2_grad), unsqueeze_double_grad(unsqueeze2_double_grad) backward : unsqueeze_grad (unsqueeze2_grad), unsqueeze_double_grad(unsqueeze2_double_grad)
inputs : inputs :
......
...@@ -2133,6 +2133,16 @@ ...@@ -2133,6 +2133,16 @@
data_type : x data_type : x
optional : index, counts optional : index, counts
- op : unpool
args: (Tensor x, Tensor indices, int[] ksize, int[] strides = {1,1}, int[] paddings = {0,0}, IntArray output_size = {0,0}, str data_format="NCHW")
output: Tensor(out)
infer_meta:
func: UnpoolInferMeta
kernel:
func: unpool
data_type: x
backward: unpool_grad
- op : unsqueeze - op : unsqueeze
args : (Tensor x, IntArray axis = {}) args : (Tensor x, IntArray axis = {})
output : Tensor(out), Tensor(xshape) output : Tensor(out), Tensor(xshape)
......
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/core/compat/op_utils.h"
namespace phi {
// Maps the legacy fluid "unpool" op onto the phi "unpool" kernel:
// fluid inputs X/Indices become the kernel inputs, the listed attributes
// are forwarded in this exact order, and Out is the single kernel output.
KernelSignature UnpoolOpArgumentMapping(const ArgumentMappingContext& ctx) {
  return KernelSignature(
      "unpool",
      {"X", "Indices"},
      {"ksize", "strides", "paddings", "output_size", "data_format"},
      {"Out"});
}
// Maps the legacy fluid "unpool_grad" op onto the phi "unpool_grad"
// kernel. The kernel inputs mirror the backward yaml args: forward
// inputs X/Indices, forward output Out, and the incoming gradient
// Out@GRAD; the result is the gradient w.r.t. X.
KernelSignature UnpoolGradOpArgumentMapping(const ArgumentMappingContext& ctx) {
  return KernelSignature(
      "unpool_grad",
      {"X", "Indices", "Out", "Out@GRAD"},
      {"ksize", "strides", "paddings", "output_size", "data_format"},
      {"X@GRAD"});
}
} // namespace phi
PD_REGISTER_ARG_MAPPING_FN(unpool, phi::UnpoolOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(unpool_grad, phi::UnpoolGradOpArgumentMapping);
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册