/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 *     Unless required by applicable law or agreed to in writing, software
 *     distributed under the License is distributed on an "AS IS" BASIS,
 *     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *     See the License for the specific language governing permissions and
 *     limitations under the License. */

#include "paddle/operators/unpool_op.h"
namespace paddle {
namespace operators {

using framework::Tensor;

class Unpool2dOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  Unpool2dOpMaker(framework::OpProto* proto,
                  framework::OpAttrChecker* op_checker)
      : OpProtoAndCheckerMaker(proto, op_checker) {
    AddInput("X",
        "(Tensor) The input tensor of unpool operator. "
        "The format of input tensor is NCHW. Where N is batch size, C is the "
        "number of channels, H and W is the height and width of feature.");
    AddInput("Y",
        "(Tensor) The input tensor of the indices given out by MaxPool2d. "
        "The format of input tensor is NCHW. Where N is batch size, C is the "
        "number of channels, H and W is the height and width of feature.");
    AddOutput("Out",
        "(Tensor) The output tensor of unpool operator."
        "The format of output tensor is also NCHW."
        "Where N is batch size, C is "
        "the number of channels, H and W is the height and "
        "width of feature.");
    AddAttr<std::vector<int>>("ksize",
        "(vector ), the unpooling window size(height, width) "
        "of unpooling operator.");
    AddAttr<std::vector<int>>("strides",
        "(vector, default:{1, 1}), "
        "strides(height, width) of unpooling operator.")
        .SetDefault({1, 1});
    AddAttr<std::vector<int>>("paddings",
        "(vector defalut:{0,0}), "
        "paddings(height, width) of unpooling operator.")
        .SetDefault({0, 0});
    AddAttr<std::string>("unpoolingtype",
        "(string), unpooling type, can be \"max\" for max-unpooling ")
        .InEnum({"max"});
    AddComment(R"DOC(
          "input: the input Tensor to invert"
          "indices: the indices given out by MaxPool2d"
          "ksize  – Size of the max pooling window."
          "stride – Stride of the max pooling window."
                   "It is set to kernel_size by default."
          "padding – Padding that was added to the input"
        )DOC");
  }
};

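// Computes the unpooled size of one spatial dimension. It is the inverse of
// the pooling output-size formula
//     pooled_size = (input_size - ksize + 2 * padding) / stride + 1.
// For example, input_size = 6, ksize = 2, stride = 2, padding = 0 gives an
// unpooled size of (6 - 1) * 2 - 0 + 2 = 12.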
int OutputSize(int input_size, int ksize, int padding, int stride) {
  int output_size = (input_size - 1) * stride - 2 * padding + ksize;
  return output_size;
}

class UnpoolOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;
  void InferShape(framework::InferShapeContext* ctx) const override {
    PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) of UnpoolOp"
                   "should not be null.");
    PADDLE_ENFORCE(ctx->HasInput("Y"), "Input(Y) of UnpoolOp"
                   "should not be null.");
    PADDLE_ENFORCE(ctx->HasOutput("Out"),
                   "Output(Out) of UnpoolOp should not be null.");

    auto in_x_dims = ctx->GetInputDim("X");
    auto in_y_dims = ctx->GetInputDim("Y");
    std::string unpoolingtype =
        ctx->Attrs().Get<std::string>("unpoolingtype");
    std::vector<int> ksize = ctx->Attrs().Get<std::vector<int>>("ksize");
    std::vector<int> strides = ctx->Attrs().Get<std::vector<int>>("strides");
    std::vector<int> paddings = ctx->Attrs().Get<std::vector<int>>("paddings");

    PADDLE_ENFORCE(in_x_dims.size() == 4,
                   "Unpooling input should be 4-D.");
    for (int i = 0; i < 4; ++i) {
      PADDLE_ENFORCE(in_x_dims[i] == in_y_dims[i],
                     "The dimensions of X and Y must be equal.");
    }

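    // Out keeps the batch (N) and channel (C) dimensions of X; every spatial
    // dimension is expanded according to OutputSize().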
    std::vector<int64_t> output_shape({in_x_dims[0], in_x_dims[1]});
    for (size_t i = 0; i < ksize.size(); ++i) {
      output_shape.push_back(
        OutputSize(in_x_dims[i + 2], ksize[i], paddings[i], strides[i]));
    }
    ctx->SetOutputDim("Out", framework::make_ddim(output_shape));
  }
};

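// Shape inference for the gradient op: the gradient with respect to X has
// exactly the same shape as X.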
class UnpoolOpGrad : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;
  void InferShape(framework::InferShapeContext* ctx) const override {
    PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) must not be null.");
    PADDLE_ENFORCE(ctx->HasOutput(framework::GradVarName("X")),
                   "Output(X@GRAD) should not be null.");
    ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X"));
  }
};
}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
REGISTER_OP(unpool, ops::UnpoolOp, ops::Unpool2dOpMaker, unpool_grad,
            ops::UnpoolOpGrad);
REGISTER_OP_CPU_KERNEL(unpool,
                       ops::UnpoolKernel<paddle::platform::CPUPlace, float>,
                       ops::UnpoolKernel<paddle::platform::CPUPlace, double>);
REGISTER_OP_CPU_KERNEL(unpool_grad,
                    ops::UnpoolGradKernel<paddle::platform::CPUPlace, float>,
                    ops::UnpoolGradKernel<paddle::platform::CPUPlace, double>);