/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/pool_with_index_op.h"
#include <memory>

namespace paddle {
namespace operators {

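// Output size of one spatial dimension of a pooling window; e.g. with
// input_size = 7, filter_size = 3, padding = 0 and stride = 2 this yields
// (7 - 3 + 2 * 0) / 2 + 1 = 3.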
inline int MaxPoolOutputSize(int input_size, int filter_size, int padding,
                             int stride) {
  int output_size = (input_size - filter_size + 2 * padding) / stride + 1;
  return output_size;
}

class MaxPoolWithIndexOp : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext *ctx) const override {
    PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
                      platform::errors::InvalidArgument(
                          "Input(X) of Pooling should not be null."));
    PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true,
                      platform::errors::InvalidArgument(
                          "Output(Out) of Pooling should not be null."));
    PADDLE_ENFORCE_EQ(ctx->HasOutput("Mask"), true,
                      platform::errors::InvalidArgument(
                          "Output(Mask) of Pooling should not be null."));

    auto in_x_dims = ctx->GetInputDim("X");

    std::vector<int> ksize = ctx->Attrs().Get<std::vector<int>>("ksize");
    std::vector<int> strides = ctx->Attrs().Get<std::vector<int>>("strides");
    std::vector<int> paddings = ctx->Attrs().Get<std::vector<int>>("paddings");
    bool adaptive = ctx->Attrs().Get<bool>("adaptive");

    PADDLE_ENFORCE(
        in_x_dims.size() == 4 || in_x_dims.size() == 5,
        platform::errors::InvalidArgument("Pooling intput should be 4-D or 5-D "
                                          "tensor but received %dD-Tensor",
                                          in_x_dims.size()));

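    // With global_pooling, ksize and paddings are overridden below: the
    // window is resized to cover the whole spatial extent of the input and
    // all paddings are set to zero.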
    if (ctx->Attrs().Get<bool>("global_pooling")) {
      ksize.resize(static_cast<size_t>(in_x_dims.size()) - 2);
      for (size_t i = 0; i < ksize.size(); ++i) {
        paddings[i] = 0;
        ksize[i] = static_cast<int>(in_x_dims[i + 2]);
      }
    }

    PADDLE_ENFORCE_EQ(
        in_x_dims.size() - ksize.size(), 2U,
        platform::errors::InvalidArgument(
            "The input size %d minus the kernel size %d should equal to 2.",
            in_x_dims.size(), ksize.size()));
    PADDLE_ENFORCE_EQ(
        ksize.size(), strides.size(),
        platform::errors::InvalidArgument(
            "Strides size %d and pooling size %d should be the same.",
            strides.size(), ksize.size()));
    PADDLE_ENFORCE_EQ(
        ksize.size(), paddings.size(),
        platform::errors::InvalidArgument(
            "Paddings size %d and pooling size %d should be the same.",
            paddings.size(), ksize.size()));

    std::vector<int64_t> output_shape({in_x_dims[0], in_x_dims[1]});
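    // For adaptive pooling the output spatial dims are exactly ksize;
    // otherwise each dim follows the standard pooling formula. Dims still
    // unknown (negative) at compile time are propagated unchanged.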
    if (adaptive) {
      output_shape.insert(output_shape.end(), ksize.begin(), ksize.end());
    } else {
      for (size_t i = 0; i < ksize.size(); ++i) {
        if ((!ctx->IsRuntime()) && (in_x_dims[i + 2] < 0)) {
          output_shape.push_back(in_x_dims[i + 2]);
        } else {
          output_shape.push_back(MaxPoolOutputSize(in_x_dims[i + 2], ksize[i],
                                                   paddings[i], strides[i]));
        }
      }
    }
    ctx->SetOutputDim("Out", phi::make_ddim(output_shape));
    ctx->SetOutputDim("Mask", phi::make_ddim(output_shape));
  }

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext &ctx) const override {
    return framework::OpKernelType(
        OperatorWithKernel::IndicateVarDataType(ctx, "X"),
        ctx.device_context());
  }
};

class MaxPoolWithIndexOpGrad : public framework::OperatorWithKernel {
 public:
  using framework::OperatorWithKernel::OperatorWithKernel;

  void InferShape(framework::InferShapeContext *ctx) const override {
    PADDLE_ENFORCE_EQ(
        ctx->HasInput("Mask"), true,
        platform::errors::InvalidArgument("Input(Mask) must not be null."));
    PADDLE_ENFORCE_EQ(
        ctx->HasInput("X"), true,
        platform::errors::InvalidArgument("Input(X) must not be null."));
    PADDLE_ENFORCE_EQ(ctx->HasInput(framework::GradVarName("Out")), true,
                      platform::errors::InvalidArgument(
                          "Input(Out@GRAD) should not be null."));
    PADDLE_ENFORCE_EQ(ctx->HasOutput(framework::GradVarName("X")), true,
                      platform::errors::InvalidArgument(
                          "Output(X@GRAD) should not be null."));
    ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X"));
  }

 protected:
  framework::OpKernelType GetExpectedKernelType(
      const framework::ExecutionContext &ctx) const override {
    return framework::OpKernelType(OperatorWithKernel::IndicateVarDataType(
                                       ctx, framework::GradVarName("Out")),
                                   ctx.device_context());
  }
};

class MaxPool2dWithIndexOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput(
        "X",
        "(Tensor) The input tensor of pooling operator. "
        "The format of input tensor is NCHW, where N is batch size, C is the "
        "number of channels, H is the height of the image, "
        "and W is the width of the image.");
    AddOutput("Out",
              "(Tensor) The output tensor of pooling operator. "
              "The format of output tensor is also NCHW, "
              "where N is batch size, C is "
              "the number of channels, H is the height of the image "
              "and W is the width of the image.");
    AddOutput("Mask",
              "(Tensor) The Mask tensor of pooling operator."
              "The format of output tensor is also NCHW, "
              "where N is batch size, C is the number of channels, "
              "H is the height of the image, "
              "and W is the width of the image. "
              "It represents the index in the current feature map.");

    AddAttr<std::vector<int>>("ksize",
                              "(vector<int>) The pooling window size(height, "
                              "width) of pooling operator. "
                              "If global_pooling = true, ksize and paddings "
                              "will be ignored.");  // TODO(Chengduo): Add
                                                    // checker. (Currently,
C
fix doc  
chengduoZH 已提交
163
    // TypedAttrChecker don't support vector type.)
    AddAttr<bool>(
        "global_pooling",
        "(bool, default:false) Whether to use the global pooling. "
        "If global_pooling = true, ksize and paddings will be ignored.")
        .SetDefault(false);
    AddAttr<bool>(
        "adaptive",
        "(bool, default False) When true, will perform adaptive pooling "
        "instead, "
        "output shape in H and W dimensions will be same as ksize, input data "
        "will be divided into grids specify by ksize averagely and perform "
        "pooling in each grid area to get output pooling value.")
        .SetDefault(false);
    AddAttr<std::vector<int>>("strides",
                              "(vector<int>, default {1, 1}), strides(height, "
                              "width) of pooling operator.")
        .SetDefault({1, 1});
    // TODO(Chengduo): Add checker. (Currently, TypedAttrChecker doesn't
    // support vector type.)
    AddAttr<std::vector<int>>(
        "paddings",
        "(vector<int>, default:{0, 0}), paddings(height, width) of pooling "
        "operator. "
        "If global_pooling = true, paddings and will be ignored.")
        .SetDefault({0, 0});
    // TODO(Chengduo): Add checker. (Currently, TypedAttrChecker doesn't
    // support vector type.)

    AddComment(R"DOC(
MaxPool2d Operator.

The MaxPool2d with index operation calculates the output and the mask
based on the input, ksize, strides, and paddings parameters. Input(X) and
output(Out, Mask) are in NCHW format, where N is batch size, C is the
number of channels, H is the height of the feature, 
and W is the width of the feature.
The parameters (ksize, strides, paddings) are two-element vectors.
These two elements represent height and width, respectively.
The input(X) size and output(Out, Mask) size may be different.

Example:
  Input:
       X shape: $(N, C, H_{in}, W_{in})$
  Output:
       Out shape: $(N, C, H_{out}, W_{out})$
       Mask shape: $(N, C, H_{out}, W_{out})$
  Where
       $$
       H_{out} = \frac{(H_{in} - ksize[0] + 2 * paddings[0])}{strides[0]} + 1 \\
       W_{out} = \frac{(W_{in} - ksize[1] + 2 * paddings[1])}{strides[1]} + 1
       $$
  
  For adaptive = true:
       $$
       H_{out} = ksize[0]   W_{out} = ksize[1]
       $$
      

)DOC");
  }
};

class MaxPool3dWithIndexOpMaker : public framework::OpProtoAndCheckerMaker {
 public:
  void Make() override {
    AddInput("X",
             "(Tensor) The input tensor of pooling operator. "
             "The format of input tensor is NCDHW, where N is batch size, C is "
             "the number of channels, and D, H and W are the depth, height and "
             "width of "
             "the image, respectively");
    AddOutput("Out",
              "(Tensor) The output tensor of pooling operator. "
              "The format of output tensor is also NCDHW, "
              "where N is the batch size, C is the number of channels, "
              "and D, H and W are the depth, height and "
              "width of the image, respectively.");
    AddOutput("Mask",
              "(Tensor) The Mask tensor of pooling operator. "
              "The format of output tensor is also NCDHW, "
              "where N is the batch size, C is the number of channels, and "
              "D, H and W are the depth, height and width "
              "of the image, respectively. "
              "It represents the index in the current feature map.");

    AddAttr<std::vector<int>>("ksize",
                              "(vector<int>) The pooling window size(depth, "
                              "height, width) of pooling operator. "
                              "If global_pooling = true, ksize and paddings "
                              "will be ignored.");  // TODO(Chengduo): Add
                                                    // checker. (Currently,
C
fix doc  
chengduoZH 已提交
253
    // TypedAttrChecker don't support vector type.)
    AddAttr<bool>(
        "global_pooling",
        "(bool, default false) Whether to use the global pooling. "
        "If global_pooling = true, ksize and paddings will be ignored.")
        .SetDefault(false);
    AddAttr<bool>(
        "adaptive",
        "(bool, default False) When true, will perform adaptive pooling "
        "instead, "
        "output shape in H and W dimensions will be same as ksize, input data "
        "will be divided into grids specify by ksize averagely and perform "
        "pooling in each grid area to get output pooling value.")
        .SetDefault(false);
    AddAttr<std::vector<int>>("strides",
                              "(vector<int>, default {1,1,1}), strides(depth, "
                              "height, width) of pooling operator.")
        .SetDefault({1, 1, 1});
    // TODO(Chengduo): Add checker. (Currently, TypedAttrChecker doesn't
    // support vector type.)
    AddAttr<std::vector<int>>(
        "paddings",
        "(vector, default {0,0,0}), paddings(depth, "
        "height, width) of pooling operator. "
        "If global_pooling = true, paddings and ksize will be ignored.")
        .SetDefault({0, 0, 0});
    // TODO(Chengduo): Add checker. (Currently, TypedAttrChecker doesn't
    // support vector type.)

    AddComment(R"DOC(
MaxPool3d Operator.

The MaxPool3d with index operation calculates the output and the mask
based on the input, ksize, strides, and paddings parameters.
Input(X) and output(Out, Mask) are in NCDHW format, where N is batch
size, C is the number of channels, and D, H and W are the depth, height and
width of the feature, respectively. 
The parameters (ksize, strides, paddings) are three-element vectors.
These three elements represent depth, height and width, respectively.
The input(X) size and output(Out, Mask) size may be different.

Example:
  Input:
       X shape: $(N, C, D_{in}, H_{in}, W_{in})$
  Output:
       Out shape: $(N, C, D_{out}, H_{out}, W_{out})$
       Mask shape: $(N, C, D_{out}, H_{out}, W_{out})$
  Where
       $$
       D_{out} = \frac{(D_{in} - ksize[0] + 2 * paddings[0])}{strides[0]} + 1 \\
       H_{out} = \frac{(H_{in} - ksize[1] + 2 * paddings[1])}{strides[1]} + 1 \\
       W_{out} = \frac{(W_{in} - ksize[2] + 2 * paddings[2])}{strides[2]} + 1
       $$
  
  For adaptive = true:
       $$
       D_{out} = ksize[0]   H_{out} = ksize[1]   W_{out} = ksize[2]
       $$

)DOC");
  }
};

template <typename T>
class MaxPoolWithIndexGradOpMaker : public framework::SingleGradOpMaker<T> {
 public:
  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;

 protected:
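  // Wires up the backward op: it reads the forward input X, the saved Mask
  // and Out@GRAD, reuses the forward attributes, and writes X@GRAD.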
  void Apply(GradOpPtr<T> op) const override {
    op->SetType(this->ForwardOpType() + "_grad");
    op->SetAttrMap(this->Attrs());
    op->SetInput("X", this->Input("X"));
    op->SetInput("Mask", this->Output("Mask"));
    op->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out"));
    op->SetOutput(framework::GradVarName("X"), this->InputGrad("X"));
  }
};

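// The backward kernel scatters Out@GRAD through Mask and only needs the
// shape of X to size X@GRAD, so X's data buffer can be dropped in the grad
// op.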
DECLARE_NO_NEED_BUFFER_VARS_INFERER(
    MaxPoolWithIndexOpGradNoNeedBufferVarsInferer, "X");

}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;

REGISTER_OPERATOR(max_pool2d_with_index, ops::MaxPoolWithIndexOp,
                  ops::MaxPool2dWithIndexOpMaker,
                  ops::MaxPoolWithIndexGradOpMaker<paddle::framework::OpDesc>,
                  ops::MaxPoolWithIndexGradOpMaker<paddle::imperative::OpBase>);
REGISTER_OPERATOR(max_pool2d_with_index_grad, ops::MaxPoolWithIndexOpGrad,
                  ops::MaxPoolWithIndexOpGradNoNeedBufferVarsInferer);

REGISTER_OP_CPU_KERNEL(
    max_pool2d_with_index,
    ops::MaxPoolWithIndexKernel<paddle::platform::CPUDeviceContext, float, int>,
    ops::MaxPoolWithIndexKernel<paddle::platform::CPUDeviceContext, double,
                                int>);
REGISTER_OP_CPU_KERNEL(
    max_pool2d_with_index_grad,
    ops::MaxPoolWithIndexGradKernel<paddle::platform::CPUDeviceContext, float,
                                    int>,
    ops::MaxPoolWithIndexGradKernel<paddle::platform::CPUDeviceContext, double,
                                    int>);

REGISTER_OPERATOR(max_pool3d_with_index, ops::MaxPoolWithIndexOp,
                  ops::MaxPool3dWithIndexOpMaker,
                  ops::MaxPoolWithIndexGradOpMaker<paddle::framework::OpDesc>,
                  ops::MaxPoolWithIndexGradOpMaker<paddle::imperative::OpBase>);
REGISTER_OPERATOR(max_pool3d_with_index_grad, ops::MaxPoolWithIndexOpGrad,
                  ops::MaxPoolWithIndexOpGradNoNeedBufferVarsInferer);

REGISTER_OP_CPU_KERNEL(
    max_pool3d_with_index,
    ops::MaxPoolWithIndexKernel<paddle::platform::CPUDeviceContext, float, int>,
    ops::MaxPoolWithIndexKernel<paddle::platform::CPUDeviceContext, double,
                                int>);
REGISTER_OP_CPU_KERNEL(
    max_pool3d_with_index_grad,
    ops::MaxPoolWithIndexGradKernel<paddle::platform::CPUDeviceContext, float,
                                    int>,
    ops::MaxPoolWithIndexGradKernel<paddle::platform::CPUDeviceContext, double,
                                    int>);