/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/pool_op.h"
#include <unordered_map>
#ifdef PADDLE_WITH_CUDA
#include "paddle/fluid/platform/cudnn_helper.h"
#endif
#ifdef PADDLE_WITH_MKLDNN
#include "paddle/fluid/platform/mkldnn_helper.h"
#endif

namespace paddle {
namespace operators {

int PoolOutputSize(int input_size, int filter_size, int padding_1,
                   int padding_2, int stride, bool ceil_mode) {
  int output_size;
  if (!ceil_mode) {
    output_size =
        (input_size - filter_size + padding_1 + padding_2) / stride + 1;
  } else {
    output_size =
        (input_size - filter_size + padding_1 + padding_2 + stride - 1) /
            stride +
        1;
  }
  PADDLE_ENFORCE_GT(
      output_size, 0,
      "Due to the settings of padding(%d,%d), filter_size(%d) and "
      "stride(%d), the computed output size is not positive, please check "
      "again. Input_size: %d",
      padding_1, padding_2, filter_size, stride, input_size);
  return output_size;
}
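
// Illustrative check of PoolOutputSize (the numbers are ours, not from the
// original source): input_size = 6, filter_size = 3, paddings = (0, 0) and
// stride = 2 give (6 - 3 + 0 + 0) / 2 + 1 = 2 in floor mode, while ceil
// mode gives (6 - 3 + 0 + 0 + 2 - 1) / 2 + 1 = 3, i.e. ceil_mode may add
// one extra, partially covered, output position.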

void PoolOp::InferShape(framework::InferShapeContext* ctx) const {
  PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
                    "X(Input) of Pooling should not be null.");
  PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true,
                    "Out(Output) of Pooling should not be null.");

  std::string pooling_type = ctx->Attrs().Get<std::string>("pooling_type");
  std::vector<int> ksize = ctx->Attrs().Get<std::vector<int>>("ksize");
  std::vector<int> strides = ctx->Attrs().Get<std::vector<int>>("strides");
  std::vector<int> paddings = ctx->Attrs().Get<std::vector<int>>("paddings");
  bool ceil_mode = ctx->Attrs().Get<bool>("ceil_mode");
  bool adaptive = ctx->Attrs().Get<bool>("adaptive");
  bool global_pooling = ctx->Attrs().Get<bool>("global_pooling");
  std::string data_format = ctx->Attrs().Get<std::string>("data_format");
  std::string padding_algorithm =
      ctx->Attrs().Get<std::string>("padding_algorithm");

  auto in_x_dims = ctx->GetInputDim("X");
  PADDLE_ENFORCE_EQ(in_x_dims.size() == 4 || in_x_dims.size() == 5, true,
                    "Pooling input should be a 4-D or 5-D tensor.");

  PADDLE_ENFORCE_EQ(in_x_dims.size() - ksize.size(), 2U,
                    "Input size and pooling size should be consistent.");
  PADDLE_ENFORCE_EQ(ksize.size(), strides.size(),
                    "Strides size and pooling size should be the same.");

  const bool channel_last = (data_format == "NHWC" || data_format == "NDHWC");

  // update paddings if "SAME" or global_pooling
  framework::DDim data_dims;
  if (channel_last) {
    data_dims = framework::slice_ddim(in_x_dims, 1, in_x_dims.size() - 1);
  } else {
    data_dims = framework::slice_ddim(in_x_dims, 2, in_x_dims.size());
  }
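  // UpdatePadding (defined in pool_op.h) normalizes `paddings` to two
  // entries (begin, end) per spatial dimension and resolves the
  // "SAME"/"VALID" padding algorithms; for global or adaptive pooling the
  // paddings collapse to zero.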
  UpdatePadding(&paddings, global_pooling, adaptive, padding_algorithm,
                data_dims, strides, ksize);

  if (global_pooling) {
    UpdateKsize(&ksize, data_dims);
  }

  std::vector<int64_t> output_shape;
  if (adaptive) {
    output_shape.insert(output_shape.end(), ksize.begin(), ksize.end());
  } else {
    for (size_t i = 0; i < data_dims.size(); ++i) {
      if ((!ctx->IsRuntime()) && (data_dims[i] < 0)) {
        output_shape.push_back(data_dims[i]);
      } else {
        output_shape.push_back(
            PoolOutputSize(data_dims[i], ksize[i], paddings[2 * i],
                           paddings[2 * i + 1], strides[i], ceil_mode));
      }
    }
  }

  // output_N = input_N
  output_shape.insert(output_shape.begin(), in_x_dims[0]);
  // output_C = input_C
  if (channel_last) {
    output_shape.push_back(in_x_dims[in_x_dims.size() - 1]);
  } else {
    output_shape.insert(output_shape.begin() + 1, in_x_dims[1]);
  }

  ctx->SetOutputDim("Out", framework::make_ddim(output_shape));
  ctx->ShareLoD("X", "Out");
}
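
// Illustrative shape-inference example (our numbers): an NCHW input of
// shape (8, 3, 32, 32) with ksize = {2, 2}, strides = {2, 2} and explicit
// paddings = {0, 0, 0, 0} yields, per spatial dimension,
// PoolOutputSize(32, 2, 0, 0, 2, false) = (32 - 2) / 2 + 1 = 16,
// so Out has shape (8, 3, 16, 16).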

framework::OpKernelType PoolOp::GetExpectedKernelType(
    const framework::ExecutionContext& ctx) const {
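  // Kernel dispatch preference: the cuDNN kernel when built with CUDA and
  // cuDNN is usable for this context, then the MKL-DNN kernel, otherwise
  // the plain reference kernel.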
  framework::LibraryType library_{framework::LibraryType::kPlain};
  std::string data_format = "AnyLayout";
  framework::DataLayout layout_ = framework::StringToDataLayout(data_format);

#ifdef PADDLE_WITH_CUDA
  if (platform::CanCUDNNBeUsed(ctx)) {
    library_ = framework::LibraryType::kCUDNN;
  }
#endif
#ifdef PADDLE_WITH_MKLDNN
  if (library_ == framework::LibraryType::kPlain &&
      platform::CanMKLDNNBeUsed(ctx)) {
    library_ = framework::LibraryType::kMKLDNN;
    layout_ = framework::DataLayout::kMKLDNN;
  }
#endif

  return framework::OpKernelType(ctx.Input<Tensor>("X")->type(), ctx.GetPlace(),
                                 layout_, library_);
}

void PoolOpGrad::InferShape(framework::InferShapeContext* ctx) const {
  PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true, "Input(X) must not be null.");
  PADDLE_ENFORCE_EQ(ctx->HasOutput(framework::GradVarName("X")), true,
                    "Output(X@GRAD) should not be null.");
  ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X"));
}

framework::OpKernelType PoolOpGrad::GetExpectedKernelType(
    const framework::ExecutionContext& ctx) const {
  framework::LibraryType library_{framework::LibraryType::kPlain};
  std::string data_format = "AnyLayout";
  framework::DataLayout layout_ = framework::StringToDataLayout(data_format);

#ifdef PADDLE_WITH_CUDA
  if (platform::CanCUDNNBeUsed(ctx)) {
    library_ = framework::LibraryType::kCUDNN;
  }
#endif
#ifdef PADDLE_WITH_MKLDNN
  if (library_ == framework::LibraryType::kPlain &&
      platform::CanMKLDNNBeUsed(ctx)) {
    library_ = framework::LibraryType::kMKLDNN;
    layout_ = framework::DataLayout::kMKLDNN;
  }
#endif

  auto input_data_type = ctx.Input<Tensor>("X")->type();
  if (input_data_type == framework::proto::VarType::FP16) {
    PADDLE_ENFORCE_EQ(library_, framework::LibraryType::kCUDNN,
                      "float16 can only be used when CUDNN is used");
  }
  return framework::OpKernelType(input_data_type, ctx.GetPlace(), layout_,
                                 library_);
}

void Pool2dOpMaker::Make() {
  AddInput(
      "X",
      "(Tensor) The input tensor of pooling operator. "
      "The format of input tensor is NCHW or NHWC, where N is batch size, "
      "C is the number of channels, H is the height of the feature, "
      "and W is the width of the feature.");
  AddOutput("Out",
            "(Tensor) The output tensor of pooling operator. "
            "The format of output tensor is also NCHW or NHWC, "
            "where N is batch size, C is the number of channels, "
            "H is the height of the feature, "
            "and W is the width of the feature.");

  AddAttr<std::string>("pooling_type",
                       "(string) The pooling type, can be \"max\" for "
                       "max-pooling and \"avg\" for average-pooling.")
      .InEnum({"max", "avg"});
  AddAttr<std::vector<int>>("ksize",
                            "(vector<int>) The pooling window "
                            "size (height, width) of the pooling operator. "
                            "If global_pooling = true, ksize and paddings will "
                            "be ignored.");  // TODO(Chengduo): Add checker.
                                             // (Currently,
  // TypedAttrChecker doesn't support vector type.)
  AddAttr<bool>(
      "global_pooling",
      "(bool) Whether to use global pooling. "
      "If global_pooling = true, kernel size and paddings will be ignored. "
      "Default False.")
      .SetDefault(false);
  AddAttr<std::vector<int>>("strides",
                            "(vector<int>, default {1, 1}), strides (height, "
                            "width) of the pooling operator.")
      .SetDefault({1, 1});
  // TODO(Chengduo): Add checker. (Currently,
  // TypedAttrChecker doesn't support vector type.)
  AddAttr<std::vector<int>>(
      "paddings",
      "(vector<int>, default {0,0}), paddings (height_top, height_bottom, "
      "width_left, width_right) of the pooling operator."
      "If global_pooling = true, paddings and kernel size will be ignored.")
      .SetDefault({0, 0});
  AddAttr<bool>(
      "exclusive",
      "(bool) When true, the zero-padding will be excluded from the "
      "averaging calculation; otherwise it is included. Note, it "
      "is only used when pooling_type is avg. Default True.")
      .SetDefault(true);
  AddAttr<bool>(
      "adaptive",
      "(bool) When true, adaptive pooling is performed instead: the "
      "output shape in the H and W dimensions will be the same as ksize, "
      "the input will be divided evenly into grids specified by ksize, "
      "and pooling will be performed in each grid area to get the output "
      "value. Default False.")
      .SetDefault(false);

  AddAttr<bool>(
      "use_cudnn",
      "(bool) Only used in the cuDNN kernel; requires cuDNN to be "
      "installed. Default False.")
      .SetDefault(false);
  AddAttr<bool>(
      "ceil_mode",
      "(bool) Whether to use the ceil function to calculate "
      "output height and width. If set to false, the floor function "
      "will be used. Default False.")
      .SetDefault(false);
  AddAttr<bool>("use_mkldnn",
                "(bool) Only used in the MKL-DNN kernel. Default False.")
      .SetDefault(false);
  AddAttr<bool>("use_quantizer",
                "(bool) "
                "Set to true for operators that should be quantized and use "
                "int8 kernel. "
                "Only used on CPU. Default False")
      .SetDefault(false);
  AddAttr<std::string>(
      "data_format",
      "(string, default NCHW) An optional string from: \"NHWC\", \"NCHW\". "
      "Defaults to \"NCHW\". Specify the data format of the output data; "
      "the input will be transformed automatically.")
      .SetDefault("NCHW");
  AddAttr<bool>("is_test",
                "(bool, default false) Set to true for inference only, false "
                "for training. Some layers may run faster when this is true.")
      .SetDefault(false);

  AddAttr<std::string>(
      "padding_algorithm",
      "(string, default \"EXPLICIT\") An optional string from: \"EXPLICIT\","
      "\"SAME\",\"VALID\". Set to \"EXPLICIT\" for explicit padding. "
      "Set to \"SAME\" or \"VALID\" for algorithm of padding. ")
      .SetDefault("EXPLICIT");
  // TODO(dzhwinter): need to register a layout transform function

  AddComment(R"DOC(
This operation calculates the pooling output based on
the input, pooling_type, pool_size, pool_stride, and pool_padding parameters.
Input(X) and Output(Out) are in NCHW or NHWC format, where N is batch size, C is the
number of channels, H is the height of the feature, and W is the width of the feature.
Parameters(pool_size, pool_stride, pool_padding) hold two integer elements.
These two elements represent height and width, respectively.
The input(X) size and output(Out) size may be different.

Example:

  Input:

       X shape: $(N, C, H_{in}, W_{in})$

  Output:

       Out shape: $(N, C, H_{out}, W_{out})$

  For pool_padding = "SAME":
       $$
       H_{out} = \\frac{(H_{in} + strides[0] - 1)}{strides[0]}
       $$
       $$
       W_{out} = \\frac{(W_{in} + strides[1] - 1)}{strides[1]}
       $$

  For pool_padding = "VALID":
       $$
       H_{out} = \\frac{(H_{in} - ksize[0] + strides[0])}{strides[0]}
       $$
       $$
       W_{out} = \\frac{(W_{in} - ksize[1] + strides[1])}{strides[1]}
       $$
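
  For example (illustrative numbers): with H_{in} = 5, ksize[0] = 3 and
  strides[0] = 2, "SAME" gives H_{out} = (5 + 2 - 1) / 2 = 3 (integer
  division), while "VALID" gives H_{out} = (5 - 3 + 2) / 2 = 2.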

  For ceil_mode = false:
       $$
       H_{out} = \\frac{(H_{in} - ksize[0] + pad_height_top + pad_height_bottom)}{strides[0]} + 1
       $$
       $$
       W_{out} = \\frac{(W_{in} - ksize[1] + pad_width_left + pad_width_right)}{strides[1]} + 1
       $$

  For ceil_mode = true:
       $$
       H_{out} = \\frac{(H_{in} - ksize[0] + pad_height_top + pad_height_bottom + strides[0] - 1)}{strides[0]} + 1
       $$
       $$
       W_{out} = \\frac{(W_{in} - ksize[1] + pad_width_left + pad_width_right + strides[1] - 1)}{strides[1]} + 1
       $$

  For exclusive = false:
       $$
       hstart = i * strides[0] - pad_height_top
       $$
       $$
       hend = hstart + ksize[0]
       $$
       $$
       wstart = j * strides[1] - pad_width_left
       $$
       $$
       wend = wstart + ksize[1]
       $$
       $$
       Output(i, j) = \\frac{sum(Input[hstart:hend, wstart:wend])}{ksize[0] * ksize[1]}
       $$

  For exclusive = true:
       $$
       hstart = max(0, i * strides[0] - pad_height_top)
       $$
       $$
       hend = min(H, hstart + ksize[0])
       $$
       $$
       wstart = max(0, j * strides[1] - pad_width_left)
       $$
       $$
       wend = min(W, wstart + ksize[1])
       $$
       $$
       Output(i, j) = \\frac{sum(Input[hstart:hend, wstart:wend])}{(hend - hstart) * (wend - wstart)}
       $$
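
  For example (illustrative numbers): with H = 4, ksize[0] = 3,
  strides[0] = 2 and pad_height_top = 1, the window at i = 2 has
  hstart = max(0, 2 * 2 - 1) = 3 and hend = min(4, 3 + 3) = 4, so
  exclusive = true divides the window sum by hend - hstart = 1, while
  exclusive = false divides it by ksize[0] * ksize[1].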

)DOC");
}

class PoolOpInferVarType : public framework::PassInDtypeAndVarTypeToOutput {
 protected:
  std::unordered_map<std::string, std::string> GetInputOutputWithSameType()
      const override {
    return std::unordered_map<std::string, std::string>{{"X", /*->*/ "Out"}};
  }
};

void Pool3dOpMaker::Make() {
  AddInput("X",
           "(Tensor) The input tensor of pooling operator. "
           "The format of input tensor is NCDHW or NDHWC, where N is batch "
           "size, C is "
           "the number of channels, and D, H and W are the depth, height "
           "and width of the feature, respectively.");
  AddOutput("Out",
            "(Tensor) The output tensor of pooling operator. "
            "The format of output tensor is also NCDHW or NDHWC, "
            "where N is batch size, C is "
            "the number of channels, and D, H and W are the depth, height "
            "and width of the feature, respectively.");

  AddAttr<std::string>("pooling_type",
                       "(string) Pooling type, can be \"max\" for max-pooling "
                       "and \"avg\" for average-pooling.")
      .InEnum({"max", "avg"});
  AddAttr<std::vector<int>>(
      "ksize",
      "(vector<int>) The pooling window size (depth, height, "
      "width) of the pooling operator. "
      "If global_pooling = true, ksize and paddings will "
      "be ignored.");  // TODO(Chengduo): Add checker.
                       // (Currently,
  // TypedAttrChecker doesn't support vector type.)
  AddAttr<bool>(
      "global_pooling",
      "(bool) Whether to use global pooling. "
      "If global_pooling = true, kernel size and paddings will be ignored. "
      "Default False")
      .SetDefault(false);
  AddAttr<std::vector<int>>(
      "strides",
      "(vector<int>, default {1,1,1}) Strides (depth, height, "
      "width) of the pooling operator.")
      .SetDefault({1, 1, 1});  // TODO(Chengduo): Add checker. (Currently,
                               // TypedAttrChecker doesn't support vector type.)
  AddAttr<std::vector<int>>(
      "paddings",
      "(vector<int>, default {0,0,0}), paddings(pad_depth_front, "
      "pad_depth_back, "
      "pad_height_top, pad_height_bottom, pad_width_left, pad_width_right"
      ") of pooling operator. "
      "If global_pooling = true, ksize and paddings will be ignored.")
      .SetDefault({0, 0, 0});  // TODO(Chengduo): Add checker. (Currently,
                               // TypedAttrChecker doesn't support vector type.)
  AddAttr<bool>(
      "exclusive",
      "(bool) When true, the zero-padding will be excluded from the "
      "averaging calculation; otherwise it is included. Note, it "
      "is only used when pooling_type is avg. Default True")
      .SetDefault(true);
  AddAttr<bool>(
      "adaptive",
      "(bool) When true, adaptive pooling is performed instead: the "
      "output shape in the D, H and W dimensions will be the same as "
      "ksize, the input will be divided evenly into grids specified by "
      "ksize, and pooling will be performed in each grid area to get the "
      "output value. Default False")
      .SetDefault(false);

  AddAttr<bool>(
      "use_cudnn",
      "(bool) Only used in the cuDNN kernel; requires cuDNN to be "
      "installed. Default False")
      .SetDefault(false);
  AddAttr<bool>(
      "ceil_mode",
      "(bool) Whether to use the ceil function to calculate "
      "output depth, height and width. If set to false, the floor "
      "function will be used. Default False")
      .SetDefault(false);
  AddAttr<bool>("use_mkldnn",
                "(bool) Only used in the MKL-DNN kernel. Default False")
      .SetDefault(false);
  AddAttr<std::string>(
      "data_format",
      "(string, default NCDHW) An optional string from: \"NDHWC\", "
      "\"NCDHW\". Defaults to \"NCDHW\". Specify the data format of the "
      "output data; the input will be transformed automatically.")
      .SetDefault("NCDHW");
  AddAttr<std::string>(
      "padding_algorithm",
      "(string, default \"EXPLICIT\") An optional string from: \"EXPLICIT\","
      "\"SAME\",\"VALID\". Set to \"EXPLICIT\" for explicit padding. "
      "Set to \"SAME\" or \"VALID\" for algorithm of padding. ")
      .SetDefault("EXPLICIT");
  // TODO(dzhwinter): need to register a layout transform function

  AddComment(R"DOC(
This operation calculates the output based on
the input, pooling_type, pool_size, pool_stride, and pool_padding parameters.
Input(X) and output(Out) are in NCDHW or NDHWC format, where N is batch
size, C is the number of channels, and D, H and W are the depth, height and
width of the feature, respectively. Parameters(pool_size, pool_stride, pool_padding)
hold three integer elements. These three elements represent depth, height and
width, respectively. The input(X) size and output(Out) size may be different.

Example:
  Input:
       X shape: $(N, C, D_{in}, H_{in}, W_{in})$
  Output:
       Out shape: $(N, C, D_{out}, H_{out}, W_{out})$

  For pool_padding = "SAME":
       $$
       D_{out} = \\frac{(D_{in} + strides[0] - 1)}{strides[0]}
       $$
       $$
       H_{out} = \\frac{(H_{in} + strides[1] - 1)}{strides[1]}
       $$
       $$
       W_{out} = \\frac{(W_{in} + strides[2] - 1)}{strides[2]}
       $$

  For pool_padding = "VALID":
       $$
       D_{out} = \\frac{(D_{in} - ksize[0] + strides[0])}{strides[0]}
       $$
       $$
       H_{out} = \\frac{(H_{in} - ksize[1] + strides[1])}{strides[1]}
       $$
       $$
       W_{out} = \\frac{(W_{in} - ksize[2] + strides[2])}{strides[2]}
       $$

  For ceil_mode = false:
       $$
       D_{out} = \\frac{(D_{in} - ksize[0] + pad_depth_front + pad_depth_back)}{strides[0]} + 1
       $$
       $$
       H_{out} = \\frac{(H_{in} - ksize[1] + pad_height_top + pad_height_bottom)}{strides[1]} + 1
       $$
       $$
       W_{out} = \\frac{(W_{in} - ksize[2] + pad_width_left + pad_width_right)}{strides[2]} + 1
       $$
  For ceil_mode = true:
       $$
       D_{out} = \\frac{(D_{in} - ksize[0] + pad_depth_front + pad_depth_back + strides[0] - 1)}{strides[0]} + 1
       $$
       $$
       H_{out} = \\frac{(H_{in} - ksize[1] + pad_height_top + pad_height_bottom + strides[1] - 1)}{strides[1]} + 1
       $$
       $$
       W_{out} = \\frac{(W_{in} - ksize[2] + pad_width_left + pad_width_right + strides[2] - 1)}{strides[2]} + 1
       $$

  For exclusive = false:
       $$
       dstart = i * strides[0] - pad_depth_front
       $$
       $$
       dend = dstart + ksize[0]
       $$
       $$
       hstart = j * strides[1] - pad_height_top
       $$
       $$
       hend = hstart + ksize[1]
       $$
       $$
       wstart = k * strides[2] - pad_width_left
       $$
       $$
       wend = wstart + ksize[2]
       $$
       $$
       Output(i, j, k) = \\frac{sum(Input[dstart:dend, hstart:hend, wstart:wend])}{ksize[0] * ksize[1] * ksize[2]}
       $$

  For exclusive = true:
       $$
       dstart = max(0, i * strides[0] - pad_depth_front)
       $$
       $$
       dend = min(D, dstart + ksize[0])
       $$
       $$
       hstart = max(0, j * strides[1] - pad_height_top)
       $$
       $$
       hend = min(H, hstart + ksize[1])
       $$
       $$
       wstart = max(0, k * strides[2] - pad_width_left)
       $$
       $$
       wend = min(W, wstart + ksize[2])
       $$
       $$
       Output(i, j, k) = \\frac{sum(Input[dstart:dend, hstart:hend, wstart:wend])}{(dend - dstart) * (hend - hstart) * (wend - wstart)}
       $$

)DOC");
}
}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;

REGISTER_OPERATOR(pool2d, ops::PoolOp, ops::Pool2dOpMaker,
                  ops::PoolOpInferVarType,
                  paddle::framework::DefaultGradOpDescMaker<true>);
REGISTER_OPERATOR(pool2d_grad, ops::PoolOpGrad);

REGISTER_OP_CPU_KERNEL(
    pool2d, ops::PoolKernel<paddle::platform::CPUDeviceContext, float>,
    ops::PoolKernel<paddle::platform::CPUDeviceContext, double>);
REGISTER_OP_CPU_KERNEL(
    pool2d_grad, ops::PoolGradKernel<paddle::platform::CPUDeviceContext, float>,
    ops::PoolGradKernel<paddle::platform::CPUDeviceContext, double>);

REGISTER_OPERATOR(pool3d, ops::PoolOp, ops::Pool3dOpMaker,
                  ops::PoolOpInferVarType,
                  paddle::framework::DefaultGradOpDescMaker<true>);
REGISTER_OPERATOR(pool3d_grad, ops::PoolOpGrad);

REGISTER_OP_CPU_KERNEL(
    pool3d, ops::PoolKernel<paddle::platform::CPUDeviceContext, float>,
    ops::PoolKernel<paddle::platform::CPUDeviceContext, double>);
REGISTER_OP_CPU_KERNEL(
    pool3d_grad, ops::PoolGradKernel<paddle::platform::CPUDeviceContext, float>,
    ops::PoolGradKernel<paddle::platform::CPUDeviceContext, double>);
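
// Only the CPU kernels are registered in this file; the corresponding GPU
// kernels for pool2d/pool3d (plain CUDA and the cuDNN variants dispatched
// above) are registered in separate GPU source files (conventionally
// pool_op.cu.cc and pool_cudnn_op.cu.cc in this codebase's layout; the
// file names are an assumption, not verified from this file).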