/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/pool_op.h"

#include <unordered_map>

#include "paddle/fluid/framework/infershape_utils.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/platform/device/gpu/gpu_dnn.h"
#include "paddle/phi/core/infermeta_utils.h"
#include "paddle/phi/infermeta/backward.h"
#include "paddle/phi/infermeta/unary.h"
#ifdef PADDLE_WITH_MKLDNN
#include "paddle/fluid/platform/mkldnn_helper.h"
#endif

namespace paddle {
namespace operators {

bool CanMKLDNNSupportPool(const framework::ExecutionContext& ctx) {
  if (ctx.Attr<bool>("adaptive") == false) return true;
  // (jczaja): oneDNN supports only pooling windows of fixed (unchangeable)
  // size
  auto src_tz = phi::vectorize(ctx.Input<Tensor>("X")->dims());
  std::vector<int> ksize = ctx.Attr<std::vector<int>>("ksize");
  // Fast but not exhaustive check
  return ((src_tz[src_tz.size() - 1] % ksize[1] == 0) &&
          (src_tz[src_tz.size() - 2] % ksize[0] == 0));
}
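
// Illustrative example (not part of the original logic): for an NCHW input
// of shape {1, 3, 224, 224} with adaptive ksize {7, 7}, 224 % 7 == 0 holds
// in both spatial dimensions, so the adaptive pool can be lowered to a
// fixed-window oneDNN pool; with ksize {5, 5} the check fails and the plain
// kernel is used instead.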

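// Kernel dispatch below prefers cuDNN when it can be used, then oneDNN
// (subject to CanMKLDNNSupportPool), and otherwise falls back to the plain
// library, with the data type inferred from input "X".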
framework::OpKernelType PoolOp::GetExpectedKernelType(
    const framework::ExecutionContext& ctx) const {
  framework::LibraryType library_{framework::LibraryType::kPlain};
  std::string data_format = "AnyLayout";
  framework::DataLayout layout_ = framework::StringToDataLayout(data_format);
  auto data_type = OperatorWithKernel::IndicateVarDataType(ctx, "X");

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  if (platform::CanCUDNNBeUsed(ctx)) {
    library_ = framework::LibraryType::kCUDNN;
  }
#endif
#ifdef PADDLE_WITH_MKLDNN
  if (library_ == framework::LibraryType::kPlain &&
      this->CanMKLDNNBeUsed(ctx, data_type) && CanMKLDNNSupportPool(ctx)) {
    library_ = framework::LibraryType::kMKLDNN;
    layout_ = framework::DataLayout::kMKLDNN;
  }
#endif

  return framework::OpKernelType(data_type, ctx.GetPlace(), layout_, library_);
}

framework::OpKernelType PoolOp::GetKernelTypeForVar(
    const std::string& var_name,
    const Tensor& tensor,
    const framework::OpKernelType& expected_kernel_type) const {
#ifdef PADDLE_WITH_MKLDNN
  if ((expected_kernel_type.data_layout_ == framework::DataLayout::kMKLDNN) &&
      (tensor.layout() != framework::DataLayout::kMKLDNN)) {
    auto attrs = Attrs();
    auto ar = paddle::framework::AttrReader(attrs);
    const std::string data_format = ar.Get<std::string>("data_format");
    auto dl = framework::StringToDataLayout(data_format);
    // Some models may have intentionally set "AnyLayout" for pool
    // op. Treat this as NCHW (default data_format value)
    if (dl != framework::DataLayout::kAnyLayout) {
      return framework::OpKernelType(
          expected_kernel_type.data_type_, tensor.place(), dl);
    }
  }
#endif
  return framework::OpKernelType(
      expected_kernel_type.data_type_, tensor.place(), tensor.layout());
}

framework::OpKernelType PoolOpGrad::GetExpectedKernelType(
    const framework::ExecutionContext& ctx) const {
  framework::LibraryType library_{framework::LibraryType::kPlain};
  std::string data_format = "AnyLayout";
  framework::DataLayout layout_ = framework::StringToDataLayout(data_format);
  auto input_data_type = OperatorWithKernel::IndicateVarDataType(ctx, "X");

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  if (platform::CanCUDNNBeUsed(ctx)) {
    library_ = framework::LibraryType::kCUDNN;
  }
#endif
#ifdef PADDLE_WITH_MKLDNN
  if (library_ == framework::LibraryType::kPlain &&
      this->CanMKLDNNBeUsed(ctx, input_data_type) &&
      CanMKLDNNSupportPool(ctx)) {
    library_ = framework::LibraryType::kMKLDNN;
    layout_ = framework::DataLayout::kMKLDNN;
  }
#endif

  return framework::OpKernelType(
      input_data_type, ctx.GetPlace(), layout_, library_);
}

framework::OpKernelType PoolOpGrad::GetKernelTypeForVar(
    const std::string& var_name,
    const Tensor& tensor,
    const framework::OpKernelType& expected_kernel_type) const {
#ifdef PADDLE_WITH_MKLDNN
  if ((expected_kernel_type.data_layout_ == framework::DataLayout::kMKLDNN) &&
      (tensor.layout() != framework::DataLayout::kMKLDNN)) {
    auto attrs = Attrs();
    auto ar = paddle::framework::AttrReader(attrs);
    const std::string data_format = ar.Get<std::string>("data_format");
    return framework::OpKernelType(expected_kernel_type.data_type_,
                                   tensor.place(),
                                   framework::StringToDataLayout(data_format));
  }
#endif
  return framework::OpKernelType(
      expected_kernel_type.data_type_, tensor.place(), tensor.layout());
}

void Pool2dOpMaker::Make() {
  AddInput(
      "X",
      "(Tensor) The input tensor of pooling operator. "
      "The format of input tensor is NCHW, where N is batch size, C is the "
      "number of channels, H is the height of the feature, "
      "and W is the width of the feature.");
  AddOutput("Out",
            "(Tensor) The output tensor of pooling operator. "
            "The format of output tensor is also NCHW, "
            "where N is batch size, C is the number of channels, "
            "H is the height of the feature, "
            "and W is the width of the feature.");

  AddAttr<std::string>("pooling_type",
                       "(string) Pooling type, can be \"max\" for max-pooling "
                       "and \"avg\" for average-pooling.")
      .InEnum({"max", "avg"});
  AddAttr<std::vector<int>>("ksize",
                            "(vector<int>) The pooling window "
                            "size(height, width) of the pooling operator. "
                            "If global_pooling = true, ksize and paddings will "
                            "be ignored.")
      .SupportTensor();
  AddAttr<bool>(
      "global_pooling",
      "(bool) Whether to use the global pooling. "
      "If global_pooling = true, kernel size and paddings will be ignored. "
      "Default False.")
      .SetDefault(false);
  AddAttr<std::vector<int>>("strides",
                            "(vector<int>, default {1, 1}), strides(height, "
                            "width) of pooling operator.")
      .SetDefault({1, 1});
  // TODO(Chengduo): Add checker. (Currently,
  // TypedAttrChecker doesn't support vector type.)
  AddAttr<std::vector<int>>(
      "paddings",
      "(vector<int>, default {0,0}), paddings(height_top, height_bottom, "
      "width_left, width_right) of pooling operator. "
      "If global_pooling = true, paddings and kernel size will be ignored.")
      .SetDefault({0, 0});
  AddAttr<bool>(
      "exclusive",
      "(bool) When true, will exclude the zero-padding in the "
      "averaging calculation; otherwise, include the zero-padding. Note, it "
      "is only used when pooling_type is avg. "
      "Default True.")
      .SetDefault(true);
  AddAttr<bool>(
      "adaptive",
      "(bool) When true, will perform adaptive pooling instead; "
      "output shape in the H and W dimensions will be the same as ksize, the "
      "input data will be divided evenly into grids specified by ksize, and "
      "pooling will be performed in each grid area to get the output value. "
      "Default False.")
      .SetDefault(false);
  AddAttr<bool>(
      "ceil_mode",
      "(bool) Whether to use the ceil function to calculate "
      "output height and width. If set to False, "
      "the floor function will be used. Default False.")
      .SetDefault(false);
  AddAttr<std::string>(
      "data_format",
      "(string, default NCHW) An optional string from: \"NHWC\", \"NCHW\". "
      "Defaults to \"NCHW\". Specify the data format of the output data; "
      "the input will be transformed automatically. ")
      .SetDefault("NCHW");
  AddAttr<std::string>(
      "padding_algorithm",
      "(string, default \"EXPLICIT\") An optional string from: \"EXPLICIT\","
      "\"SAME\",\"VALID\". Set to \"EXPLICIT\" for explicit padding. "
      "Set to \"SAME\" or \"VALID\" for algorithm of padding. ")
      .SetDefault("EXPLICIT");
  // TODO(dzhwinter): need to register a layout transform function
  AddAttr<bool>(
      "use_cudnn",
      "(bool) Only used in cudnn kernel; needs cudnn to be installed. "
      "Default False.")
      .SetDefault(false)
      .AsExtra();
  AddComment(R"DOC(
This operation calculates the pooling output based on
the input, pooling_type, and the pool_size, pool_stride, pool_padding parameters.
Input(X) and Output(Out) are in NCHW or NHWC format, where N is batch size, C is the
number of channels, H is the height of the feature, and W is the width of the feature.
Parameters(pool_size, pool_stride, pool_padding) hold two integer elements.
These two elements represent height and width, respectively.
The input(X) size and output(Out) size may be different.

Example:

  Input:

       X shape: $(N, C, H_{in}, W_{in})$

  Output:

       Out shape: $(N, C, H_{out}, W_{out})$

  For pool_padding = "SAME":
       $$
       H_{out} = \\frac{(H_{in} + strides[0] - 1)}{strides[0]}
       $$
       $$
       W_{out} = \\frac{(W_{in} + strides[1] - 1)}{strides[1]}
       $$

  For pool_padding = "VALID":
       $$
       H_{out} = \\frac{(H_{in} - ksize[0] + strides[0])}{strides[0]}
       $$
       $$
       W_{out} = \\frac{(W_{in} - ksize[1] + strides[1])}{strides[1]}
       $$

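  For example (illustrative): with H_{in} = 32 and strides[0] = 2,
  pool_padding = "SAME" gives H_{out} = (32 + 2 - 1) / 2 = 16 (integer
  division), while pool_padding = "VALID" with ksize[0] = 3 gives
  H_{out} = (32 - 3 + 2) / 2 = 15.
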
  For ceil_mode = false:
       $$
       H_{out} = \\frac{(H_{in} - ksize[0] + pad_height_top + pad_height_bottom)}{strides[0]} + 1
       $$
       $$
       W_{out} = \\frac{(W_{in} - ksize[1] + pad_width_left + pad_width_right)}{strides[1]} + 1
       $$

  For ceil_mode = true:
       $$
       H_{out} = \\frac{(H_{in} - ksize[0] + pad_height_top + pad_height_bottom + strides[0] - 1)}{strides[0]} + 1
       $$
       $$
       W_{out} = \\frac{(W_{in} - ksize[1] + pad_width_left + pad_width_right + strides[1] - 1)}{strides[1]} + 1
       $$

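  For example (illustrative): with H_{in} = 32, ksize[0] = 3, strides[0] = 2
  and pad_height_top = pad_height_bottom = 1, ceil_mode = false gives
  H_{out} = (32 - 3 + 2) / 2 + 1 = 16 (integer division), while
  ceil_mode = true gives H_{out} = (32 - 3 + 2 + 1) / 2 + 1 = 17.
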
  For exclusive = false:
       $$
       hstart = i * strides[0] - pad_height_top
       $$
       $$
       hend = hstart + ksize[0]
       $$
       $$
       wstart = j * strides[1] - pad_width_left
       $$
       $$
       wend = wstart + ksize[1]
       $$
       $$
       Output(i, j) = \\frac{sum(Input[hstart:hend, wstart:wend])}{ksize[0] * ksize[1]}
       $$

  For exclusive = true:
       $$
       hstart = max(0, i * strides[0] - pad_height_top)
       $$
       $$
       hend = min(H, hstart + ksize[0])
       $$
       $$
       wstart = max(0, j * strides[1] - pad_width_left)
       $$
       $$
       wend = min(W, wstart + ksize[1])
       $$
       $$
       Output(i, j) = \\frac{sum(Input[hstart:hend, wstart:wend])}{(hend - hstart) * (wend - wstart)}
       $$

)DOC");
}
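
// A minimal reference sketch (illustrative only, not used by the operator)
// of the documented output-size rule for explicit padding; the name and
// signature are hypothetical:
//
//   inline int PoolOutputSize(
//       int input, int ksize, int pad_front, int pad_back, int stride,
//       bool ceil_mode) {
//     int numerator = input - ksize + pad_front + pad_back;
//     if (ceil_mode) numerator += stride - 1;
//     return numerator / stride + 1;  // e.g. 32 -> 16 (ksize 3, pads 1+1,
//                                     // stride 2, ceil_mode false)
//   }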

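// Pool2dOpGradGradMaker wires pool2d's second-order gradient: the double-grad
// op takes the gradient of X's gradient as its "X" input, produces the
// gradient of Out's gradient as its "Out" output, and reuses the forward
// attributes.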
template <typename T>
class Pool2dOpGradGradMaker : public framework::SingleGradOpMaker<T> {
 public:
  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;

 protected:
  void Apply(GradOpPtr<T> grad_op) const override {
    grad_op->SetType("pool2d_double_grad");
    grad_op->SetInput("X", this->OutputGrad(framework::GradVarName("X")));
    grad_op->SetOutput("Out", this->InputGrad(framework::GradVarName("Out")));
    grad_op->SetAttrMap(this->Attrs());
  }
};

class PoolOpInferVarType : public framework::PassInDtypeAndVarTypeToOutput {
 protected:
  std::unordered_map<std::string, std::string>& GetInputOutputWithSameType()
      const override {
    static std::unordered_map<std::string, std::string> m{{"X", /*->*/ "Out"}};
    return m;
  }
};

void Pool3dOpMaker::Make() {
  AddInput("X",
           "(Tensor) The input tensor of pooling operator. "
           "The format of input tensor is NCDHW or NDHWC, where N is batch "
           "size, C is "
           "the number of channels, and D, H and W are the depth, height and "
           "width of "
           "the feature, respectively.");
  AddOutput("Out",
            "(Tensor) The output tensor of pooling operator. "
            "The format of output tensor is also NCDHW or NDHWC, "
            "where N is batch size, C is "
            "the number of channels, and D, H and W are the depth, height and "
            "width of the feature, respectively.");

  AddAttr<std::string>("pooling_type",
                       "(string) Pooling type, can be \"max\" for max-pooling "
                       "and \"avg\" for average-pooling.")
      .InEnum({"max", "avg"});
  AddAttr<std::vector<int>>(
      "ksize",
      "(vector<int>) The pooling window size(depth, height, "
      "width) of pooling operator. "
      "If global_pooling = true, ksize and paddings will "
      "be ignored.");
  AddAttr<bool>(
      "global_pooling",
      "(bool) Whether to use the global pooling. "
      "If global_pooling = true, kernel size and paddings will be ignored. "
      "Default False.")
      .SetDefault(false);
  AddAttr<std::vector<int>>(
      "strides",
      "(vector<int>, default {1,1,1}) Strides(depth, height, "
      "width) of the pooling operator.")
      .SetDefault({1, 1, 1});  // TODO(Chengduo): Add checker. (Currently,
                               // TypedAttrChecker doesn't support vector type.)
  AddAttr<std::vector<int>>(
      "paddings",
      "(vector<int>, default {0,0,0}), paddings(pad_depth_front, "
      "pad_depth_back, "
      "pad_height_top, pad_height_bottom, pad_width_left, pad_width_right"
      ") of pooling operator. "
      "If global_pooling = true, ksize and paddings will be ignored.")
      .SetDefault({0, 0, 0});  // TODO(Chengduo): Add checker. (Currently,
                               // TypedAttrChecker doesn't support vector type.)
  AddAttr<bool>(
      "exclusive",
      "(bool) When true, will exclude the zero-padding in the "
      "averaging calculation; otherwise, include the zero-padding. Note, it "
      "is only used when pooling_type is avg. "
      "Default True.")
      .SetDefault(true);
  AddAttr<bool>(
      "adaptive",
      "(bool) When true, will perform adaptive pooling instead; "
      "output shape in the D, H and W dimensions will be the same as ksize, "
      "the input data will be divided evenly into grids specified by ksize, "
      "and pooling will be performed in each grid area to get the output "
      "value. Default False.")
      .SetDefault(false);
  AddAttr<bool>(
      "ceil_mode",
      "(bool) Whether to use the ceil function to calculate "
      "output depth, height and width. If set to False, "
      "the floor function will be used. Default False.")
      .SetDefault(false);
  AddAttr<std::string>(
      "data_format",
      "(string, default NCDHW) An optional string from: \"NDHWC\", "
      "\"NCDHW\". Defaults to \"NCDHW\". Specify the data format of the "
      "output data; the input will be transformed automatically. ")
      .SetDefault("NCDHW");
  AddAttr<std::string>(
      "padding_algorithm",
      "(string, default \"EXPLICIT\") An optional string from: \"EXPLICIT\","
      "\"SAME\",\"VALID\". Set to \"EXPLICIT\" for explicit padding. "
      "Set to \"SAME\" or \"VALID\" for algorithm of padding. ")
      .SetDefault("EXPLICIT");
  AddAttr<bool>(
      "use_cudnn",
      "(bool) Only used in cudnn kernel; needs cudnn to be installed. "
      "Default False.")
      .SetDefault(false)
      .AsExtra();
  AddComment(R"DOC(
This operation calculates the output based on
the input, pooling_type, pool_size, pool_stride, and pool_padding parameters.
Input(X) and output(Out) are in NCDHW or NDHWC format, where N is batch
size, C is the number of channels, and D, H and W are the depth, height and
width of the feature, respectively. Parameters(pool_size, pool_stride, pool_padding)
hold three integer elements. These three elements represent depth, height and
width, respectively. The input(X) size and output(Out) size may be different.

Example:
  Input:
       X shape: $(N, C, D_{in}, H_{in}, W_{in})$
  Output:
       Out shape: $(N, C, D_{out}, H_{out}, W_{out})$

  For pool_padding = "SAME":
       $$
       D_{out} = \\frac{(D_{in} + strides[0] - 1)}{strides[0]}
       $$
       $$
       H_{out} = \\frac{(H_{in} + strides[1] - 1)}{strides[1]}
       $$
       $$
       W_{out} = \\frac{(W_{in} + strides[2] - 1)}{strides[2]}
       $$

  For pool_padding = "VALID":
       $$
       D_{out} = \\frac{(D_{in} - ksize[0] + strides[0])}{strides[0]}
       $$
       $$
       H_{out} = \\frac{(H_{in} - ksize[1] + strides[1])}{strides[1]}
       $$
       $$
       W_{out} = \\frac{(W_{in} - ksize[2] + strides[2])}{strides[2]}
       $$

  For ceil_mode = false:
       $$
       D_{out} = \\frac{(D_{in} - ksize[0] + pad_depth_front + pad_depth_back)}{strides[0]} + 1
       $$
       $$
       H_{out} = \\frac{(H_{in} - ksize[1] + pad_height_top + pad_height_bottom)}{strides[1]} + 1
       $$
       $$
       W_{out} = \\frac{(W_{in} - ksize[2] + pad_width_left + pad_width_right)}{strides[2]} + 1
       $$

  For ceil_mode = true:
       $$
       D_{out} = \\frac{(D_{in} - ksize[0] + pad_depth_front + pad_depth_back + strides[0] - 1)}{strides[0]} + 1
       $$
       $$
       H_{out} = \\frac{(H_{in} - ksize[1] + pad_height_top + pad_height_bottom + strides[1] - 1)}{strides[1]} + 1
       $$
       $$
       W_{out} = \\frac{(W_{in} - ksize[2] + pad_width_left + pad_width_right + strides[2] - 1)}{strides[2]} + 1
       $$

  For exclusive = false:
       $$
       dstart = i * strides[0] - pad_depth_front
       $$
       $$
       dend = dstart + ksize[0]
       $$
       $$
       hstart = j * strides[1] - pad_height_top
       $$
       $$
       hend = hstart + ksize[1]
       $$
       $$
       wstart = k * strides[2] - pad_width_left
       $$
       $$
       wend = wstart + ksize[2]
       $$
       $$
       Output(i, j, k) = \\frac{sum(Input[dstart:dend, hstart:hend, wstart:wend])}{ksize[0] * ksize[1] * ksize[2]}
       $$

  For exclusive = true:
       $$
       dstart = max(0, i * strides[0] - pad_depth_front)
       $$
       $$
       dend = min(D, dstart + ksize[0])
       $$
       $$
       hstart = max(0, j * strides[1] - pad_height_top)
       $$
       $$
       hend = min(H, hstart + ksize[1])
       $$
       $$
       wstart = max(0, k * strides[2] - pad_width_left)
       $$
       $$
       wend = min(W, wstart + ksize[2])
       $$
       $$
       Output(i, j, k) = \\frac{sum(Input[dstart:dend, hstart:hend, wstart:wend])}{(dend - dstart) * (hend - hstart) * (wend - wstart)}
       $$

)DOC");
}
}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;

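// The infer-shape functors below route these operators' shape inference to
// the corresponding phi InferMeta functions, keeping the fluid op
// definitions thin wrappers over the phi kernels.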
DECLARE_INFER_SHAPE_FUNCTOR(pool2d,
                            Pool2dInferShapeFunctor,
                            PD_INFER_META(phi::Pool2DInferMeta));
DECLARE_INFER_SHAPE_FUNCTOR(pool2d_grad,
                            Pool2dGradInferShapeFunctor,
                            PD_INFER_META(phi::UnchangedInferMeta));
DECLARE_INFER_SHAPE_FUNCTOR(pool2d_double_grad,
                            Pool2dDoubleGradInferShapeFunctor,
                            PD_INFER_META(phi::Pool2DInferMeta));

REGISTER_OPERATOR(
    pool2d,
    ops::PoolOp,
    ops::Pool2dOpMaker,
    ops::PoolOpInferVarType,
    paddle::framework::DefaultGradOpMaker<paddle::framework::OpDesc, true>,
    paddle::framework::DefaultGradOpMaker<paddle::imperative::OpBase, true>,
    Pool2dInferShapeFunctor);
REGISTER_OPERATOR(pool2d_grad,
                  ops::PoolOpGrad,
                  ops::Pool2dOpGradGradMaker<paddle::framework::OpDesc>,
                  ops::Pool2dOpGradGradMaker<paddle::imperative::OpBase>,
                  Pool2dGradInferShapeFunctor);
REGISTER_OPERATOR(pool2d_double_grad,
                  ops::PoolOp,
                  Pool2dDoubleGradInferShapeFunctor);

DECLARE_INFER_SHAPE_FUNCTOR(pool3d,
                            Pool3dInferShapeFunctor,
                            PD_INFER_META(phi::PoolInferMeta));
DECLARE_INFER_SHAPE_FUNCTOR(pool3d_grad,
                            Pool3dGradInferShapeFunctor,
                            PD_INFER_META(phi::UnchangedInferMeta));

REGISTER_OPERATOR(
    pool3d,
    ops::PoolOp,
    ops::Pool3dOpMaker,
    ops::PoolOpInferVarType,
    paddle::framework::DefaultGradOpMaker<paddle::framework::OpDesc, true>,
    paddle::framework::DefaultGradOpMaker<paddle::imperative::OpBase, true>,
    Pool3dInferShapeFunctor);
REGISTER_OPERATOR(pool3d_grad, ops::PoolOpGrad, Pool3dGradInferShapeFunctor);