/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/pool_op.h"

#include <unordered_map>

#include "paddle/fluid/framework/infershape_utils.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/platform/device/gpu/gpu_dnn.h"
#include "paddle/phi/core/infermeta_utils.h"
#include "paddle/phi/infermeta/backward.h"
#include "paddle/phi/infermeta/unary.h"
#ifdef PADDLE_WITH_MKLDNN
#include "paddle/fluid/platform/mkldnn_helper.h"
#endif

namespace paddle {
namespace operators {

bool CanMKLDNNSupportPool(const framework::ExecutionContext& ctx) {
  if (ctx.Attr<bool>("adaptive") == false) return true;
  // (jczaja): oneDNN supports only pooling windows of fixed size
  auto src_tz = phi::vectorize(ctx.Input<phi::DenseTensor>("X")->dims());
  if (!ctx.HasAttr("ksize")) {
    return false;
  }
  std::vector<int> ksize = ctx.Attr<std::vector<int>>("ksize");
  // Fast but not exhaustive check
  return ((src_tz[src_tz.size() - 1] % ksize[1] == 0) &&
          (src_tz[src_tz.size() - 2] % ksize[0] == 0));
}
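
// A rough illustration of the check above (hypothetical shapes): for an NCHW
// input of {1, 3, 32, 32} (so H = W = 32), adaptive pooling with
// ksize = {4, 4} passes (32 % 4 == 0 in both H and W) and oneDNN can be used,
// while ksize = {5, 5} fails the divisibility test and forces a fallback to
// the plain kernel.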

framework::OpKernelType PoolOp::GetExpectedKernelType(
    const framework::ExecutionContext& ctx) const {
  framework::LibraryType library_{framework::LibraryType::kPlain};
  phi::DataLayout layout_ = phi::DataLayout::kAnyLayout;
  auto data_type = OperatorWithKernel::IndicateVarDataType(ctx, "X");

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  if (platform::CanCUDNNBeUsed(ctx)) {
    library_ = framework::LibraryType::kCUDNN;
  }
#endif

  // NOTE(jiahongyu): The code below was originally enclosed by
  // PADDLE_WITH_MKLDNN
  this->SetDnnFallback(!CanMKLDNNSupportPool(ctx));
  // NOTE(jiahongyu): The code above was originally enclosed by
  // PADDLE_WITH_MKLDNN

  return framework::OpKernelType(data_type, ctx.GetPlace(), layout_, library_);
}
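
// Note on the selection above: cuDNN takes precedence whenever
// platform::CanCUDNNBeUsed returns true; otherwise the SetDnnFallback call
// disables the oneDNN path for pooling configurations it cannot handle.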

framework::OpKernelType PoolOp::GetKernelTypeForVar(
    const std::string& var_name,
    const phi::DenseTensor& tensor,
    const framework::OpKernelType& expected_kernel_type) const {
#ifdef PADDLE_WITH_MKLDNN
  if ((expected_kernel_type.data_layout_ == phi::DataLayout::kMKLDNN) &&
      (tensor.layout() != phi::DataLayout::kMKLDNN)) {
    auto attrs = Attrs();
    auto ar = paddle::framework::AttrReader(attrs);
    const std::string data_format = ar.Get<std::string>("data_format");
    auto dl = phi::StringToDataLayout(data_format);
    // Some models may have intentionally set "AnyLayout" for pool
    // op. Treat this as NCHW (default data_format value)
    if (dl != phi::DataLayout::kAnyLayout) {
      return framework::OpKernelType(
          expected_kernel_type.data_type_, tensor.place(), dl);
    }
  }
#endif
  return framework::OpKernelType(
      expected_kernel_type.data_type_, tensor.place(), tensor.layout());
}

framework::OpKernelType PoolOpGrad::GetExpectedKernelType(
    const framework::ExecutionContext& ctx) const {
  framework::LibraryType library_{framework::LibraryType::kPlain};
  phi::DataLayout layout_ = phi::DataLayout::kAnyLayout;
  auto input_data_type = OperatorWithKernel::IndicateVarDataType(ctx, "X");

#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
  if (platform::CanCUDNNBeUsed(ctx)) {
    library_ = framework::LibraryType::kCUDNN;
  }
#endif

  // NOTE(jiahongyu): The code below was originally enclosed by
  // PADDLE_WITH_MKLDNN
  this->SetDnnFallback(!CanMKLDNNSupportPool(ctx));
  // NOTE(jiahongyu): The code above was originally enclosed by
  // PADDLE_WITH_MKLDNN

  return framework::OpKernelType(
      input_data_type, ctx.GetPlace(), layout_, library_);
}

framework::OpKernelType PoolOpGrad::GetKernelTypeForVar(
    const std::string& var_name,
    const phi::DenseTensor& tensor,
    const framework::OpKernelType& expected_kernel_type) const {
#ifdef PADDLE_WITH_MKLDNN
  if ((expected_kernel_type.data_layout_ == phi::DataLayout::kMKLDNN) &&
      (tensor.layout() != phi::DataLayout::kMKLDNN)) {
    auto attrs = Attrs();
    auto ar = paddle::framework::AttrReader(attrs);
    const std::string data_format = ar.Get<std::string>("data_format");
    return framework::OpKernelType(expected_kernel_type.data_type_,
                                   tensor.place(),
                                   phi::StringToDataLayout(data_format));
  }
#endif
  return framework::OpKernelType(
      expected_kernel_type.data_type_, tensor.place(), tensor.layout());
}

void Pool2dOpMaker::Make() {
  AddInput(
      "X",
      "(Tensor) The input tensor of pooling operator. "
      "The format of input tensor is NCHW, where N is batch size, C is the "
      "number of channels, H is the height of the feature, "
      "and W is the width of the feature.");
  AddOutput("Out",
            "(Tensor) The output tensor of pooling operator. "
            "The format of output tensor is also NCHW, "
            "where N is batch size, C is the number of channels, "
            "H is the height of the feature, "
            "and W is the width of the feature.");

  AddAttr<std::string>("pooling_type",
                       "(string), pooling type, can be \"max\" for max-pooling "
                       "and \"avg\" for average-pooling.")
      .InEnum({"max", "avg"});
  AddAttr<std::vector<int>>("ksize",
                            "(vector<int>) The pooling window "
                            "size(height, width) of the pooling operator. "
                            "If global_pooling = true, ksize and paddings will "
                            "be ignored.")
      .SupportTensor();
  AddAttr<bool>(
      "global_pooling",
      "(bool) Whether to use the global pooling. "
      "If global_pooling = true, kernel size and paddings will be ignored. "
      "Default False.")
      .SetDefault(false);
  AddAttr<std::vector<int>>("strides",
                            "(vector<int>, default {1, 1}), strides(height, "
                            "width) of pooling operator.")
      .SetDefault({1, 1});
  // TODO(Chengduo): Add checker. (Currently,
  // TypedAttrChecker doesn't support vector types.)
  AddAttr<std::vector<int>>(
      "paddings",
      "(vector<int>, default {0,0}), paddings(height_top, height_bottom, "
      "width_left, wifth_right) of pooling operator."
      "If global_pooling = true, paddings and kernel size will be ignored.")
      .SetDefault({0, 0});
  AddAttr<bool>(
      "exclusive",
      "(bool) When true, the zero-padding is excluded from the averaging "
      "calculation; otherwise it is included. Note: it is only used when "
      "pooling_type is avg. Default True.")
      .SetDefault(true);
  AddAttr<bool>(
      "adaptive",
      "(bool) When true, adaptive pooling is performed instead: the output "
      "shape in the H and W dimensions will be the same as ksize, the input "
      "data will be divided evenly into grids specified by ksize, and "
      "pooling is performed in each grid area to get the output pooling "
      "value. Default False.")
      .SetDefault(false);
  AddAttr<bool>(
      "ceil_mode",
      "(bool) Whether to use the ceil function to calculate "
      "output height and width. If it is set to false, "
      "the floor function will be used. Default False.")
      .SetDefault(false);
  AddAttr<std::string>(
      "data_format",
      "(string, default NCHW) Only used in "
      "An optional string from: \"NHWC\", \"NCHW\". "
      "Defaults to \"NHWC\". Specify the data format of the output data, "
      "the input will be transformed automatically. ")
      .SetDefault("NCHW");
  AddAttr<std::string>(
      "padding_algorithm",
      "(string, default \"EXPLICIT\") An optional string from: \"EXPLICIT\","
      "\"SAME\",\"VALID\". Set to \"EXPLICIT\" for explicit padding. "
      "Set to \"SAME\" or \"VALID\" for algorithm of padding. ")
      .SetDefault("EXPLICIT");
  // TODO(dzhwinter): need to register a layout transform function
  AddAttr<bool>(
      "use_cudnn",
      "(bool) Only used in cudnn kernel, need install cudnn. Default False")
      .SetDefault(false)
      .AsExtra();
  AddComment(R"DOC(
This operation calculates the pooling output based on
the input, pooling_type, and the pool_size, pool_stride and pool_padding parameters.
Input(X) and Output(Out) are in NCHW or NHWC format, where N is batch size, C is the
number of channels, H is the height of the feature, and W is the width of the feature.
Parameters (pool_size, pool_stride, pool_padding) hold two integer elements.
These two elements represent height and width, respectively.
The input(X) size and output(Out) size may be different.

Example:

  Input:

       X shape: $(N, C, H_{in}, W_{in})$

  Output:

       Out shape: $(N, C, H_{out}, W_{out})$

  For pool_padding = "SAME":
       $$
       H_{out} = \\frac{(H_{in} + strides[0] - 1)}{strides[0]}
       $$
       $$
       W_{out} = \\frac{(W_{in} + strides[1] - 1)}{strides[1]}
       $$

  For pool_padding = "VALID":
       $$
       H_{out} = \\frac{(H_{in} - ksize[0] + strides[0])}{strides[0]}
       $$
       $$
       W_{out} = \\frac{(W_{in} - ksize[1] + strides[1])}{strides[1]}
       $$
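
  For example, with $H_{in} = 7$ and $strides[0] = 2$, pool_padding = "SAME"
  gives $H_{out} = (7 + 2 - 1) / 2 = 4$, while pool_padding = "VALID" with
  $ksize[0] = 3$ gives $H_{out} = (7 - 3 + 2) / 2 = 3$.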

  For ceil_mode = false:
       $$
       H_{out} = \\frac{(H_{in} - ksize[0] + pad_height_top + pad_height_bottom)}{strides[0]} + 1
       $$
       $$
       W_{out} = \\frac{(W_{in} - ksize[1] + pad_width_left + pad_width_right)}{strides[1]} + 1
       $$

  For ceil_mode = true:
       $$
       H_{out} = \\frac{(H_{in} - ksize[0] + pad_height_top + pad_height_bottom + strides[0] - 1)}{strides[0]} + 1
       $$
       $$
       W_{out} = \\frac{(W_{in} - ksize[1] + pad_width_left + pad_width_right + strides[1] - 1)}{strides[1]} + 1
       $$
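
  For example, with $H_{in} = 8$, $ksize[0] = 3$, $strides[0] = 2$ and zero
  paddings, taking the division as integer (floor) division: ceil_mode = false
  gives $H_{out} = 5 / 2 + 1 = 3$, while ceil_mode = true gives
  $H_{out} = 6 / 2 + 1 = 4$.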

  For exclusive = false:
       $$
       hstart = i * strides[0] - pad_height_top
       $$
       $$
       hend = hstart + ksize[0]
       $$
       $$
       wstart = j * strides[1] - pad_width_left
       $$
       $$
       wend = wstart + ksize[1]
       $$
       $$
       Output(i, j) = \\frac{sum(Input[hstart:hend, wstart:wend])}{ksize[0] * ksize[1]}
       $$

  For exclusive = true:
       $$
       hstart = max(0, i * strides[0] - pad_height_top)
       $$
       $$
       hend = min(H, hstart + ksize[0])
       $$
       $$
       wstart = max(0, j * strides[1] - pad_width_left)
       $$
       $$
       wend = min(W, wstart + ksize[1])
       $$
       $$
       Output(i, j) = \\frac{sum(Input[hstart:hend, wstart:wend])}{(hend - hstart) * (wend - wstart)}
       $$
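
  For example, with $H = W = 4$, ksize = {3, 3}, strides = {1, 1} and all
  paddings set to 1, the window at the last output position (i = j = 3)
  overlaps only rows and columns 2..3 of the input: exclusive = true divides
  the sum by $(4 - 2) * (4 - 2) = 4$, whereas exclusive = false divides it by
  $ksize[0] * ksize[1] = 9$.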

)DOC");
}

template <typename T>
class Pool2dOpGradGradMaker : public framework::SingleGradOpMaker<T> {
 public:
  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;

 protected:
  void Apply(GradOpPtr<T> grad_op) const override {
    grad_op->SetType("pool2d_double_grad");
    grad_op->SetInput("X", this->OutputGrad(framework::GradVarName("X")));
    grad_op->SetOutput("Out", this->InputGrad(framework::GradVarName("Out")));
    grad_op->SetAttrMap(this->Attrs());
  }
};
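
// In the maker above, the double-grad op consumes the gradient of X's
// gradient (DDX) as its input "X" and produces the gradient of Out's
// gradient (DDOut) as its output "Out", reusing the forward op's attributes.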

class PoolOpInferVarType : public framework::PassInDtypeAndVarTypeToOutput {
 protected:
  std::unordered_map<std::string, std::string>& GetInputOutputWithSameType()
      const override {
    static std::unordered_map<std::string, std::string> m{{"X", /*->*/ "Out"}};
    return m;
  }
};
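
// With the {"X" -> "Out"} mapping above, Out inherits both the dtype and the
// variable type of X.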

void Pool3dOpMaker::Make() {
  AddInput("X",
           "(Tensor) The input tensor of pooling operator. "
           "The format of input tensor is NCDHW or NDHWC, where N is batch "
           "size, C is "
           "the number of channels, and D, H and W is the depth, height and "
           "width of "
           "the feature, respectively.");
  AddOutput("Out",
            "(Tensor) The output tensor of pooling operator."
            "The format of output tensor is also NCDHW or NDHWC, "
            "where N is batch size, C is "
            "the number of channels, and D, H and W is the depth, height and "
            "width of the feature, respectively.");

  AddAttr<std::string>("pooling_type",
                       "(string) Pooling type, can be \"max\" for max-pooling "
                       "and \"avg\" for average-pooling.")
      .InEnum({"max", "avg"});
  AddAttr<std::vector<int>>(
      "ksize",
      "(vector<int>) The pooling window size(depth, height, "
      "width) of pooling operator. "
      "If global_pooling = true, ksize and paddings will "
      "be ignored.");
  AddAttr<bool>(
      "global_pooling",
      "(bool) Whether to use the global pooling. "
      "If global_pooling = true, kernel size and paddings will be ignored. "
      "Default False")
      .SetDefault(false);
  AddAttr<std::vector<int>>(
      "strides",
      "(vector<int>, default {1,1,1}) Strides(depth, height, "
      "width) of the pooling operator.")
      .SetDefault({1, 1, 1});  // TODO(Chengduo): Add checker. (Currently,
                               // TypedAttrChecker doesn't support vector types.)
  AddAttr<std::vector<int>>(
      "paddings",
      "(vector<int>, default {0,0,0}), paddings(pad_depth_front, "
      "pad_depth_back, "
      "pad_height_top, pad_height_bottom, pad_width_left, pad_width_right"
      ") of pooling operator. "
      "If global_pooling = true, ksize and paddings will be ignored.")
      .SetDefault({0, 0, 0});  // TODO(Chengduo): Add checker. (Currently,
                               // TypedAttrChecker doesn't support vector types.)
  AddAttr<bool>(
      "exclusive",
      "(bool) When true, the zero-padding is excluded from the averaging "
      "calculation; otherwise it is included. Note: it is only used when "
      "pooling_type is avg. Default True.")
      .SetDefault(true);
  AddAttr<bool>(
      "adaptive",
      "(bool) When true, adaptive pooling is performed instead: the output "
      "shape in the D, H and W dimensions will be the same as ksize, the "
      "input data will be divided evenly into grids specified by ksize, and "
      "pooling is performed in each grid area to get the output pooling "
      "value. Default False.")
      .SetDefault(false);
  AddAttr<bool>(
      "ceil_mode",
      "(bool) Whether to use the ceil function to calculate "
      "output depth, height and width. If it is set to false, "
      "the floor function will be used. Default False.")
      .SetDefault(false);
  AddAttr<std::string>(
      "data_format",
      "(string, default NCDHW) An optional string from: \"NDHWC\", "
      "\"NCDHW\". Defaults to \"NCDHW\". Specify the data format of the "
      "output data; the input will be transformed automatically.")
      .SetDefault("NCDHW");
  AddAttr<std::string>(
      "padding_algorithm",
      "(string, default \"EXPLICIT\") An optional string from: \"EXPLICIT\","
      "\"SAME\",\"VALID\". Set to \"EXPLICIT\" for explicit padding. "
      "Set to \"SAME\" or \"VALID\" for algorithm of padding. ")
      .SetDefault("EXPLICIT");
  AddAttr<bool>(
      "use_cudnn",
      "(bool) Only used in cudnn kernel, need install cudnn. Default False")
      .SetDefault(false)
      .AsExtra();
  AddComment(R"DOC(
This operation calculates the output based on
the input, pooling_type, pool_size, pool_stride, and pool_padding parameters.
Input(X) and output(Out) are in NCDHW or NDHWC format, where N is batch
size, C is the number of channels, and D, H and W are the depth, height and
width of the feature, respectively. Parameters (pool_size, pool_stride, pool_padding)
hold three integer elements. These three elements represent depth, height and
width, respectively. The input(X) size and output(Out) size may be different.

Example:
  Input:
       X shape: $(N, C, D_{in}, H_{in}, W_{in})$
  Output:
       Out shape: $(N, C, D_{out}, H_{out}, W_{out})$

  For pool_padding = "SAME":
       $$
       D_{out} = \\frac{(D_{in} + strides[0] - 1)}{strides[0]}
       $$
       $$
       H_{out} = \\frac{(H_{in} + strides[1] - 1)}{strides[1]}
       $$
       $$
       W_{out} = \\frac{(W_{in} + strides[2] - 1)}{strides[2]}
       $$

  For pool_padding = "VALID":
       $$
       D_{out} = \\frac{(D_{in} - ksize[0] + strides[0])}{strides[0]}
       $$
       $$
       H_{out} = \\frac{(H_{in} - ksize[1] + strides[1])}{strides[1]}
       $$
       $$
       W_{out} = \\frac{(W_{in} - ksize[2] + strides[2])}{strides[2]}
       $$

  For ceil_mode = false:
       $$
       D_{out} = \\frac{(D_{in} - ksize[0] + pad_depth_front + pad_depth_back)}{strides[0]} + 1
       $$
       $$
       H_{out} = \\frac{(H_{in} - ksize[1] + pad_height_top + pad_height_bottom)}{strides[1]} + 1
       $$
       $$
       W_{out} = \\frac{(W_{in} - ksize[2] + pad_width_left + pad_width_right)}{strides[2]} + 1
       $$
  For ceil_mode = true:
       $$
       D_{out} = \\frac{(D_{in} - ksize[0] + pad_depth_front + pad_depth_back + strides[0] - 1)}{strides[0]} + 1
       $$
       $$
       H_{out} = \\frac{(H_{in} - ksize[1] + pad_height_top + pad_height_bottom + strides[1] - 1)}{strides[1]} + 1
       $$
       $$
       W_{out} = \\frac{(W_{in} - ksize[2] + pad_width_left + pad_width_right + strides[2] - 1)}{strides[2]} + 1
       $$

  For exclusive = false:
       $$
       dstart = i * strides[0] - pad_depth_front
       $$
       $$
       dend = dstart + ksize[0]
       $$
       $$
       hstart = j * strides[1] - pad_height_top
       $$
       $$
       hend = hstart + ksize[1]
       $$
       $$
       wstart = k * strides[2] - pad_width_left
       $$
       $$
       wend = wstart + ksize[2]
       $$
       $$
       Output(i, j, k) = \\frac{sum(Input[dstart:dend, hstart:hend, wstart:wend])}{ksize[0] * ksize[1] * ksize[2]}
       $$

  For exclusive = true:
       $$
       dstart = max(0, i * strides[0] - pad_depth_front)
       $$
       $$
       dend = min(D, dstart + ksize[0])
       $$
       $$
       hstart = max(0, j * strides[1] - pad_height_top)
       $$
       $$
       hend = min(H, hstart + ksize[1])
       $$
       $$
       wstart = max(0, k * strides[2] - pad_width_left)
       $$
       $$
       wend = min(W, wstart + ksize[2])
       $$
       $$
       Output(i, j, k) = \\frac{sum(Input[dstart:dend, hstart:hend, wstart:wend])}{(dend - dstart) * (hend - hstart) * (wend - wstart)}
       $$

)DOC");
}
}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;

DECLARE_INFER_SHAPE_FUNCTOR(pool2d,
                            Pool2dInferShapeFunctor,
                            PD_INFER_META(phi::Pool2DInferMeta));
DECLARE_INFER_SHAPE_FUNCTOR(pool2d_grad,
                            Pool2dGradInferShapeFunctor,
                            PD_INFER_META(phi::UnchangedInferMeta));
DECLARE_INFER_SHAPE_FUNCTOR(pool2d_double_grad,
                            Pool2dDoubleGradInferShapeFunctor,
                            PD_INFER_META(phi::Pool2DInferMeta));
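
// The infer-shape functors declared above delegate shape inference to the
// corresponding phi infermeta functions; each functor is attached to its
// operator registration below.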

REGISTER_OPERATOR(
    pool2d,
    ops::PoolOp,
    ops::Pool2dOpMaker,
    ops::PoolOpInferVarType,
    paddle::framework::DefaultGradOpMaker<paddle::framework::OpDesc, true>,
    paddle::framework::DefaultGradOpMaker<paddle::imperative::OpBase, true>,
    Pool2dInferShapeFunctor);
REGISTER_OPERATOR(pool2d_grad,
                  ops::PoolOpGrad,
                  ops::Pool2dOpGradGradMaker<paddle::framework::OpDesc>,
                  ops::Pool2dOpGradGradMaker<paddle::imperative::OpBase>,
                  Pool2dGradInferShapeFunctor);
REGISTER_OPERATOR(pool2d_double_grad,
                  ops::PoolOp,
                  Pool2dDoubleGradInferShapeFunctor);

DECLARE_INFER_SHAPE_FUNCTOR(pool3d,
                            Pool3dInferShapeFunctor,
                            PD_INFER_META(phi::PoolInferMeta));
DECLARE_INFER_SHAPE_FUNCTOR(pool3d_grad,
                            Pool3dGradInferShapeFunctor,
                            PD_INFER_META(phi::UnchangedInferMeta));

REGISTER_OPERATOR(
    pool3d,
    ops::PoolOp,
    ops::Pool3dOpMaker,
    ops::PoolOpInferVarType,
    paddle::framework::DefaultGradOpMaker<paddle::framework::OpDesc, true>,
    paddle::framework::DefaultGradOpMaker<paddle::imperative::OpBase, true>,
    Pool3dInferShapeFunctor);
REGISTER_OPERATOR(pool3d_grad, ops::PoolOpGrad, Pool3dGradInferShapeFunctor);