/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/fluid/operators/pool_op.h"

#include <unordered_map>

#include "paddle/fluid/framework/infershape_utils.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/platform/device/gpu/gpu_dnn.h"
#include "paddle/phi/core/infermeta_utils.h"
#include "paddle/phi/infermeta/backward.h"
#include "paddle/phi/infermeta/unary.h"
#ifdef PADDLE_WITH_MKLDNN
#include "paddle/fluid/platform/mkldnn_helper.h"
#endif

namespace paddle {
namespace operators {

bool CanMKLDNNSupportPool(const framework::ExecutionContext& ctx) {
  if (ctx.Attr<bool>("adaptive") == false) return true;
  // (jczaja): oneDNN supports only pooling windows of unchanging size
  auto src_tz = phi::vectorize(ctx.Input<phi::DenseTensor>("X")->dims());
  if (!ctx.HasAttr("ksize")) {
    return false;
  }
  std::vector<int> ksize = ctx.Attr<std::vector<int>>("ksize");
  // Fast but not exhaustive check
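  // Illustrative example (hypothetical shapes): for an NCHW input with dims
  // {8, 3, 224, 224} and adaptive ksize {7, 7}, 224 % 7 == 0 in both H and W,
  // so adaptive pooling reduces to a fixed 32x32 window that oneDNN can run;
  // with ksize {5, 5}, 224 % 5 != 0 and the op falls back to the native
  // kernel.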
  return ((src_tz[src_tz.size() - 1] % ksize[1] == 0) &&
          (src_tz[src_tz.size() - 2] % ksize[0] == 0));
}

framework::OpKernelType PoolOp::GetExpectedKernelType(
    const framework::ExecutionContext& ctx) const {
  auto data_type = OperatorWithKernel::IndicateVarDataType(ctx, "X");

  // NOTE(jiahongyu): Code below was originally enclosed by PADDLE_WITH_MKLDNN
  this->SetDnnFallback(!CanMKLDNNSupportPool(ctx));
  // NOTE(jiahongyu) END: Code above originally enclosed by PADDLE_WITH_MKLDNN

  return framework::OpKernelType(data_type, ctx.GetPlace());
}

framework::OpKernelType PoolOp::GetKernelTypeForVar(
    const std::string& var_name,
    const phi::DenseTensor& tensor,
    const framework::OpKernelType& expected_kernel_type) const {
#ifdef PADDLE_WITH_MKLDNN
  if ((expected_kernel_type.data_layout_ == phi::DataLayout::kMKLDNN) &&
      (tensor.layout() != phi::DataLayout::kMKLDNN)) {
    auto attrs = Attrs();
    auto ar = paddle::framework::AttrReader(attrs);
    const std::string data_format = ar.Get<std::string>("data_format");
    auto dl = phi::StringToDataLayout(data_format);
    // Some models may have intentionally set "AnyLayout" for pool
    // op. Treat this as NCHW (default data_format value)
    if (dl != phi::DataLayout::kAnyLayout) {
      return framework::OpKernelType(
          expected_kernel_type.data_type_, tensor.place(), dl);
    }
  }
#endif
  return framework::OpKernelType(
      expected_kernel_type.data_type_, tensor.place(), tensor.layout());
}

framework::OpKernelType PoolOpGrad::GetExpectedKernelType(
    const framework::ExecutionContext& ctx) const {
  auto input_data_type = OperatorWithKernel::IndicateVarDataType(ctx, "X");

  // NOTE(jiahongyu): Code below was originally enclosed by PADDLE_WITH_MKLDNN
  this->SetDnnFallback(!CanMKLDNNSupportPool(ctx));
  // NOTE(jiahongyu): Code above was originally enclosed by PADDLE_WITH_MKLDNN

  return framework::OpKernelType(input_data_type, ctx.GetPlace());
}

framework::OpKernelType PoolOpGrad::GetKernelTypeForVar(
    const std::string& var_name,
    const phi::DenseTensor& tensor,
    const framework::OpKernelType& expected_kernel_type) const {
#ifdef PADDLE_WITH_MKLDNN
  if ((expected_kernel_type.data_layout_ == phi::DataLayout::kMKLDNN) &&
      (tensor.layout() != phi::DataLayout::kMKLDNN)) {
    auto attrs = Attrs();
    auto ar = paddle::framework::AttrReader(attrs);
    const std::string data_format = ar.Get<std::string>("data_format");
    return framework::OpKernelType(expected_kernel_type.data_type_,
                                   tensor.place(),
                                   phi::StringToDataLayout(data_format));
  }
#endif
  return framework::OpKernelType(
      expected_kernel_type.data_type_, tensor.place(), tensor.layout());
}

void Pool2dOpMaker::Make() {
  AddInput(
      "X",
      "(Tensor) The input tensor of pooling operator. "
      "The format of input tensor is NCHW, where N is batch size, C is the "
      "number of channels, H is the height of the feature, "
      "and W is the width of the feature.");
  AddOutput("Out",
            "(Tensor) The output tensor of pooling operator. "
            "The format of output tensor is also NCHW, "
            "where N is batch size, C is the number of channels, "
            "H is the height of the feature, "
            "and W is the width of the feature.");

  AddAttr<std::string>("pooling_type",
                       "(string), pooling type, can be \"max\" for max-pooling "
                       "and \"avg\" for average-pooling.")
      .InEnum({"max", "avg"});
  AddAttr<std::vector<int>>("ksize",
                            "(vector<int>) The pooling window "
                            "size(height, width) of the pooling operator. "
                            "If global_pooling = true, ksize and paddings will "
                            "be ignored.")
      .SupportTensor();
  AddAttr<bool>(
      "global_pooling",
      "(bool) Whether to use the global pooling. "
      "If global_pooling = true, kernel size and paddings will be ignored. "
      "Default False.")
      .SetDefault(false);
  AddAttr<std::vector<int>>("strides",
                            "(vector<int>, default {1, 1}), strides(height, "
                            "width) of pooling operator.")
      .SetDefault({1, 1});
  // TODO(Chengduo): Add checker. (Currently,
  // TypedAttrChecker doesn't support vector type.)
  AddAttr<std::vector<int>>(
      "paddings",
      "(vector<int>, default {0,0}), paddings(height_top, height_bottom, "
      "width_left, width_right) of pooling operator. "
      "If global_pooling = true, paddings and kernel size will be ignored.")
      .SetDefault({0, 0});
  AddAttr<bool>(
      "exclusive",
      "(bool) When true, will exclude the zero-padding in the "
      "averaging calculation, otherwise, include the zero-padding. Note, it "
      "is only used when pooling_type is avg. Default True.")
      .SetDefault(true);
  AddAttr<bool>(
      "adaptive",
      "(bool) When true, will perform adaptive pooling instead, "
      "output shape in H and W dimensions will be the same as ksize; the "
      "input data will be divided evenly into grids specified by ksize, and "
      "pooling is performed in each grid area to get the output pooling "
      "value. Default False.")
      .SetDefault(false);
  AddAttr<bool>(
      "ceil_mode",
      "(bool) Whether to use the ceil function to calculate "
      "output height and width. If it is set to False, "
      "the floor function will be used. Default False")
      .SetDefault(false);
  AddAttr<std::string>(
      "data_format",
      "(string, default NCHW) An optional string from: \"NHWC\", \"NCHW\". "
      "Defaults to \"NCHW\". Specify the data format of the output data, "
      "the input will be transformed automatically. ")
      .SetDefault("NCHW");
  AddAttr<std::string>(
      "padding_algorithm",
      "(string, default \"EXPLICIT\") An optional string from: \"EXPLICIT\","
      "\"SAME\",\"VALID\". Set to \"EXPLICIT\" for explicit padding. "
      "Set to \"SAME\" or \"VALID\" for algorithm of padding. ")
      .SetDefault("EXPLICIT");
  // TODO(dzhwinter): need to register layout transform function
  AddAttr<bool>(
      "use_cudnn",
      "(bool) Only used in cudnn kernel; cudnn must be installed. Default False")
      .SetDefault(false)
      .AsExtra();
  AddComment(R"DOC(
This operation calculates the pooling output based on
the input, pooling_type, pool_size, pool_stride, and pool_padding parameters.
Input(X) and Output(Out) are in NCHW or NHWC format, where N is batch size, C is the
number of channels, H is the height of the feature, and W is the width of the feature.
Parameters(pool_size, pool_stride, pool_padding) hold two integer elements.
These two elements represent height and width, respectively.
The input(X) size and output(Out) size may be different.

Example:

  Input:

       X shape: $(N, C, H_{in}, W_{in})$

  Output:

       Out shape: $(N, C, H_{out}, W_{out})$

  For pool_padding = "SAME":
       $$
       H_{out} = \\frac{(H_{in} + strides[0] - 1)}{strides[0]}
       $$
       $$
       W_{out} = \\frac{(W_{in} + strides[1] - 1)}{strides[1]}
       $$

  For pool_padding = "VALID":
       $$
       H_{out} = \\frac{(H_{in} - ksize[0] + strides[0])}{strides[0]}
       $$
       $$
       W_{out} = \\frac{(W_{in} - ksize[1] + strides[1])}{strides[1]}
       $$
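
  For example (illustrative numbers): with $H_{in} = 5$, strides = [2, 2] and
  ksize = [3, 3], pool_padding = "SAME" gives $H_{out} = (5 + 2 - 1) / 2 = 3$,
  while pool_padding = "VALID" gives $H_{out} = (5 - 3 + 2) / 2 = 2$.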

  For ceil_mode = false:
       $$
       H_{out} = \\frac{(H_{in} - ksize[0] + pad_height_top + pad_height_bottom)}{strides[0]} + 1
       $$
       $$
       W_{out} = \\frac{(W_{in} - ksize[1] + pad_width_left + pad_width_right)}{strides[1]} + 1
       $$

  For ceil_mode = true:
       $$
       H_{out} = \\frac{(H_{in} - ksize[0] + pad_height_top + pad_height_bottom + strides[0] - 1)}{strides[0]} + 1
       $$
       $$
       W_{out} = \\frac{(W_{in} - ksize[1] + pad_width_left + pad_width_right + strides[1] - 1)}{strides[1]} + 1
       $$
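
  For example (illustrative numbers): with $H_{in} = 5$, ksize = [2, 2],
  strides = [2, 2] and zero paddings, ceil_mode = false gives
  $H_{out} = (5 - 2) / 2 + 1 = 2$ (integer division), while ceil_mode = true
  gives $H_{out} = (5 - 2 + 1) / 2 + 1 = 3$, keeping the last partial window.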

  For exclusive = false:
       $$
       hstart = i * strides[0] - pad_height_top
       $$
       $$
       hend = hstart + ksize[0]
       $$
       $$
       wstart = j * strides[1] - pad_width_left
       $$
       $$
       wend = wstart + ksize[1]
       $$
       $$
       Output(i, j) = \\frac{sum(Input[hstart:hend, wstart:wend])}{ksize[0] * ksize[1]}
       $$

  For exclusive = true:
       $$
       hstart = max(0, i * strides[0] - pad_height_top)
       $$
       $$
       hend = min(H, hstart + ksize[0])
       $$
       $$
       wstart = max(0, j * strides[1] - pad_width_left)
       $$
       $$
       wend = min(W, wstart + ksize[1])
       $$
       $$
       Output(i, j) = \\frac{sum(Input[hstart:hend, wstart:wend])}{(hend - hstart) * (wend - wstart)}
       $$
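
  For example (illustrative numbers): with ksize = [3, 3] and a window that
  hangs one row into the top padding, only a 2 x 3 region of real input is
  covered; exclusive = true divides the window sum by 2 * 3 = 6, while
  exclusive = false divides it by ksize[0] * ksize[1] = 9.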

)DOC");
}

template <typename T>
class Pool2dOpGradGradMaker : public framework::SingleGradOpMaker<T> {
 public:
  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;

 protected:
  void Apply(GradOpPtr<T> grad_op) const override {
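    // Wiring note: ddX (the gradient of this op's dX output) feeds the
    // double-grad op's "X" input, and its "Out" output carries ddOut onward.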
    grad_op->SetType("pool2d_double_grad");
    grad_op->SetInput("X", this->OutputGrad(framework::GradVarName("X")));
    grad_op->SetOutput("Out", this->InputGrad(framework::GradVarName("Out")));
    grad_op->SetAttrMap(this->Attrs());
  }
};

class PoolOpInferVarType : public framework::PassInDtypeAndVarTypeToOutput {
 protected:
  std::unordered_map<std::string, std::string>& GetInputOutputWithSameType()
      const override {
    static std::unordered_map<std::string, std::string> m{{"X", /*->*/ "Out"}};
    return m;
  }
};

void Pool3dOpMaker::Make() {
  AddInput("X",
           "(Tensor) The input tensor of pooling operator. "
           "The format of input tensor is NCDHW or NDHWC, where N is batch "
           "size, C is "
           "the number of channels, and D, H and W are the depth, height and "
           "width of "
           "the feature, respectively.");
  AddOutput("Out",
C
chengduoZH 已提交
311
            "(Tensor) The output tensor of pooling operator."
312
            "The format of output tensor is also NCDHW or NDHWC, "
K
kexinzhao 已提交
313 314
            "where N is batch size, C is "
            "the number of channels, and D, H and W is the depth, height and "
315
            "width of the feature, respectively.");
316

  AddAttr<std::string>("pooling_type",
                       "(string) Pooling type, can be \"max\" for max-pooling "
                       "and \"avg\" for average-pooling.")
      .InEnum({"max", "avg"});
  AddAttr<std::vector<int>>(
      "ksize",
      "(vector<int>) The pooling window size(depth, height, "
      "width) of pooling operator. "
      "If global_pooling = true, ksize and paddings will "
      "be ignored.");
  AddAttr<bool>(
      "global_pooling",
      "(bool) Whether to use the global pooling. "
      "If global_pooling = true, kernel size and paddings will be ignored. "
      "Default False")
      .SetDefault(false);
  AddAttr<std::vector<int>>(
      "strides",
      "(vector<int>, default {1,1,1}) Strides(depth, height, "
      "width) of the pooling operator.")
      .SetDefault({1, 1, 1});  // TODO(Chengduo): Add checker. (Currently,
                               // TypedAttrChecker doesn't support vector type.)
  AddAttr<std::vector<int>>(
      "paddings",
      "(vector<int>, default {0,0,0}), paddings(pad_depth_front, "
      "pad_depth_back, "
      "pad_height_top, pad_height_bottom, pad_width_left, pad_width_right"
      ") of pooling operator. "
      "If global_pooling = true, ksize and paddings will be ignored.")
      .SetDefault({0, 0, 0});  // TODO(Chengduo): Add checker. (Currently,
                               // TypedAttrChecker doesn't support vector type.)
  AddAttr<bool>(
      "exclusive",
      "(bool) When true, will exclude the zero-padding in the "
      "averaging calculation, otherwise, include the zero-padding. Note, it "
      "is only used when pooling_type is avg. Default True")
      .SetDefault(true);
  AddAttr<bool>(
      "adaptive",
      "(bool) When true, will perform adaptive pooling instead, "
      "output shape in H and W dimensions will be the same as ksize; the "
      "input data will be divided evenly into grids specified by ksize, and "
      "pooling is performed in each grid area to get the output pooling "
      "value. Default False")
      .SetDefault(false);
  AddAttr<bool>(
      "ceil_mode",
      "(bool) Whether to use the ceil function to calculate "
      "output height and width. If it is set to False, "
      "the floor function will be used. Default False")
      .SetDefault(false);
  AddAttr<std::string>(
      "data_format",
      "(string, default NCDHW) An optional string from: \"NDHWC\", \"NCDHW\". "
      "Defaults to \"NCDHW\". Specify the data format of the output data, "
      "the input will be transformed automatically. ")
      .SetDefault("NCDHW");
  AddAttr<std::string>(
      "padding_algorithm",
      "(string, default \"EXPLICIT\") An optional string from: \"EXPLICIT\","
      "\"SAME\",\"VALID\". Set to \"EXPLICIT\" for explicit padding. "
      "Set to \"SAME\" or \"VALID\" for algorithm of padding. ")
      .SetDefault("EXPLICIT");
  AddAttr<bool>(
      "use_cudnn",
      "(bool) Only used in cudnn kernel; cudnn must be installed. Default False")
      .SetDefault(false)
      .AsExtra();
  AddComment(R"DOC(
This operation calculates the output based on
the input, pooling_type, pool_size, pool_stride, and pool_padding parameters.
Input(X) and output(Out) are in NCDHW or NDHWC format, where N is batch
size, C is the number of channels, and D, H and W are the depth, height and
width of the feature, respectively. Parameters(pool_size, pool_stride, pool_padding)
hold three integer elements. These three elements represent depth, height and
width, respectively. The input(X) size and output(Out) size may be different.

Example:
  Input:
       X shape: $(N, C, D_{in}, H_{in}, W_{in})$
  Output:
       Out shape: $(N, C, D_{out}, H_{out}, W_{out})$

  For pool_padding = "SAME":
       $$
       D_{out} = \\frac{(D_{in} + strides[0] - 1)}{strides[0]}
       $$
       $$
       H_{out} = \\frac{(H_{in} + strides[1] - 1)}{strides[1]}
       $$
       $$
       W_{out} = \\frac{(W_{in} + strides[2] - 1)}{strides[2]}
       $$

  For pool_padding = "VALID":
       $$
       D_{out} = \\frac{(D_{in} - ksize[0] + strides[0])}{strides[0]}
       $$
       $$
       H_{out} = \\frac{(H_{in} - ksize[1] + strides[1])}{strides[1]}
       $$
       $$
       W_{out} = \\frac{(W_{in} - ksize[2] + strides[2])}{strides[2]}
       $$

  For ceil_mode = false:
       $$
       D_{out} = \\frac{(D_{in} - ksize[0] + pad_depth_front + pad_depth_back)}{strides[0]} + 1
       $$
       $$
       H_{out} = \\frac{(H_{in} - ksize[1] + pad_height_top + pad_height_bottom)}{strides[1]} + 1
       $$
       $$
       W_{out} = \\frac{(W_{in} - ksize[2] + pad_width_left + pad_width_right)}{strides[2]} + 1
       $$

  For ceil_mode = true:
       $$
       D_{out} = \\frac{(D_{in} - ksize[0] + pad_depth_front + pad_depth_back + strides[0] - 1)}{strides[0]} + 1
       $$
       $$
       H_{out} = \\frac{(H_{in} - ksize[1] + pad_height_top + pad_height_bottom + strides[1] - 1)}{strides[1]} + 1
       $$
       $$
       W_{out} = \\frac{(W_{in} - ksize[2] + pad_width_left + pad_width_right + strides[2] - 1)}{strides[2]} + 1
       $$

  For exclusive = false:
       $$
       dstart = i * strides[0] - pad_depth_front
       $$
       $$
       dend = dstart + ksize[0]
       $$
       $$
       hstart = j * strides[1] - pad_height_top
       $$
       $$
       hend = hstart + ksize[1]
       $$
       $$
       wstart = k * strides[2] - pad_width_left
       $$
       $$
       wend = wstart + ksize[2]
       $$
       $$
       Output(i, j, k) = \\frac{sum(Input[dstart:dend, hstart:hend, wstart:wend])}{ksize[0] * ksize[1] * ksize[2]}
       $$

  For exclusive = true:
       $$
       dstart = max(0, i * strides[0] - pad_depth_front)
       $$
       $$
       dend = min(D, dstart + ksize[0])
       $$
       $$
       hstart = max(0, j * strides[1] - pad_height_top)
       $$
       $$
       hend = min(H, hstart + ksize[1])
       $$
       $$
       wstart = max(0, k * strides[2] - pad_width_left)
       $$
       $$
       wend = min(W, wstart + ksize[2])
       $$
       $$
       Output(i, j, k) = \\frac{sum(Input[dstart:dend, hstart:hend, wstart:wend])}{(dend - dstart) * (hend - hstart) * (wend - wstart)}
       $$
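
  For example (illustrative numbers): with $D_{in} = H_{in} = W_{in} = 4$,
  ksize = [2, 2, 2], strides = [2, 2, 2], zero paddings and ceil_mode = false,
  each spatial dimension of Out is $(4 - 2) / 2 + 1 = 2$, giving an Out shape
  of $(N, C, 2, 2, 2)$.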

)DOC");
}
}  // namespace operators
}  // namespace paddle

namespace ops = paddle::operators;
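// Shape inference for these ops is delegated to the phi infermeta functions
// wired up below; the operator classes above only select kernels and layouts.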

DECLARE_INFER_SHAPE_FUNCTOR(pool2d,
                            Pool2dInferShapeFunctor,
                            PD_INFER_META(phi::Pool2DInferMeta));
DECLARE_INFER_SHAPE_FUNCTOR(pool2d_grad,
                            Pool2dGradInferShapeFunctor,
                            PD_INFER_META(phi::UnchangedInferMeta));
DECLARE_INFER_SHAPE_FUNCTOR(pool2d_double_grad,
                            Pool2dDoubleGradInferShapeFunctor,
                            PD_INFER_META(phi::Pool2DInferMeta));

REGISTER_OPERATOR(
    pool2d,
    ops::PoolOp,
    ops::Pool2dOpMaker,
    ops::PoolOpInferVarType,
    paddle::framework::DefaultGradOpMaker<paddle::framework::OpDesc, true>,
    paddle::framework::DefaultGradOpMaker<paddle::imperative::OpBase, true>,
    Pool2dInferShapeFunctor);
REGISTER_OPERATOR(pool2d_grad,
                  ops::PoolOpGrad,
                  ops::Pool2dOpGradGradMaker<paddle::framework::OpDesc>,
                  ops::Pool2dOpGradGradMaker<paddle::imperative::OpBase>,
                  Pool2dGradInferShapeFunctor);
REGISTER_OPERATOR(pool2d_double_grad,
                  ops::PoolOp,
                  Pool2dDoubleGradInferShapeFunctor);

DECLARE_INFER_SHAPE_FUNCTOR(pool3d,
                            Pool3dInferShapeFunctor,
                            PD_INFER_META(phi::PoolInferMeta));
DECLARE_INFER_SHAPE_FUNCTOR(pool3d_grad,
                            Pool3dGradInferShapeFunctor,
                            PD_INFER_META(phi::UnchangedInferMeta));

REGISTER_OPERATOR(
    pool3d,
    ops::PoolOp,
    ops::Pool3dOpMaker,
    ops::PoolOpInferVarType,
    paddle::framework::DefaultGradOpMaker<paddle::framework::OpDesc, true>,
    paddle::framework::DefaultGradOpMaker<paddle::imperative::OpBase, true>,
    Pool3dInferShapeFunctor);
REGISTER_OPERATOR(pool3d_grad, ops::PoolOpGrad, Pool3dGradInferShapeFunctor);